diff --git a/README.md b/README.md
new file mode 100644
index 000000000..074734140
--- /dev/null
+++ b/README.md
@@ -0,0 +1,41 @@
+JDBM4 provides HashMap and TreeMap backed by disk storage. It is a fast and easy-to-use embedded Java database.
+
+Currently there is only an early development version; there is not even a user-friendly API yet.
+Only ConcurrentHashMap is implemented. To test it, use the following code:
+
+    import net.kotek.jdbm.*;
+    RecordStore db = new RecordStoreCache("filename",true);
+    HashMap2 map = new HashMap2(db,0L);
+    //do something with map
+    db.close();
+
+To reopen the map you need to save its rootRecid between sessions:
+
+    long rootRecid = map.rootRecid; //save this number somewhere
+    //restart JVM or whatever, and later reopen the map:
+    RecordStore db = new RecordStoreCache("filename",true);
+    HashMap2 map = new HashMap2(db,rootRecid);
+    //do something with map, it is populated with the previous data
+    db.close();
+
+
+What works (or should):
+
+* low-level RecordStorage (basically a Map of recid to record)
+* serializers for most `java.lang.*` and `java.util.*` classes
+* hard-reference cache, automatically cleared when memory runs low
+* full thread safety
+* concurrent scalability should be nearly linear with the number of cores (even for writes)
+* all writes are done in a background thread
+
+What is not there yet:
+
+* transactions
+* Weak/Soft/MRU cache
+* POJO serialization
+* TreeMap aka BTree
+* user-friendly interface (DB & DBMaker)
+* records larger than 64KB (the current maximum record size)
diff --git a/pom.xml b/pom.xml
new file mode 100644
index 000000000..5ec9d4dff
--- /dev/null
+++ b/pom.xml
@@ -0,0 +1,104 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>net.kotek.jdbm</groupId>
+    <artifactId>jdbm</artifactId>
+    <version>4.0-SNAPSHOT</version>
+
+    <developers>
+        <developer>
+            <name>Jan Kotek</name>
+            <id>jan</id>
+        </developer>
+    </developers>
+
+    <licenses>
+        <license>
+            <name>Apache 2</name>
+        </license>
+    </licenses>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.8.2</version>
+            <type>jar</type>
+            <scope>test</scope>
+            <optional>false</optional>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>2.3.2</version>
+                <configuration>
+                    <source>1.5</source>
+                    <target>1.5</target>
+                    <encoding>${project.build.sourceEncoding}</encoding>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-resources-plugin</artifactId>
+                <version>2.5</version>
+                <configuration>
+                    <encoding>${project.build.sourceEncoding}</encoding>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-source-plugin</artifactId>
+                <version>2.1.2</version>
+                <executions>
+                    <execution>
+                        <id>attach-sources</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>jar</goal>
+                            <goal>test-jar</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
\ No newline at end of file
diff --git a/src/main/java/net/kotek/jdbm/CC.java b/src/main/java/net/kotek/jdbm/CC.java
new file mode 100644
index 000000000..381fa71f5
--- /dev/null
+++ b/src/main/java/net/kotek/jdbm/CC.java
@@ -0,0 +1,29 @@
+package net.kotek.jdbm;
+
+/**
+ * Compiler Configuration.
+ * Static final booleans to enable/disable features you want.
+ * The compiler and dead code elimination will take care of removing unwanted features from the bytecode.
+ */
+interface CC {
+
+    /**
+     * Compile with assertions.
+     */
+    boolean ASSERT = true;
+
+    /**
+     * Compile with trace logging statements (Logger.debug and Logger.trace).
+     */
+    boolean TRACE = true;
+
+    /**
+     * JDBM has some long running acceptance tests. For daily development it makes sense to skip those.
+     * This flag controls whether all tests are run.
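+     * For example (illustrative use of the flag; the guarded test method is hypothetical),
+     * the whole branch is removed by the compiler when the constant is false:
+     * <pre>
+     *     if(CC.FULL_TEST){
+     *         runLongAcceptanceTest(); //compiled away entirely when FULL_TEST == false
+     *     }
+     * </pre>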
+     */
+    boolean FULL_TEST = false;
+
+
+
+    short STORE_FORMAT_VERSION = 10000 + 1;
+}
diff --git a/src/main/java/net/kotek/jdbm/DataInput2.java b/src/main/java/net/kotek/jdbm/DataInput2.java
new file mode 100644
index 000000000..affe7ad5f
--- /dev/null
+++ b/src/main/java/net/kotek/jdbm/DataInput2.java
@@ -0,0 +1,110 @@
+package net.kotek.jdbm;
+
+import java.io.DataInput;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+/**
+ * Wraps ByteBuffer and provides DataInput
+ */
+public class DataInput2 implements DataInput {
+
+    ByteBuffer buf;
+    int pos;
+
+    public DataInput2(final ByteBuffer buf, final int pos) {
+        this.buf = buf;
+        this.pos = pos;
+    }
+
+    @Override
+    public void readFully(byte[] b) throws IOException {
+        readFully(b, 0, b.length);
+    }
+
+    @Override
+    public void readFully(byte[] b, int off, int len) throws IOException {
+        //naive, but the only thread safe way
+        //TODO investigate
+        for(int i=off;i<off+len;i++){
+            b[i] = buf.get(pos++);
+        }
+    }
diff --git a/src/main/java/net/kotek/jdbm/DataOutput2.java b/src/main/java/net/kotek/jdbm/DataOutput2.java
new file mode 100644
--- /dev/null
+++ b/src/main/java/net/kotek/jdbm/DataOutput2.java
+package net.kotek.jdbm;
+
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Arrays;
+
+/**
+ * Wraps a growing byte[] and provides DataOutput
+ * <p>
+ * ByteArrayOutputStream is not used as it requires byte[] copying
+ *
+ */
+public final class DataOutput2 implements DataOutput {
+
+    byte[] buf;
+    int pos;
+
+    DataOutput2(){
+        pos = 0;
+        buf = new byte[16];
+    }
+
+    byte[] copyBytes(){
+        return Arrays.copyOf(buf, pos);
+    }
+
+    /**
+     * Make sure there will be enough space in the buffer to write n bytes.
+     */
+    private void ensureAvail(final int n) {
+        if (pos + n >= buf.length) {
+            int newSize = Math.max(pos + n, buf.length * 2);
+            buf = Arrays.copyOf(buf, newSize);
+        }
+    }
+
+
+    @Override
+    public void write(final int b) throws IOException {
+        ensureAvail(1);
+        buf[pos++] = (byte) b;
+    }
+
+    @Override
+    public void write(final byte[] b) throws IOException {
+        write(b, 0, b.length);
+    }
+
+    @Override
+    public void write(final byte[] b, final int off, final int len) throws IOException {
+        ensureAvail(len);
+        System.arraycopy(b, off, buf, pos, len);
+        pos += len;
+    }
+
+    @Override
+    public void writeBoolean(final boolean v) throws IOException {
+        ensureAvail(1);
+        buf[pos++] = (byte) (v ?
1 : 0); + } + + @Override + public void writeByte(final int v) throws IOException { + ensureAvail(1); + buf[pos++] = (byte) (v); + } + + @Override + public void writeShort(final int v) throws IOException { + ensureAvail(2); + buf[pos++] = (byte) (0xff & (v >> 8)); + buf[pos++] = (byte) (0xff & (v)); + } + + @Override + public void writeChar(final int v) throws IOException { + writeInt(v); + } + + @Override + public void writeInt(final int v) throws IOException { + ensureAvail(4); + buf[pos++] = (byte) (0xff & (v >> 24)); + buf[pos++] = (byte) (0xff & (v >> 16)); + buf[pos++] = (byte) (0xff & (v >> 8)); + buf[pos++] = (byte) (0xff & (v)); + } + + @Override + public void writeLong(final long v) throws IOException { + ensureAvail(8); + buf[pos++] = (byte) (0xff & (v >> 56)); + buf[pos++] = (byte) (0xff & (v >> 48)); + buf[pos++] = (byte) (0xff & (v >> 40)); + buf[pos++] = (byte) (0xff & (v >> 32)); + buf[pos++] = (byte) (0xff & (v >> 24)); + buf[pos++] = (byte) (0xff & (v >> 16)); + buf[pos++] = (byte) (0xff & (v >> 8)); + buf[pos++] = (byte) (0xff & (v)); + } + + @Override + public void writeFloat(final float v) throws IOException { + ensureAvail(4); + writeInt(Float.floatToIntBits(v)); + } + + @Override + public void writeDouble(final double v) throws IOException { + ensureAvail(8); + writeLong(Double.doubleToLongBits(v)); + } + + @Override + public void writeBytes(final String s) throws IOException { + writeUTF(s); + } + + @Override + public void writeChars(final String s) throws IOException { + writeUTF(s); + } + + @Override + public void writeUTF(final String s) throws IOException { + SerializerBase.serializeString(this, s); + } +} diff --git a/src/main/java/net/kotek/jdbm/HashMap2.java b/src/main/java/net/kotek/jdbm/HashMap2.java new file mode 100644 index 000000000..704a93f71 --- /dev/null +++ b/src/main/java/net/kotek/jdbm/HashMap2.java @@ -0,0 +1,990 @@ +package net.kotek.jdbm; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.*; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +/** + * Thread safe concurrent HashMap + *
+ * It uses the full 32-bit hash from the start; there is no initial load factor and no rehashing.
+ *
+ * This map is suitable for a number of records on the order of 1e9 and above.
+ * A larger number of records increases hash collisions, and performance
+ * degrades linearly with the number of records (separate chaining).
+ *
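+ * <p>
+ * A note on bit layout (summarizing the code below): the highest 4 bits of the
+ * hash select one of the 16 segments, and each of the four directory levels
+ * beneath a segment consumes a further 7 bits (4 + 4*7 = 32).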
+ * Concurrent scalability is achieved by splitting HashMap into 16 segments, each with separate lock. + * Very similar to ConcurrentHashMap + * + * @author Jan Kotek + */ +public class HashMap2 extends AbstractMap implements ConcurrentMap { + + /** default serializer used for key and values */ + private static final Serializer KV_SERIALIZER = Serializer.BASIC_SERIALIZER; + + + static final int BUCKET_OVERFLOW = 4; + + + protected static class LinkedNode{ + K key; + V value; + long next; + + LinkedNode(final long next, final K key, final V value ){ + this.key = key; + this.value = value; + this.next = next; + } + } + + final Serializer ROOT_SERIALIZER = new Serializer() { + @Override + public void serialize(DataOutput out, long[] value) throws IOException { + for(int i =0;i<16;i++) + JdbmUtil.packLong(out,value[i]); + } + + @Override + public long[] deserialize(DataInput in, int available) throws IOException { + final long[] ret = new long[16]; + for(int i=0;i<16;i++) + ret[i] = JdbmUtil.unpackLong(in); + return ret; + } + }; + + final Serializer> LN_SERIALIZER = new Serializer>() { + @Override + public void serialize(DataOutput out, LinkedNode value) throws IOException { + JdbmUtil.packLong(out,value.next); + KV_SERIALIZER.serialize(out,value.key); + KV_SERIALIZER.serialize(out,value.value); + } + + @Override + public LinkedNode deserialize(DataInput in, int available) throws IOException { + return new LinkedNode( + JdbmUtil.unpackLong(in), + (K) KV_SERIALIZER.deserialize(in,-1), + (V) KV_SERIALIZER.deserialize(in,-1) + ); + } + }; + + final static boolean readonly = false; + + + static final SerializerDIR_SERIALIZER = new Serializer() { + @Override + public void serialize(DataOutput out, long[][] value) throws IOException { + if(value == null) return; + + if(CC.ASSERT && value.length!=16) throw new InternalError(); + + //first write mask which indicate subarray nullability + int nulls = 0; + for(int i = 0;i<16;i++){ + if(value[i]!=null) + nulls |= 1<>>1; + } + + return ret; + } + }; + + + /** list of segments, this is immutable*/ + protected final long[] segmentRecids; + + protected final ReentrantReadWriteLock[] segmentLocks = new ReentrantReadWriteLock[16]; + + + protected final RecordManager recman; + public final long rootRecid; + + + public HashMap2(RecordManager recman, long rootRecid) { + this.recman = recman; + + for(int i=0;i< 16;i++) segmentLocks[i]=new ReentrantReadWriteLock(); + + if(rootRecid == 0){ + //prealocate segmentRecids, so we dont have to lock on those latter + segmentRecids = new long[16]; + for(int i=0;i<16;i++) + segmentRecids[i] = recman.recordPut(null, Serializer.NULL_SERIALIZER); + this.rootRecid = recman.recordPut(segmentRecids, ROOT_SERIALIZER); + }else{ + this.rootRecid = rootRecid; + segmentRecids = recman.recordGet(rootRecid, ROOT_SERIALIZER); + } + } + + + @Override + public boolean containsKey(final Object o){ + return get(o)!=null; + } + + @Override + public int size() { + long counter = 0; + + //search tree, until we find first non null + for(int i=0;i<16;i++){ + try{ + segmentLocks[i].readLock().lock(); + + final long dirRecid = segmentRecids[i]; + counter+=recursiveDirCount(dirRecid); + }finally { + segmentLocks[i].readLock().unlock(); + } + } + + if(counter>Integer.MAX_VALUE) + return Integer.MAX_VALUE; + + return (int) counter; + } + + private long recursiveDirCount(final long dirRecid) { + long[][] dir = recman.recordGet(dirRecid, DIR_SERIALIZER); + if(dir==null) return 0 ; + long counter = 0; + for(long[] subdir:dir){ + if(subdir == null) 
continue; + for(long recid:subdir){ + if(recid == 0) continue; + if((recid&1)==0){ + //reference to another subdir + recid = recid>>>1; + counter += recursiveDirCount(recid); + }else{ + //reference to linked list, count it + recid = recid>>>1; + while(recid!=0){ + LinkedNode n = recman.recordGet(recid, LN_SERIALIZER); + counter++; + recid = n.next; + } + } + } + } + return counter; + } + + @Override + public boolean isEmpty() { + //search tree, until we find first non null + for(int i=0;i<16;i++){ + try{ + segmentLocks[i].readLock().lock(); + + long dirRecid = segmentRecids[i]; + long[][] dir = recman.recordGet(dirRecid, DIR_SERIALIZER); + if(dir!=null) return false; + }finally { + segmentLocks[i].readLock().unlock(); + } + } + + return true; + } + + + + public V get(final Object o){ + if(o==null) return null; + final int h = hash(o); + final int segment = h >>>28; + try{ + segmentLocks[segment].readLock().lock(); + long recid = segmentRecids[segment]; + for(int level=3;level>=0;level--){ + long[][] dir = recman.recordGet(recid, DIR_SERIALIZER); + if(dir == null) return null; + int slot = (h>>>(level*7 )) & 0x7F; + if(CC.ASSERT && slot>=128) throw new InternalError(); + if(dir[slot/8]==null) return null; + recid = dir[slot/8][slot%8]; + if(recid == 0) return null; + if((recid&1)!=0){ //last bite indicates if referenced record is LinkedNode + recid = recid>>>1; + while(true){ + LinkedNode ln = recman.recordGet(recid, LN_SERIALIZER); + if(ln == null) return null; + if(ln.key.equals(o)) return ln.value; + if(ln.next==0) return null; + recid = ln.next; + } + } + + recid = recid>>>1; + } + + return null; + }finally { + segmentLocks[segment].readLock().unlock(); + } + } + + @Override + public V put(final K key, final V value){ + if (readonly) + throw new UnsupportedOperationException("readonly"); + + if (key == null) + throw new IllegalArgumentException("null key"); + + if (value == null) + throw new IllegalArgumentException("null value"); + + final int h = hash(key); + final int segment = h >>>28; + try{ + segmentLocks[segment].writeLock().lock(); + long dirRecid = segmentRecids[segment]; + + int level = 3; + while(true){ + long[][] dir = recman.recordGet(dirRecid, DIR_SERIALIZER); + final int slot = (h>>>(7*level )) & 0x7F; + if(CC.ASSERT && slot>127) throw new InternalError(); + + if(dir == null ){ + //create new dir + dir = new long[16][]; + } + + if(dir[slot/8] == null){ + dir[slot/8] = new long[8]; + } + + int counter = 0; + long recid = dir[slot/8][slot%8]; + + if(recid!=0){ + if((recid&1) == 0){ + dirRecid = recid>>>1; + level--; + continue; + } + recid = recid>>>1; + + //traverse linked list, try to replace previous value + LinkedNode ln = recman.recordGet(recid, LN_SERIALIZER); + + while(ln!=null){ + if(ln.key.equals(key)){ + //found, replace value at this node + ln.key = key; + V oldVal = ln.value; + ln.value = value; + recman.recordUpdate(recid, ln, LN_SERIALIZER); + return oldVal; + } + recid = ln.next; + ln = recid==0? 
null : recman.recordGet(recid, LN_SERIALIZER); + counter++; + } + //key was not found at linked list, so just append it to beginning + } + + + //check if linked list has overflow and needs to be expanded to new dir level + if(counter>=BUCKET_OVERFLOW && level>=1){ + long[][] nextDir = new long[16][]; + + { + //add newly inserted record + int pos =(h >>>(7*(level-1) )) & 0x7F; + nextDir[pos/8] = new long[8]; + nextDir[pos/8][pos%8] = (recman.recordPut(new LinkedNode(0, key, value), LN_SERIALIZER) <<1) | 1; + } + + + //redistribute linked bucket into new dir + long nodeRecid = dir[slot/8][slot%8]>>>1; + while(nodeRecid!=0){ + LinkedNode n = recman.recordGet(nodeRecid, LN_SERIALIZER); + final long nextRecid = n.next; + int pos = (hash(n.key) >>>(7*(level -1) )) & 0x7F; + if(nextDir[pos/8]==null) nextDir[pos/8] = new long[8]; + n.next = nextDir[pos/8][pos%8]>>>1; + nextDir[pos/8][pos%8] = (nodeRecid<<1) | 1; + recman.recordUpdate(nodeRecid, n,LN_SERIALIZER); + nodeRecid = nextRecid; + } + + //insert nextDir and update parent dir + long nextDirRecid = recman.recordPut(nextDir, DIR_SERIALIZER); + int parentPos = (h>>>(7*level )) & 0x7F; + dir[parentPos/8][parentPos%8] = (nextDirRecid<<1) | 0; + recman.recordUpdate(dirRecid, dir, DIR_SERIALIZER); + return null; + }else{ + // record does not exist in linked list, so create new one + recid = dir[slot/8][slot%8]>>>1; + long newRecid = recman.recordPut(new LinkedNode(recid, key,value), LN_SERIALIZER); + dir[slot/8][slot%8] = (newRecid<<1) | 1; + recman.recordUpdate(dirRecid, dir,DIR_SERIALIZER); + return null; + } + } + + }finally { + segmentLocks[segment].writeLock().unlock(); + } + } + + @Override + public V remove(Object key){ + if (readonly) + throw new UnsupportedOperationException("readonly"); + + final int h = hash(key); + final int segment = h >>>28; + try{ + segmentLocks[segment].writeLock().lock(); + + final long[] dirRecids = new long[4]; + int level = 3; + dirRecids[level] = segmentRecids[segment]; + + while(true){ + long[][] dir = recman.recordGet(dirRecids[level], DIR_SERIALIZER); + final int slot = (h>>>(7*level )) & 0x7F; + if(CC.ASSERT && slot>127) throw new InternalError(); + + if(dir == null ){ + //create new dir + dir = new long[16][]; + } + + if(dir[slot/8] == null){ + dir[slot/8] = new long[8]; + } + + int counter = 0; + long recid = dir[slot/8][slot%8]; + + if(recid!=0){ + if((recid&1) == 0){ + level--; + dirRecids[level] = recid>>>1; + continue; + } + recid = recid>>>1; + + //traverse linked list, try to remove node + LinkedNode ln = recman.recordGet(recid, LN_SERIALIZER); + LinkedNode prevLn = null; + long prevRecid = 0; + while(ln!=null){ + if(ln.key.equals(key)){ + //remove from linkedList + if(prevLn == null ){ + //referenced directly from dir + if(ln.next==0){ + recursiveDirDelete(h, level, dirRecids, dir, slot); + + + }else{ + dir[slot/8][slot%8] = (ln.next<<1)|1; + recman.recordUpdate(dirRecids[level], dir, DIR_SERIALIZER); + } + + }else{ + //referenced from LinkedNode + prevLn.next = ln.next; + recman.recordUpdate(prevRecid, prevLn, LN_SERIALIZER); + } + //found, remove this node + recman.recordDelete(recid); + return ln.value; + } + prevRecid = recid; + prevLn = ln; + recid = ln.next; + ln = recid==0? 
null : recman.recordGet(recid, LN_SERIALIZER); + counter++; + } + //key was not found at linked list, so it does not exist + return null; + } + //recid is 0, so entry does not exist + return null; + + } + }finally { + segmentLocks[segment].writeLock().unlock(); + } + } + + + private void recursiveDirDelete(int h, int level, long[] dirRecids, long[][] dir, int slot) { + //was only item in linked list, so try to collapse the dir + dir[slot/8][slot%8] = 0; + //one record was zeroed out, check if subarray can be collapsed to null + boolean allZero = true; + for(long l:dir[slot/8]){ + if(l!=0){ + allZero = false; + break; + } + } + if(allZero) + dir[slot/8] = null; + allZero = true; + for(long[] l:dir){ + if(l!=null){ + allZero = false; + break; + } + } + + if(allZero){ + //delete from parent dir + if(level==3){ + //parent is segment, recid of this dir can not be modified, so just update to null + recman.recordUpdate(dirRecids[level], null, DIR_SERIALIZER); + }else{ + recman.recordDelete(dirRecids[level]); + + final long[][] parentDir = recman.recordGet(dirRecids[level + 1], DIR_SERIALIZER); + final int parentPos = (h >>> (7 * (level + 1))) & 0x7F; + recursiveDirDelete(h,level+1,dirRecids, parentDir, parentPos); + //parentDir[parentPos/8][parentPos%8] = 0; + //recman.recordUpdate(dirRecids[level + 1],parentDir,DIR_SERIALIZER); + + } + }else{ + recman.recordUpdate(dirRecids[level], dir, DIR_SERIALIZER); + } + } + + @Override + public void clear() { + for(int i = 0; i<16;i++) try{ + segmentLocks[i].writeLock().lock(); + + final long dirRecid = segmentRecids[i]; + recursiveDirClear(dirRecid); + + //set dir to null, as segment recid is immutable + recman.recordUpdate(dirRecid, null, DIR_SERIALIZER); + + }finally { + segmentLocks[i].writeLock().unlock(); + } + } + + private void recursiveDirClear(final long dirRecid) { + final long[][] dir = recman.recordGet(dirRecid,DIR_SERIALIZER); + if(dir == null) return; + for(long[] subdir:dir){ + if(subdir==null) continue; + for(long recid:subdir){ + if(recid == 0) continue; + if((recid&1)==0){ + //another dir + recid = recid>>>1; + //recursively remove dir + recursiveDirClear(recid); + recman.recordDelete(recid); + }else{ + //linked list to delete + recid = recid>>>1; + while(recid!=0){ + LinkedNode n = recman.recordGet(recid, LN_SERIALIZER); + recman.recordDelete(recid); + recid = n.next; + } + } + + } + } + } + + + @Override + public boolean containsValue(Object value) { + Iterator vals = values().iterator(); + while(vals.hasNext()){ + if(vals.next().equals(value)) return true; + } + return false; + } + + @Override + public void putAll(Map m) { + for(Entry e:m.entrySet()){ + put(e.getKey(),e.getValue()); + } + } + + + private final Set _keySet = new AbstractSet() { + + @Override + public int size() { + return HashMap2.this.size(); + } + + @Override + public boolean isEmpty() { + return HashMap2.this.isEmpty(); + } + + @Override + public boolean contains(Object o) { + return HashMap2.this.containsKey(o); + } + + @Override + public Iterator iterator() { + return new KeyIterator(); + } + + @Override + public boolean add(K k) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean remove(Object o) { + if(o instanceof Entry){ + Entry e = (Entry) o; + return HashMap2.this.remove(((Entry) o).getKey(),((Entry) o).getValue()); + } + return false; + + } + + + @Override + public void clear() { + HashMap2.this.clear(); + } + }; + + @Override + public Set keySet() { + return _keySet; + } + + private final Collection _values = new 
AbstractCollection(){ + + @Override + public int size() { + return HashMap2.this.size(); + } + + @Override + public boolean isEmpty() { + return HashMap2.this.isEmpty(); + } + + @Override + public boolean contains(Object o) { + return HashMap2.this.containsValue(o); + } + + + + @Override + public Iterator iterator() { + return new ValueIterator(); + } + + }; + + @Override + public Collection values() { + return _values; + } + + private Set> _entrySet = new AbstractSet>(){ + + @Override + public int size() { + return HashMap2.this.size(); + } + + @Override + public boolean isEmpty() { + return HashMap2.this.isEmpty(); + } + + @Override + public boolean contains(Object o) { + if(o instanceof Entry){ + Entry e = (Entry) o; + Object val = HashMap2.this.get(e.getKey()); + return val!=null && val.equals(e.getValue()); + }else + return false; + } + + @Override + public Iterator> iterator() { + return new EntryIterator(); + } + + + @Override + public boolean add(Entry kvEntry) { + K key = kvEntry.getKey(); + V value = kvEntry.getValue(); + if(key==null || value == null) throw new NullPointerException(); + HashMap2.this.put(key, value); + return true; + } + + @Override + public boolean remove(Object o) { + if(o instanceof Entry){ + Entry e = (Entry) o; + Object key = e.getKey(); + if(key == null) return false; + return HashMap2.this.remove(key, e.getValue()); + } + return false; + + } + + + @Override + public void clear() { + HashMap2.this.clear(); + } + }; + + @Override + public Set> entrySet() { + return _entrySet; + } + + + protected int hash(final Object key) { + int h = key.hashCode(); + //TODO salt + + // Spread bits to regularize both segment and index locations, + // using variant of single-word Wang/Jenkins hash. + h += (h << 15) ^ 0xffffcd7d; + h ^= (h >>> 10); + h += (h << 3); + h ^= (h >>> 6); + h += (h << 2) + (h << 14); + return h ^ (h >>> 16); + } + + + abstract class HashIterator{ + + protected Object[] currentLinkedList; + protected int currentLinkedListPos = 0; + + private K lastReturnedKey = null; + + private int lastSegment = 0; + + HashIterator(){ + currentLinkedList = findNextLinkedNode(0); + } + + public void remove() { + final K keyToRemove = lastReturnedKey; + if (lastReturnedKey == null) + throw new IllegalStateException(); + + lastReturnedKey = null; + HashMap2.this.remove(keyToRemove); + } + + public boolean hasNext(){ + return currentLinkedList!=null && currentLinkedListPos>>28; + + //two phases, first find old item and increase hash + try{ + segmentLocks[segment].readLock().lock(); + + long dirRecid = segmentRecids[segment]; + int level = 3; + //dive into tree, finding last hash position + while(true){ + long[][] dir = recman.recordGet(dirRecid, DIR_SERIALIZER); + int pos = (lastHash>>>(7 * level)) & 0x7F; + + //check if we need to expand deeper + if(dir[pos/8]==null || dir[pos/8][pos%8]==0 || (dir[pos/8][pos%8]&1)==1) { + //increase hash by 1 + if(level!=0) + lastHash = ((lastHash>>>(7 * level)) + 1) << (7*level); //should use mask and XOR + else + lastHash +=1; + break; + } + + //reference is dir, move to next level + dirRecid = dir[pos/8][pos%8]>>>1; + level--; + } + + }finally { + segmentLocks[segment].readLock().unlock(); + } + return findNextLinkedNode(lastHash); + + + } + + private Object[] findNextLinkedNode(int hash) { + //second phase, start search from increased hash to find next items + for(int segment = Math.max(hash >>>28, lastSegment); segment<16;segment++)try{ + + lastSegment = Math.max(segment,lastSegment); + segmentLocks[segment].readLock().lock(); + 
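+                //walk this segment's directory tree for the next occupied
+                //linked-node list; when the segment is exhausted, hash is
+                //reset to 0 and the scan moves on to the following segment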
+ long dirRecid = segmentRecids[segment]; + Object ret[] = findNextLinkedNodeRecur(dirRecid, hash, 3); + //System.out.println(Arrays.asList(ret)); + if(ret !=null) return ret; + hash = 0; + }finally { + segmentLocks[segment].readLock().unlock(); + } + + return null; + } + + private Object[] findNextLinkedNodeRecur(long dirRecid, int newHash, int level){ + long[][] dir = recman.recordGet(dirRecid, DIR_SERIALIZER); + if(dir == null) return null; + int pos = (newHash>>>(level*7)) & 0x7F; + boolean first = true; + while(pos<128){ + if(dir[pos/8]!=null){ + long recid = dir[pos/8][pos%8]; + if(recid!=0){ + if((recid&1) == 1){ + recid = recid>>1; + //found linked list, load it into array and return + Object[] array = new Object[2]; + int arrayPos = 0; + while(recid!=0){ + LinkedNode ln = recman.recordGet(recid, LN_SERIALIZER); + //increase array size if needed + if(arrayPos == array.length) + array = Arrays.copyOf(array, array.length+2); + array[arrayPos++] = ln.key; + array[arrayPos++] = ln.value; + recid = ln.next; + } + return array; + }else{ + //found another dir, continue dive + recid = recid>>1; + Object[] ret = findNextLinkedNodeRecur(recid, first ? newHash : 0, level - 1); + if(ret != null) return ret; + } + } + } + first = false; + pos++; + } + return null; + } + } + + class KeyIterator extends HashIterator implements Iterator{ + + @Override + public K next() { + K key = (K) currentLinkedList[currentLinkedListPos]; + moveToNext(); + return key; + } + } + + class ValueIterator extends HashIterator implements Iterator{ + + @Override + public V next() { + V value = (V) currentLinkedList[currentLinkedListPos+1]; + moveToNext(); + return value; + } + } + + class EntryIterator extends HashIterator implements Iterator>{ + + @Override + public Entry next() { + K key = (K) currentLinkedList[currentLinkedListPos]; + moveToNext(); + return new Entry2(key); + } + } + + class Entry2 implements Entry{ + + private final K key; + + Entry2(K key) { + this.key = key; + } + + @Override + public K getKey() { + return key; + } + + @Override + public V getValue() { + return HashMap2.this.get(key); + } + + @Override + public V setValue(V value) { + return HashMap2.this.put(key,value); + } + + @Override + public boolean equals(Object o) { + return (o instanceof Entry) && key.equals(((Entry) o).getKey()); + } + + @Override + public int hashCode() { + final V value = HashMap2.this.get(key); + return (key == null ? 0 : key.hashCode()) ^ + (value == null ? 
0 : value.hashCode()); + } + } + + + @Override + public V putIfAbsent(K key, V value) { + if(key==null||value==null) throw new NullPointerException(); + final int segment = HashMap2.this.hash(key) >>>28; + try{ + segmentLocks[segment].writeLock().lock(); + + if (!containsKey(key)) + return put(key, value); + else + return get(key); + + }finally { + segmentLocks[segment].writeLock().unlock(); + } + } + + @Override + public boolean remove(Object key, Object value) { + if(key==null||value==null) throw new NullPointerException(); + final int segment = HashMap2.this.hash(key) >>>28; + try{ + segmentLocks[segment].writeLock().lock(); + + if (containsKey(key) && get(key).equals(value)) { + remove(key); + return true; + }else + return false; + + }finally { + segmentLocks[segment].writeLock().unlock(); + } + } + + @Override + public boolean replace(K key, V oldValue, V newValue) { + if(key==null||oldValue==null||newValue==null) throw new NullPointerException(); + final int segment = HashMap2.this.hash(key) >>>28; + try{ + segmentLocks[segment].writeLock().lock(); + + if (containsKey(key) && get(key).equals(oldValue)) { + put(key, newValue); + return true; + } else + return false; + + }finally { + segmentLocks[segment].writeLock().unlock(); + } + } + + @Override + public V replace(K key, V value) { + if(key==null||value==null) throw new NullPointerException(); + final int segment = HashMap2.this.hash(key) >>>28; + try{ + segmentLocks[segment].writeLock().lock(); + + if (containsKey(key)) + return put(key, value); + else + return null; + }finally { + segmentLocks[segment].writeLock().unlock(); + } + } + +} diff --git a/src/main/java/net/kotek/jdbm/JdbmUtil.java b/src/main/java/net/kotek/jdbm/JdbmUtil.java new file mode 100644 index 000000000..adf55076c --- /dev/null +++ b/src/main/java/net/kotek/jdbm/JdbmUtil.java @@ -0,0 +1,116 @@ +package net.kotek.jdbm; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.logging.Logger; + +/** + * Various IO related utilities + */ +final public class JdbmUtil { + + static final Logger LOG = Logger.getLogger("JDBM"); + + public static final String EMPTY_STRING = ""; + public static final String UTF8 = "UTF8"; + + + /** + * Pack non-negative long into output stream. + * It will occupy 1-10 bytes depending on value (lower values occupy smaller space) + * + * @param os + * @param value + * @throws java.io.IOException + */ + static public void packLong(DataOutput os, long value) throws IOException { + + if (CC.ASSERT && value < 0) { + throw new IllegalArgumentException("negative value: v=" + value); + } + + while ((value & ~0x7FL) != 0) { + os.write((((int) value & 0x7F) | 0x80)); + value >>>= 7; + } + os.write((byte) value); + } + + + /** + * Unpack positive long value from the input stream. + * + * @param is The input stream. + * @return The long value. + * @throws java.io.IOException + */ + static public long unpackLong(DataInput is) throws IOException { + + long result = 0; + for (int offset = 0; offset < 64; offset += 7) { + long b = is.readUnsignedByte(); + result |= (b & 0x7F) << offset; + if ((b & 0x80) == 0) { + return result; + } + } + if(CC.ASSERT) throw new Error("Malformed long."); + else return Long.MIN_VALUE; + } + + + /** + * Pack non-negative long into output stream. 
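+     * (This is the int counterpart of packLong: it packs a non-negative int.
+     * For example, 300 is written as the two bytes 0xAC 0x02: low 7 bits
+     * first, with the high bit of each byte serving as a continuation flag.)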
+ * It will occupy 1-5 bytes depending on value (lower values occupy smaller space) + * + * @param os + * @param value + * @throws IOException + */ + + static public void packInt(DataOutput os, int value) throws IOException { + + if (CC.ASSERT && value < 0) { + throw new IllegalArgumentException("negative value: v=" + value); + } + + while ((value & ~0x7F) != 0) { + os.write(((value & 0x7F) | 0x80)); + value >>>= 7; + } + + os.write((byte) value); + } + + static public int unpackInt(DataInput is) throws IOException { + + + + for (int offset = 0, result = 0; offset < 32; offset += 7) { + int b = is.readUnsignedByte(); + result |= (b & 0x7F) << offset; + if ((b & 0x80) == 0) { + return result; + } + } + if(CC.ASSERT) throw new Error("Malformed int."); + else return Integer.MIN_VALUE; + + } + + + public static int longHash(long key) { + + int h = (int)(key ^ (key >>> 32)); + // Spread bits to regularize both segment and index locations, + // using variant of single-word Wang/Jenkins hash. + h += (h << 15) ^ 0xffffcd7d; + h ^= (h >>> 10); + h += (h << 3); + h ^= (h >>> 6); + h += (h << 2) + (h << 14); + return h ^ (h >>> 16); + + } +} diff --git a/src/main/java/net/kotek/jdbm/LongConcurrentHashMap.java b/src/main/java/net/kotek/jdbm/LongConcurrentHashMap.java new file mode 100644 index 000000000..b6af9696a --- /dev/null +++ b/src/main/java/net/kotek/jdbm/LongConcurrentHashMap.java @@ -0,0 +1,1013 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/licenses/publicdomain + */ + +package net.kotek.jdbm; +import java.io.Serializable; +import java.util.Iterator; +import java.util.NoSuchElementException; +import java.util.concurrent.locks.ReentrantLock; + +/** + * A hash table supporting full concurrency of retrievals and + * adjustable expected concurrency for updates. This class obeys the + * same functional specification as {@link java.util.Hashtable}, and + * includes versions of methods corresponding to each method of + * Hashtable. However, even though all operations are + * thread-safe, retrieval operations do not entail locking, + * and there is not any support for locking the entire table + * in a way that prevents all access. This class is fully + * interoperable with Hashtable in programs that rely on its + * thread safety but not on its synchronization details. + * + *
Retrieval operations (including get) generally do not + * block, so may overlap with update operations (including + * put and remove). Retrievals reflect the results + * of the most recently completed update operations holding + * upon their onset. For aggregate operations such as putAll + * and clear, concurrent retrievals may reflect insertion or + * removal of only some entries. Similarly, Iterators and + * Enumerations return elements reflecting the state of the hash table + * at some point at or since the creation of the iterator/enumeration. + * They do not throw {@link java.util.ConcurrentModificationException}. + * However, iterators are designed to be used by only one thread at a time. + * + *
The allowed concurrency among update operations is guided by + * the optional concurrencyLevel constructor argument + * (default 16), which is used as a hint for internal sizing. The + * table is internally partitioned to try to permit the indicated + * number of concurrent updates without contention. Because placement + * in hash tables is essentially random, the actual concurrency will + * vary. Ideally, you should choose a value to accommodate as many + * threads as will ever concurrently modify the table. Using a + * significantly higher value than you need can waste space and time, + * and a significantly lower value can lead to thread contention. But + * overestimates and underestimates within an order of magnitude do + * not usually have much noticeable impact. A value of one is + * appropriate when it is known that only one thread will modify and + * all others will only read. Also, resizing this or any other kind of + * hash table is a relatively slow operation, so, when possible, it is + * a good idea to provide estimates of expected table sizes in + * constructors. + * + *
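+ * For example (illustrative; {@code Node} stands in for any value type):
+ * {@code new LongConcurrentHashMap<Node>(1024, 0.75f, 32)} sizes the table
+ * for about 1024 entries split across 32 internal segments.
+ *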
This class is a member of the + * + * Java Collections Framework. + * + * @since 1.5 + * @author Doug Lea + * @param the type of mapped values + */ +class LongConcurrentHashMap< V> + implements LongMap, Serializable { + private static final long serialVersionUID = 7249069246763182397L; + + /* + * The basic strategy is to subdivide the table among Segments, + * each of which itself is a concurrently readable hash table. + */ + + /* ---------------- Constants -------------- */ + + /** + * The default initial capacity for this table, + * used when not otherwise specified in a constructor. + */ + static final int DEFAULT_INITIAL_CAPACITY = 16; + + /** + * The default load factor for this table, used when not + * otherwise specified in a constructor. + */ + static final float DEFAULT_LOAD_FACTOR = 0.75f; + + /** + * The default concurrency level for this table, used when not + * otherwise specified in a constructor. + */ + static final int DEFAULT_CONCURRENCY_LEVEL = 16; + + /** + * The maximum capacity, used if a higher value is implicitly + * specified by either of the constructors with arguments. MUST + * be a power of two <= 1<<30 to ensure that entries are indexable + * using ints. + */ + static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The maximum number of segments to allow; used to bound + * constructor arguments. + */ + static final int MAX_SEGMENTS = 1 << 16; // slightly conservative + + /** + * Number of unsynchronized retries in size and containsValue + * methods before resorting to locking. This is used to avoid + * unbounded retries if tables undergo continuous modification + * which would make it impossible to obtain an accurate result. + */ + static final int RETRIES_BEFORE_LOCK = 2; + + /* ---------------- Fields -------------- */ + + /** + * Mask value for indexing into segments. The upper bits of a + * key's hash code are used to choose the segment. + */ + final int segmentMask; + + /** + * Shift value for indexing within segments. + */ + final int segmentShift; + + /** + * The segments, each of which is a specialized hash table + */ + final Segment[] segments; + + + /* ---------------- Small Utilities -------------- */ + + + /** + * Returns the segment that should be used for key with given hash + * @param hash the hash code for the key + * @return the segment + */ + final Segment segmentFor(int hash) { + return segments[(hash >>> segmentShift) & segmentMask]; + } + + + /* ---------------- Inner Classes -------------- */ + + /** + * LongConcurrentHashMap list entry. Note that this is never exported + * out as a user-visible Map.Entry. + * + * Because the value field is volatile, not final, it is legal wrt + * the Java Memory Model for an unsynchronized reader to see null + * instead of initial value when read via a data race. Although a + * reordering leading to this is not likely to ever actually + * occur, the Segment.readValueUnderLock method is used as a + * backup in case a null (pre-initialized) value is ever seen in + * an unsynchronized access method. + */ + static final class HashEntry { + final long key; + final int hash; + volatile V value; + final HashEntry next; + + HashEntry(long key, int hash, HashEntry next, V value) { + this.key = key; + this.hash = hash; + this.next = next; + this.value = value; + } + + @SuppressWarnings("unchecked") + static final HashEntry[] newArray(int i) { + return new HashEntry[i]; + } + } + + /** + * Segments are specialized versions of hash tables. 
This + * subclasses from ReentrantLock opportunistically, just to + * simplify some locking and avoid separate construction. + */ + static final class Segment extends ReentrantLock implements Serializable { + /* + * Segments maintain a table of entry lists that are ALWAYS + * kept in a consistent state, so can be read without locking. + * Next fields of nodes are immutable (final). All list + * additions are performed at the front of each bin. This + * makes it easy to check changes, and also fast to traverse. + * When nodes would otherwise be changed, new nodes are + * created to replace them. This works well for hash tables + * since the bin lists tend to be short. (The average length + * is less than two for the default load factor threshold.) + * + * Read operations can thus proceed without locking, but rely + * on selected uses of volatiles to ensure that completed + * write operations performed by other threads are + * noticed. For most purposes, the "count" field, tracking the + * number of elements, serves as that volatile variable + * ensuring visibility. This is convenient because this field + * needs to be read in many read operations anyway: + * + * - All (unsynchronized) read operations must first read the + * "count" field, and should not look at table entries if + * it is 0. + * + * - All (synchronized) write operations should write to + * the "count" field after structurally changing any bin. + * The operations must not take any action that could even + * momentarily cause a concurrent read operation to see + * inconsistent data. This is made easier by the nature of + * the read operations in Map. For example, no operation + * can reveal that the table has grown but the threshold + * has not yet been updated, so there are no atomicity + * requirements for this with respect to reads. + * + * As a guide, all critical volatile reads and writes to the + * count field are marked in code comments. + */ + + private static final long serialVersionUID = 2249069246763182397L; + + /** + * The number of elements in this segment's region. + */ + transient volatile int count; + + /** + * Number of updates that alter the size of the table. This is + * used during bulk-read methods to make sure they see a + * consistent snapshot: If modCounts change during a traversal + * of segments computing size or checking containsValue, then + * we might have an inconsistent view of state so (usually) + * must retry. + */ + transient int modCount; + + /** + * The table is rehashed when its size exceeds this threshold. + * (The value of this field is always (int)(capacity * + * loadFactor).) + */ + transient int threshold; + + /** + * The per-segment table. + */ + transient volatile HashEntry[] table; + + /** + * The load factor for the hash table. Even though this value + * is same for all segments, it is replicated to avoid needing + * links to outer object. + * @serial + */ + final float loadFactor; + + Segment(int initialCapacity, float lf) { + loadFactor = lf; + setTable(HashEntry.newArray(initialCapacity)); + } + + @SuppressWarnings("unchecked") + static final Segment[] newArray(int i) { + return new Segment[i]; + } + + /** + * Sets table to new HashEntry array. + * Call only while holding lock or in constructor. + */ + void setTable(HashEntry[] newTable) { + threshold = (int)(newTable.length * loadFactor); + table = newTable; + } + + /** + * Returns properly casted first entry of bin for given hash. 
+ */ + HashEntry getFirst(int hash) { + HashEntry[] tab = table; + return tab[hash & (tab.length - 1)]; + } + + /** + * Reads value field of an entry under lock. Called if value + * field ever appears to be null. This is possible only if a + * compiler happens to reorder a HashEntry initialization with + * its table assignment, which is legal under memory model + * but is not known to ever occur. + */ + V readValueUnderLock(HashEntry e) { + lock(); + try { + return e.value; + } finally { + unlock(); + } + } + + /* Specialized implementations of map methods */ + + V get(final long key, int hash) { + if (count != 0) { // read-volatile + HashEntry e = getFirst(hash); + while (e != null) { + if (e.hash == hash && key == e.key) { + V v = e.value; + if (v != null) + return v; + return readValueUnderLock(e); // recheck + } + e = e.next; + } + } + return null; + } + + boolean containsKey(final long key, int hash) { + if (count != 0) { // read-volatile + HashEntry e = getFirst(hash); + while (e != null) { + if (e.hash == hash && key == e.key) + return true; + e = e.next; + } + } + return false; + } + + boolean containsValue(Object value) { + if (count != 0) { // read-volatile + HashEntry[] tab = table; + int len = tab.length; + for (int i = 0 ; i < len; i++) { + for (HashEntry e = tab[i]; e != null; e = e.next) { + V v = e.value; + if (v == null) // recheck + v = readValueUnderLock(e); + if (value.equals(v)) + return true; + } + } + } + return false; + } + + boolean replace(long key, int hash, V oldValue, V newValue) { + lock(); + try { + HashEntry e = getFirst(hash); + while (e != null && (e.hash != hash || key!=e.key)) + e = e.next; + + boolean replaced = false; + if (e != null && oldValue.equals(e.value)) { + replaced = true; + e.value = newValue; + } + return replaced; + } finally { + unlock(); + } + } + + V replace(long key, int hash, V newValue) { + lock(); + try { + HashEntry e = getFirst(hash); + while (e != null && (e.hash != hash || key != e.key)) + e = e.next; + + V oldValue = null; + if (e != null) { + oldValue = e.value; + e.value = newValue; + } + return oldValue; + } finally { + unlock(); + } + } + + + V put(long key, int hash, V value, boolean onlyIfAbsent) { + lock(); + try { + int c = count; + if (c++ > threshold) // ensure capacity + rehash(); + HashEntry[] tab = table; + int index = hash & (tab.length - 1); + HashEntry first = tab[index]; + HashEntry e = first; + while (e != null && (e.hash != hash || key!=e.key)) + e = e.next; + + V oldValue; + if (e != null) { + oldValue = e.value; + if (!onlyIfAbsent) + e.value = value; + } + else { + oldValue = null; + ++modCount; + tab[index] = new HashEntry(key, hash, first, value); + count = c; // write-volatile + } + return oldValue; + } finally { + unlock(); + } + } + + void rehash() { + HashEntry[] oldTable = table; + int oldCapacity = oldTable.length; + if (oldCapacity >= MAXIMUM_CAPACITY) + return; + + /* + * Reclassify nodes in each list to new Map. Because we are + * using power-of-two expansion, the elements from each bin + * must either stay at same index, or move with a power of two + * offset. We eliminate unnecessary node creation by catching + * cases where old nodes can be reused because their next + * fields won't change. Statistically, at the default + * threshold, only about one-sixth of them need cloning when + * a table doubles. The nodes they replace will be garbage + * collectable as soon as they are no longer referenced by any + * reader thread that may be in the midst of traversing table + * right now. 
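+             * For example (illustrative): when a table of capacity 16 doubles,
+             * an entry either stays at index i or moves to index i+16 depending
+             * on one hash bit, and a trailing run of nodes that all map to the
+             * same new bin is reused without cloning.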
+ */ + + HashEntry[] newTable = HashEntry.newArray(oldCapacity<<1); + threshold = (int)(newTable.length * loadFactor); + int sizeMask = newTable.length - 1; + for (int i = 0; i < oldCapacity ; i++) { + // We need to guarantee that any existing reads of old Map can + // proceed. So we cannot yet null out each bin. + HashEntry e = oldTable[i]; + + if (e != null) { + HashEntry next = e.next; + int idx = e.hash & sizeMask; + + // Single node on list + if (next == null) + newTable[idx] = e; + + else { + // Reuse trailing consecutive sequence at same slot + HashEntry lastRun = e; + int lastIdx = idx; + for (HashEntry last = next; + last != null; + last = last.next) { + int k = last.hash & sizeMask; + if (k != lastIdx) { + lastIdx = k; + lastRun = last; + } + } + newTable[lastIdx] = lastRun; + + // Clone all remaining nodes + for (HashEntry p = e; p != lastRun; p = p.next) { + int k = p.hash & sizeMask; + HashEntry n = newTable[k]; + newTable[k] = new HashEntry(p.key, p.hash, + n, p.value); + } + } + } + } + table = newTable; + } + + /** + * Remove; match on key only if value null, else match both. + */ + V remove(final long key, int hash, Object value) { + lock(); + try { + int c = count - 1; + HashEntry[] tab = table; + int index = hash & (tab.length - 1); + HashEntry first = tab[index]; + HashEntry e = first; + while (e != null && (e.hash != hash || key!=e.key)) + e = e.next; + + V oldValue = null; + if (e != null) { + V v = e.value; + if (value == null || value.equals(v)) { + oldValue = v; + // All entries following removed node can stay + // in list, but all preceding ones need to be + // cloned. + ++modCount; + HashEntry newFirst = e.next; + for (HashEntry p = first; p != e; p = p.next) + newFirst = new HashEntry(p.key, p.hash, + newFirst, p.value); + tab[index] = newFirst; + count = c; // write-volatile + } + } + return oldValue; + } finally { + unlock(); + } + } + + void clear() { + if (count != 0) { + lock(); + try { + HashEntry[] tab = table; + for (int i = 0; i < tab.length ; i++) + tab[i] = null; + ++modCount; + count = 0; // write-volatile + } finally { + unlock(); + } + } + } + } + + + + /* ---------------- Public operations -------------- */ + + /** + * Creates a new, empty map with the specified initial + * capacity, load factor and concurrency level. + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements. + * @param loadFactor the load factor threshold, used to control resizing. + * Resizing may be performed when the average number of elements per + * bin exceeds this threshold. + * @param concurrencyLevel the estimated number of concurrently + * updating threads. The implementation performs internal sizing + * to try to accommodate this many threads. + * @throws IllegalArgumentException if the initial capacity is + * negative or the load factor or concurrencyLevel are + * nonpositive. 
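+     * <p>
+     * For example (illustrative): {@code initialCapacity=64} with
+     * {@code concurrencyLevel=16} yields 16 segments, each sized for
+     * 4 entries initially.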
+ */ + public LongConcurrentHashMap(int initialCapacity, + float loadFactor, int concurrencyLevel) { + if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0) + throw new IllegalArgumentException(); + + if (concurrencyLevel > MAX_SEGMENTS) + concurrencyLevel = MAX_SEGMENTS; + + // Find power-of-two sizes best matching arguments + int sshift = 0; + int ssize = 1; + while (ssize < concurrencyLevel) { + ++sshift; + ssize <<= 1; + } + segmentShift = 32 - sshift; + segmentMask = ssize - 1; + this.segments = Segment.newArray(ssize); + + if (initialCapacity > MAXIMUM_CAPACITY) + initialCapacity = MAXIMUM_CAPACITY; + int c = initialCapacity / ssize; + if (c * ssize < initialCapacity) + ++c; + int cap = 1; + while (cap < c) + cap <<= 1; + + for (int i = 0; i < this.segments.length; ++i) + this.segments[i] = new Segment(cap, loadFactor); + } + + /** + * Creates a new, empty map with the specified initial capacity, + * and with default load factor (0.75) and concurrencyLevel (16). + * + * @param initialCapacity the initial capacity. The implementation + * performs internal sizing to accommodate this many elements. + * @throws IllegalArgumentException if the initial capacity of + * elements is negative. + */ + public LongConcurrentHashMap(int initialCapacity) { + this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); + } + + /** + * Creates a new, empty map with a default initial capacity (16), + * load factor (0.75) and concurrencyLevel (16). + */ + public LongConcurrentHashMap() { + this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); + } + + /** + * Returns true if this map contains no key-value mappings. + * + * @return true if this map contains no key-value mappings + */ + public boolean isEmpty() { + final Segment[] segments = this.segments; + /* + * We keep track of per-segment modCounts to avoid ABA + * problems in which an element in one segment was added and + * in another removed during traversal, in which case the + * table was never actually empty at any point. Note the + * similar use of modCounts in the size() and containsValue() + * methods, which are the only other methods also susceptible + * to ABA problems. + */ + int[] mc = new int[segments.length]; + int mcsum = 0; + for (int i = 0; i < segments.length; ++i) { + if (segments[i].count != 0) + return false; + else + mcsum += mc[i] = segments[i].modCount; + } + // If mcsum happens to be zero, then we know we got a snapshot + // before any modifications at all were made. This is + // probably common enough to bother tracking. + if (mcsum != 0) { + for (int i = 0; i < segments.length; ++i) { + if (segments[i].count != 0 || + mc[i] != segments[i].modCount) + return false; + } + } + return true; + } + + /** + * Returns the number of key-value mappings in this map. If the + * map contains more than Integer.MAX_VALUE elements, returns + * Integer.MAX_VALUE. + * + * @return the number of key-value mappings in this map + */ + public int size() { + final Segment[] segments = this.segments; + long sum = 0; + long check = 0; + int[] mc = new int[segments.length]; + // Try a few times to get accurate count. On failure due to + // continuous async changes in table, resort to locking. 
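+        // mc[] snapshots each segment's modCount; if any modCount changes
+        // between passes the sums are stale, so the attempt is retried, and
+        // after RETRIES_BEFORE_LOCK failures we fall back to locking all segments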
+ for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) { + check = 0; + sum = 0; + int mcsum = 0; + for (int i = 0; i < segments.length; ++i) { + sum += segments[i].count; + mcsum += mc[i] = segments[i].modCount; + } + if (mcsum != 0) { + for (int i = 0; i < segments.length; ++i) { + check += segments[i].count; + if (mc[i] != segments[i].modCount) { + check = -1; // force retry + break; + } + } + } + if (check == sum) + break; + } + if (check != sum) { // Resort to locking all segments + sum = 0; + for (int i = 0; i < segments.length; ++i) + segments[i].lock(); + for (int i = 0; i < segments.length; ++i) + sum += segments[i].count; + for (int i = 0; i < segments.length; ++i) + segments[i].unlock(); + } + if (sum > Integer.MAX_VALUE) + return Integer.MAX_VALUE; + else + return (int)sum; + } + + @Override + public Iterator valuesIterator() { + return new ValueIterator(); + } + + @Override + public LongMapIterator longMapIterator() { + return new MapIterator(); + } + + /** + * Returns the value to which the specified key is mapped, + * or {@code null} if this map contains no mapping for the key. + * + *
More formally, if this map contains a mapping from a key + * {@code k} to a value {@code v} such that {@code key.equals(k)}, + * then this method returns {@code v}; otherwise it returns + * {@code null}. (There can be at most one such mapping.) + * + * @throws NullPointerException if the specified key is null + */ + public V get(long key) { + final int hash = JdbmUtil.longHash(key); + return segmentFor(hash).get(key, hash); + } + + /** + * Tests if the specified object is a key in this table. + * + * @param key possible key + * @return true if and only if the specified object + * is a key in this table, as determined by the + * equals method; false otherwise. + * @throws NullPointerException if the specified key is null + */ + public boolean containsKey(long key) { + final int hash = JdbmUtil.longHash(key); + return segmentFor(hash).containsKey(key, hash); + } + + /** + * Returns true if this map maps one or more keys to the + * specified value. Note: This method requires a full internal + * traversal of the hash table, and so is much slower than + * method containsKey. + * + * @param value value whose presence in this map is to be tested + * @return true if this map maps one or more keys to the + * specified value + * @throws NullPointerException if the specified value is null + */ + public boolean containsValue(Object value) { + if (value == null) + throw new NullPointerException(); + + // See explanation of modCount use above + + final Segment[] segments = this.segments; + int[] mc = new int[segments.length]; + + // Try a few times without locking + for (int k = 0; k < RETRIES_BEFORE_LOCK; ++k) { + int sum = 0; + int mcsum = 0; + for (int i = 0; i < segments.length; ++i) { + int c = segments[i].count; + mcsum += mc[i] = segments[i].modCount; + if (segments[i].containsValue(value)) + return true; + } + boolean cleanSweep = true; + if (mcsum != 0) { + for (int i = 0; i < segments.length; ++i) { + int c = segments[i].count; + if (mc[i] != segments[i].modCount) { + cleanSweep = false; + break; + } + } + } + if (cleanSweep) + return false; + } + // Resort to locking all segments + for (int i = 0; i < segments.length; ++i) + segments[i].lock(); + boolean found = false; + try { + for (int i = 0; i < segments.length; ++i) { + if (segments[i].containsValue(value)) { + found = true; + break; + } + } + } finally { + for (int i = 0; i < segments.length; ++i) + segments[i].unlock(); + } + return found; + } + + /** + * Legacy method testing if some key maps into the specified value + * in this table. This method is identical in functionality to + * {@link #containsValue}, and exists solely to ensure + * full compatibility with class {@link java.util.Hashtable}, + * which supported this method prior to introduction of the + * Java Collections framework. + + * @param value a value to search for + * @return true if and only if some key maps to the + * value argument in this table as + * determined by the equals method; + * false otherwise + * @throws NullPointerException if the specified value is null + */ + public boolean contains(Object value) { + return containsValue(value); + } + + /** + * Maps the specified key to the specified value in this table. + * Neither the key nor the value can be null. + * + *
The value can be retrieved by calling the get method + * with a key that is equal to the original key. + * + * @param key key with which the specified value is to be associated + * @param value value to be associated with the specified key + * @return the previous value associated with key, or + * null if there was no mapping for key + * @throws NullPointerException if the specified key or value is null + */ + public V put(long key, V value) { + if (value == null) + throw new NullPointerException(); + final int hash = JdbmUtil.longHash(key); + return segmentFor(hash).put(key, hash, value, false); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or null if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + public V putIfAbsent(long key, V value) { + if (value == null) + throw new NullPointerException(); + final int hash = JdbmUtil.longHash(key); + return segmentFor(hash).put(key, hash, value, true); + } + + + /** + * Removes the key (and its corresponding value) from this map. + * This method does nothing if the key is not in the map. + * + * @param key the key that needs to be removed + * @return the previous value associated with key, or + * null if there was no mapping for key + * @throws NullPointerException if the specified key is null + */ + public V remove(long key) { + final int hash = JdbmUtil.longHash(key); + return segmentFor(hash).remove(key, hash, null); + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if the specified key is null + */ + public boolean remove(long key, Object value) { + final int hash = JdbmUtil.longHash(key); + if (value == null) + return false; + return segmentFor(hash).remove(key, hash, value) != null; + } + + /** + * {@inheritDoc} + * + * @throws NullPointerException if any of the arguments are null + */ + public boolean replace(long key, V oldValue, V newValue) { + if (oldValue == null || newValue == null) + throw new NullPointerException(); + final int hash = JdbmUtil.longHash(key); + return segmentFor(hash).replace(key, hash, oldValue, newValue); + } + + /** + * {@inheritDoc} + * + * @return the previous value associated with the specified key, + * or null if there was no mapping for the key + * @throws NullPointerException if the specified key or value is null + */ + public V replace(long key, V value) { + if (value == null) + throw new NullPointerException(); + final int hash = JdbmUtil.longHash(key); + return segmentFor(hash).replace(key, hash, value); + } + + /** + * Removes all of the mappings from this map. 
+ */ + public void clear() { + for (int i = 0; i < segments.length; ++i) + segments[i].clear(); + } + + + + + + /* ---------------- Iterator Support -------------- */ + + abstract class HashIterator { + int nextSegmentIndex; + int nextTableIndex; + HashEntry[] currentTable; + HashEntry< V> nextEntry; + HashEntry< V> lastReturned; + + HashIterator() { + nextSegmentIndex = segments.length - 1; + nextTableIndex = -1; + advance(); + } + + + final void advance() { + if (nextEntry != null && (nextEntry = nextEntry.next) != null) + return; + + while (nextTableIndex >= 0) { + if ( (nextEntry = currentTable[nextTableIndex--]) != null) + return; + } + + while (nextSegmentIndex >= 0) { + Segment seg = segments[nextSegmentIndex--]; + if (seg.count != 0) { + currentTable = seg.table; + for (int j = currentTable.length - 1; j >= 0; --j) { + if ( (nextEntry = currentTable[j]) != null) { + nextTableIndex = j - 1; + return; + } + } + } + } + } + + public boolean hasNext() { return nextEntry != null; } + + HashEntry nextEntry() { + if (nextEntry == null) + throw new NoSuchElementException(); + lastReturned = nextEntry; + advance(); + return lastReturned; + } + + public void remove() { + if (lastReturned == null) + throw new IllegalStateException(); + LongConcurrentHashMap.this.remove(lastReturned.key); + lastReturned = null; + } + } + + final class KeyIterator + extends HashIterator + implements Iterator + { + public Long next() { return super.nextEntry().key; } + } + + final class ValueIterator + extends HashIterator + implements Iterator + { + public V next() { return super.nextEntry().value; } + } + + + final class MapIterator extends HashIterator implements LongMapIterator{ + + private long key; + private V value; + + @Override + public boolean moveToNext() { + if(!hasNext()) return false; + HashEntry next = nextEntry(); + key = next.key; + value = next.value; + return true; + } + + @Override + public long key() { + return key; + } + + @Override + public V value() { + return value; + } + } + + + + + +} \ No newline at end of file diff --git a/src/main/java/net/kotek/jdbm/LongHashMap.java b/src/main/java/net/kotek/jdbm/LongHashMap.java new file mode 100644 index 000000000..80a1e241a --- /dev/null +++ b/src/main/java/net/kotek/jdbm/LongHashMap.java @@ -0,0 +1,409 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package net.kotek.jdbm; + +import java.io.Serializable; +import java.util.Arrays; +import java.util.Iterator; +import java.util.NoSuchElementException; + +/** + * Hash Map which uses primitive long as key. + * Main advantage is new instanceof of Long does not have to be created for each lookup. + *

+ * This code comes from Android, which in turn comes from Apache Harmony. + * This class was modified to use primitive longs and stripped down to consume less space. +

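+ * <p>
+ * Unlike {@link LongConcurrentHashMap}, this class is not thread safe. A minimal
+ * usage sketch:
+ * <pre>{@code
+ * LongMap<String> map = new LongHashMap<String>();
+ * map.put(1L, "one");          // returns null, no previous mapping
+ * String s = map.get(1L);      // "one"
+ * map.remove(1L);              // returns "one"; the map is empty again
+ * }</pre>
+ *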
+ * Author of JDBM modifications: Jan Kotek + */ +class LongHashMap implements Serializable, LongMap { + private static final long serialVersionUID = 362499999763181265L; + + private int elementCount; + + private Entry[] elementData; + + private final float loadFactor; + + private int threshold; + + private int defaultSize = 16; + + private transient Entry reuseAfterDelete = null; + + static final class Entry implements Serializable{ + private static final long serialVersionUID = 362445231113181265L; + + Entry next; + + V value; + + long key; + + Entry(long theKey) { + this.key = theKey; + this.value = null; + } + + + } + + + static class HashMapIterator implements Iterator { + private int position = 0; + + + boolean canRemove = false; + + Entry entry; + + Entry lastEntry; + + final LongHashMap associatedMap; + + HashMapIterator(LongHashMap hm) { + associatedMap = hm; + } + + public boolean hasNext() { + if (entry != null) { + return true; + } + + Entry[] elementData = associatedMap.elementData; + int length = elementData.length; + int newPosition = position; + boolean result = false; + + while (newPosition < length) { + if (elementData[newPosition] == null) { + newPosition++; + } else { + result = true; + break; + } + } + + position = newPosition; + return result; + } + + public V next() { + + if (!hasNext()) { + throw new NoSuchElementException(); + } + + Entry result; + Entry _entry = entry; + if (_entry == null) { + result = lastEntry = associatedMap.elementData[position++]; + entry = lastEntry.next; + } else { + if (lastEntry.next != _entry) { + lastEntry = lastEntry.next; + } + result = _entry; + entry = _entry.next; + } + canRemove = true; + return result.value; + } + + public void remove() { + if (!canRemove) { + throw new IllegalStateException(); + } + + canRemove = false; + + if (lastEntry.next == entry) { + while (associatedMap.elementData[--position] == null) { + // Do nothing + } + associatedMap.elementData[position] = associatedMap.elementData[position].next; + entry = null; + } else { + lastEntry.next = entry; + } + if (lastEntry != null) { + Entry reuse = lastEntry; + lastEntry = null; + reuse.key = Long.MIN_VALUE; + reuse.value = null; + associatedMap.reuseAfterDelete = reuse; + } + + associatedMap.elementCount--; + } + } + + + @SuppressWarnings("unchecked") + private Entry[] newElementArray(int s) { + return new Entry[s]; + } + + /** + * Constructs a new empty {@code HashMap} instance. + * + * @since Android 1.0 + */ + public LongHashMap() { + this(16); + } + + /** + * Constructs a new {@code HashMap} instance with the specified capacity. + * + * @param capacity the initial capacity of this hash map. + * @throws IllegalArgumentException when the capacity is less than zero. + * @since Android 1.0 + */ + public LongHashMap(int capacity) { + defaultSize = capacity; + if (capacity >= 0) { + elementCount = 0; + elementData = newElementArray(capacity == 0 ? 1 : capacity); + loadFactor = 0.75f; // Default load factor of 0.75 + computeMaxSize(); + } else { + throw new IllegalArgumentException(); + } + } + + + // BEGIN android-changed + + + + @Override + public void clear() { + if (elementCount > 0) { + elementCount = 0; + } + if(elementData.length>1024 && elementData.length>defaultSize) + elementData = new Entry[defaultSize]; + else + Arrays.fill(elementData, null); + computeMaxSize(); + } + // END android-changed + + /** + * Returns a shallow copy of this map. + * + * @return a shallow copy of this map. 
+ * @since Android 1.0 + */ + + + private void computeMaxSize() { + threshold = (int) (elementData.length * loadFactor); + } + + + + + @Override + public V get(final long key) { + + final int hash = JdbmUtil.longHash(key); + final int index = (hash & 0x7FFFFFFF) % elementData.length; + + //find non null entry + Entry m = elementData[index]; + while (m != null) { + if (key == m.key) + return m.value; + m = m.next; + } + + return null; + + } + + + + @Override + public boolean isEmpty() { + return elementCount == 0; + } + + /** + * @return iterator over keys + */ + +// public Iterator keyIterator(){ +// return new HashMapIterator( +// new MapEntry.Type() { +// public K get(Entry entry) { +// return entry.key; +// } +// }, HashMap.this); +// +// } + + + + + @Override + public V put(final long key, final V value) { + + int hash = JdbmUtil.longHash(key); + int index = (hash & 0x7FFFFFFF) % elementData.length; + + //find non null entry + Entry entry = elementData[index]; + while (entry != null && key != entry.key) { + entry = entry.next; + } + + if (entry == null) { + if (++elementCount > threshold) { + rehash(); + index = (hash & 0x7FFFFFFF) % elementData.length; + } + entry = createHashedEntry(key, index); + } + + + V result = entry.value; + entry.value = value; + return result; + } + + + Entry createHashedEntry(final long key, final int index) { + Entry entry = reuseAfterDelete; + if (entry == null) { + entry = new Entry(key); + } else { + reuseAfterDelete = null; + entry.key = key; + entry.value = null; + } + + entry.next = elementData[index]; + elementData[index] = entry; + return entry; + } + + + void rehash(final int capacity) { + int length = (capacity == 0 ? 1 : capacity << 1); + + Entry[] newData = newElementArray(length); + for (int i = 0; i < elementData.length; i++) { + Entry entry = elementData[i]; + while (entry != null) { + int index = ((int) JdbmUtil.longHash(entry.key) & 0x7FFFFFFF) % length; + Entry next = entry.next; + entry.next = newData[index]; + newData[index] = entry; + entry = next; + } + } + elementData = newData; + computeMaxSize(); + } + + void rehash() { + rehash(elementData.length); + } + + /** + * Removes the mapping with the specified key from this map. + * + * @param key the key of the mapping to remove. + * @return the value of the removed mapping or {@code null} if no mapping + * for the specified key was found. + * @since Android 1.0 + */ + + @Override + public V remove(final long key) { + Entry entry = removeEntry(key); + if (entry == null) + return null; + V ret = entry.value; + entry.value = null; + entry.key = Long.MIN_VALUE; + reuseAfterDelete = entry; + + return ret; + } + + Entry removeEntry(final long key) { + Entry last = null; + + final int hash = JdbmUtil.longHash(key); + final int index = (hash & 0x7FFFFFFF) % elementData.length; + Entry entry = elementData[index]; + + while (true) { + if (entry == null) { + return null; + } + + if (key == entry.key) { + if (last == null) { + elementData[index] = entry.next; + } else { + last.next = entry.next; + } + elementCount--; + return entry; + } + + last = entry; + entry = entry.next; + } + } + + /** + * Returns the number of elements in this map. + * + * @return the number of elements in this map. 
+ * @since Android 1.0 + */ + + @Override + public int size() { + return elementCount; + } + + /** + * @returns iterator over values in map + */ + @Override + public Iterator valuesIterator() { + return new HashMapIterator(this); + + } + + @Override + public LongMapIterator longMapIterator() { + return null; //TODO implement + } + + +} + + + diff --git a/src/main/java/net/kotek/jdbm/LongMap.java b/src/main/java/net/kotek/jdbm/LongMap.java new file mode 100644 index 000000000..aa5fdb573 --- /dev/null +++ b/src/main/java/net/kotek/jdbm/LongMap.java @@ -0,0 +1,76 @@ +package net.kotek.jdbm; + +import java.util.Iterator; + +/** + * @author Jan Kotek + */ +interface LongMap { + + /** + * Removes all mappings from this hash map, leaving it empty. + * + * @see #isEmpty + * @see #size + */ + void clear(); + + /** + * Returns the value of the mapping with the specified key. + * + * @param key the key. + * @return the value of the mapping with the specified key, or {@code null} + * if no mapping for the specified key is found. + */ + V get(long key); + + /** + * Returns whether this map is empty. + * + * @return {@code true} if this map has no elements, {@code false} + * otherwise. + * @see #size() + */ + boolean isEmpty(); + + /** + * Maps the specified key to the specified value. + * + * @param key the key. + * @param value the value. + * @return the value of any previous mapping with the specified key or + * {@code null} if there was no such mapping. + */ + V put(long key, V value); + + + /** + * Removes the mapping from this map + * + * @param key to remove + * @return value contained under this key, or null if value did not exist + */ + V remove(long key); + + /** + * Returns the number of elements in this map. + * + * @return the number of elements in this map. + */ + int size(); + + + /** + * @returns iterator over values in map + */ + Iterator valuesIterator(); + + LongMapIterator longMapIterator(); + + + public interface LongMapIterator{ + boolean moveToNext(); + long key(); + V value(); + } +} diff --git a/src/main/java/net/kotek/jdbm/MemoryLowWarningSystem.java b/src/main/java/net/kotek/jdbm/MemoryLowWarningSystem.java new file mode 100644 index 000000000..913fe7f43 --- /dev/null +++ b/src/main/java/net/kotek/jdbm/MemoryLowWarningSystem.java @@ -0,0 +1,103 @@ +package net.kotek.jdbm; + +import javax.management.*; +import java.lang.management.*; +import java.util.*; +import java.util.concurrent.CopyOnWriteArrayList; + +/** + * This memory warning system will call the listener when we + * exceed the percentage of available memory specified. There + * should only be one instance of this object created, since the + * usage threshold can only be set to one number. + *

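+ * Usage sketch (listeners are plain {@link Runnable}s, invoked when the tenured
+ * generation crosses the configured usage threshold):
+ * <pre>{@code
+ * Runnable listener = new Runnable() {
+ *     public void run() {
+ *         // low on memory: release caches, etc.
+ *     }
+ * };
+ * MemoryLowWarningSystem.addListener(listener);
+ * // ... later ...
+ * MemoryLowWarningSystem.removeListener(listener);
+ * }</pre>
+ *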
+ * taken from + * http://www.javaspecialists.eu/archive/Issue092.html + * @author Dr. Heinz M. Kabutz + * Updated for JDBM by Jan Kotek + */ +public class MemoryLowWarningSystem { + private static final Collection listeners = + new CopyOnWriteArrayList(); + +// public interface Listener { +// public void memoryUsageLow(long usedMemory, long maxMemory); +// } + + public static final NotificationListener LISTENER = new NotificationListener() { + public void handleNotification(Notification n, Object hb) { + if (n.getType().equals( + MemoryNotificationInfo.MEMORY_THRESHOLD_EXCEEDED)) { +// long maxMemory = tenuredGenPool.getUsage().getMax(); +// long usedMemory = tenuredGenPool.getUsage().getUsed(); + for (Runnable listener : listeners) { + listener.run(); + } + } + } + }; + + + public static synchronized void addListener(Runnable listener) { + listeners.add(listener); + if(listeners.size()==1){ + MemoryMXBean mbean = ManagementFactory.getMemoryMXBean(); + NotificationEmitter emitter = (NotificationEmitter) mbean; + emitter.addNotificationListener(LISTENER, null, null); + } + + } + + public static synchronized void removeListener(Runnable listener) { + listeners.remove(listener); + if(listeners.isEmpty()){ + //unregister to save some memory + MemoryMXBean mbean = ManagementFactory.getMemoryMXBean(); + NotificationEmitter emitter = (NotificationEmitter) mbean; + try { + emitter.removeNotificationListener(LISTENER); + } catch (ListenerNotFoundException e) { + + } + } + + } + + private static final MemoryPoolMXBean tenuredGenPool = + findTenuredGenPool(); + + private static void setPercentageUsageThreshold(double percentage) { + if (percentage <= 0.0 || percentage > 1.0) { + throw new IllegalArgumentException("Percentage not in range"); + } + long maxMemory = tenuredGenPool.getUsage().getMax(); + long warningThreshold = (long) (maxMemory * percentage); + tenuredGenPool.setUsageThreshold(warningThreshold); + } + + /** + * Tenured Space Pool can be determined by it being of type + * HEAP and by it being possible to set the usage threshold. + */ + private static MemoryPoolMXBean findTenuredGenPool() { + for (MemoryPoolMXBean pool : + ManagementFactory.getMemoryPoolMXBeans()) { + // I don't know whether this approach is better, or whether + // we should rather check for the pool name "Tenured Gen"? 
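+ // (Pool names differ between collectors, e.g. "Tenured Gen", "PS Old Gen",
+ // "CMS Old Gen" or "G1 Old Gen", so matching on HEAP type plus threshold
+ // support is more portable than matching on a name.)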
+ if (pool.getType() == MemoryType.HEAP && + pool.isUsageThresholdSupported()) { + return pool; + } + } + throw new AssertionError("Could not find tenured space"); + } + + + static{ + setPercentageUsageThreshold(0.75); + } + +} + + + diff --git a/src/main/java/net/kotek/jdbm/RecordManager.java b/src/main/java/net/kotek/jdbm/RecordManager.java new file mode 100644 index 000000000..8eb1ba39f --- /dev/null +++ b/src/main/java/net/kotek/jdbm/RecordManager.java @@ -0,0 +1,18 @@ +package net.kotek.jdbm; + +/** + * @author Jan Kotek + */ +public interface RecordManager { + + long recordPut(A value, Serializer serializer); + + A recordGet(long recid, Serializer serializer); + + void recordUpdate(long recid, A value, Serializer serializer); + + void recordDelete(long recid); + + void close(); + +} diff --git a/src/main/java/net/kotek/jdbm/RecordStore.java b/src/main/java/net/kotek/jdbm/RecordStore.java new file mode 100644 index 000000000..4a9de5318 --- /dev/null +++ b/src/main/java/net/kotek/jdbm/RecordStore.java @@ -0,0 +1,648 @@ +package net.kotek.jdbm; + + +import java.io.File; +import java.io.IOError; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.MappedByteBuffer; +import java.nio.channels.FileChannel; +import java.util.Arrays; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +public class RecordStore implements RecordManager { + + private final File dataFile; + private final File indexFile; + + private FileChannel dataFileChannel; + private FileChannel indexFileChannel; + + protected ByteBuffer[] dataBufs = new ByteBuffer[8]; + protected ByteBuffer[] indexBufs = new ByteBuffer[8]; + + static final int BUF_SIZE = 1<<30; + static final int BUF_SIZE_RECID = BUF_SIZE/8; + + static final int BUF_GROWTH = 1<<23; + + static final long PHYS_OFFSET_MASK = 0x0000FFFFFFFFFFFFL; + + + + + final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); + + /** File header. First 4 bytes are 'JDBM', last two bytes are store format version */ + static final long HEADER = (long)'J' <<(8*7) + (long)'D' <<(8*6) + (long)'B' <<(8*5) + (long)'M' <<(8*4) + CC.STORE_FORMAT_VERSION; + + + static final int RECID_CURRENT_PHYS_FILE_SIZE = 1; + static final int RECID_CURRENT_INDEX_FILE_SIZE = 2; + + /** offset in index file which points to FREEINDEX list (free slots in index file) */ + static final int RECID_FREE_INDEX_SLOTS = 3; + + //TODO slots 4 to 18 are currently unused + /** + * This recid is reserved for user usage. 
You may put whatever you want here + * It is only used by JDBM during unit tests, not at production + * */ + static final int RECID_USER_WHOTEVER =19; + + static final int RECID_FREE_PHYS_RECORDS_START = 20; + + static final int NUMBER_OF_PHYS_FREE_SLOT =1000 + 1535; + + /** minimal number of longs to grow index file by, prevents to often buffer remapping*/ + static final int MINIMAL_INDEX_FILE_GROW = 1024; + /** when index file overflows it is grown by NEWSIZE = SIZE + SIZE/N */ + static final int INDEX_FILE_GROW_FACTOR= 10; + + static final int MAX_RECORD_SIZE = 65535; + + + + + /** must be smaller then 127 */ + static final byte LONG_STACK_NUM_OF_RECORDS_PER_PAGE = 100; + + static final int LONG_STACK_PAGE_SIZE = 8 + LONG_STACK_NUM_OF_RECORDS_PER_PAGE * 8; + + /** offset in index file from which normal physid starts */ + static final int INDEX_OFFSET_START = RECID_FREE_PHYS_RECORDS_START +NUMBER_OF_PHYS_FREE_SLOT; + + static{ + if(CC.ASSERT && BUF_SIZE%BUF_GROWTH!=0) throw new InternalError(); + if(CC.ASSERT && BUF_GROWTH long recordPut(A value, Serializer serializer) { + try{ + DataOutput2 out = new DataOutput2(); + serializer.serialize(out,value); + if(CC.ASSERT && out.pos>1<<16) throw new InternalError("Record bigger then 64KB"); + + try{ + writeLock_lock(); + //update index file + long recid = freeRecidTake(); + + //get physical record + // first 16 bites is record size, remaining 48 bytes is record offset in phys file + final long indexValue = out.pos!=0? + freePhysRecTake(out.pos): + 0L; + + indexValPut(recid, indexValue); + + final long dataPos = indexValue & PHYS_OFFSET_MASK; + + final ByteBuffer dataBuf = dataBufs[((int) (dataPos / BUF_SIZE))]; + + //set data cursor to desired position + dataBuf.position((int) (dataPos%BUF_SIZE)); + //write data + dataBuf.put(out.buf,0,out.pos); + + return recid; + }finally { + writeLock_unlock(); + } + }catch(IOException e){ + throw new IOError(e); + } + } + + protected long freeRecidTake() throws IOException { + writeLock_checkLocked(); + long recid = longStackTake(RECID_FREE_INDEX_SLOTS); + if(recid == 0){ + //could not reuse recid, so create new one + final long indexSize = indexValGet(RECID_CURRENT_INDEX_FILE_SIZE); + recid = indexSize/8; + if(CC.ASSERT && indexSize%8!=0) throw new InternalError(); + + indexValPut(RECID_CURRENT_INDEX_FILE_SIZE, indexSize+8); + + //grow buffer if necessary + final int indexSlot = (int) (indexSize/BUF_SIZE); + ByteBuffer indexBuf = + indexSlot==indexBufs.length? 
+ null: + indexBufs[indexSlot]; + if(indexBuf == null){ + //nothing was yet allocated at this position, so create new ByteBuffer + if(CC.ASSERT && indexSize%BUF_SIZE!=0) throw new InternalError(); + indexBuf = indexFileChannel.map(FileChannel.MapMode.READ_WRITE, indexSize, BUF_GROWTH); + //make sure array is big enought for new item + if(indexSlot == indexBufs.length){ + indexBufs = Arrays.copyOf(indexBufs, indexBufs.length * 2); + } + + indexBufs[indexSlot] = indexBuf; + }else if(indexSize%BUF_SIZE>=indexBuf.capacity()){ + //grow buffer + indexBuf = indexFileChannel.map( + FileChannel.MapMode.READ_WRITE, + (indexSize/BUF_SIZE)*BUF_SIZE, + indexBuf.capacity() + BUF_GROWTH); + if(CC.ASSERT && indexBuf.capacity()>BUF_SIZE) throw new InternalError(); +// //force old buffer to be written +// if(indexBuf instanceof MappedByteBuffer){ +// ((MappedByteBuffer)indexBuf).force(); +// } + indexBufs[indexSlot] = indexBuf; + } + + } + return recid; + } + + + protected void freeRecidPut(long recid) { + longStackPut(RECID_FREE_INDEX_SLOTS, recid); + } + + + @Override + public A recordGet(long recid, Serializer serializer) { + try{ + try{ + readLock_lock(); + + final long indexValue = indexValGet(recid) ; + final long dataPos = indexValue & PHYS_OFFSET_MASK; + final int dataSize = (int) (indexValue>>>48); + if(dataPos == 0) return null; + + final ByteBuffer dataBuf = dataBufs[((int) (dataPos / BUF_SIZE))]; + + DataInput2 in = new DataInput2(dataBuf, (int) (dataPos%BUF_SIZE)); + final A value = serializer.deserialize(in,dataSize); + + if(CC.ASSERT && in.pos != dataPos%BUF_SIZE + dataSize) + throw new InternalError("Data were not fully read, recid:"+recid+", serializer:"+serializer); + + return value; + }finally{ + readLock_unlock(); + } + + + }catch(IOException e){ + throw new IOError(e); + } + } + + + @Override + public void recordUpdate(long recid, A value, Serializer serializer){ + try{ + DataOutput2 out = new DataOutput2(); + serializer.serialize(out,value); + + //TODO special handling for zero size records + if(CC.ASSERT && out.pos>1<<16) throw new InternalError("Record bigger then 64KB"); + try{ + writeLock_lock(); + + //check if size has changed + final long oldIndexVal = indexValGet(recid); + if(oldIndexVal >>>48 == out.pos ){ + //size is the same, so just write new data + final long dataPos = oldIndexVal&PHYS_OFFSET_MASK; + final ByteBuffer dataBuf = dataBufs[((int) (dataPos / BUF_SIZE))]; + dataBuf.position((int) (dataPos%BUF_SIZE)); + dataBuf.put(out.buf,0,out.pos); + }else{ + //size has changed, so write into new location + final long newIndexValue = freePhysRecTake(out.pos); + final long dataPos = newIndexValue&PHYS_OFFSET_MASK; + final ByteBuffer dataBuf = dataBufs[((int) (dataPos / BUF_SIZE))]; + dataBuf.position((int) (dataPos%BUF_SIZE)); + dataBuf.put(out.buf,0,out.pos); + //update index file with new location + indexValPut(recid,newIndexValue); + + //and set old phys record as free + if(oldIndexVal!=0) + freePhysRecPut(oldIndexVal); + } + }finally { + writeLock_unlock(); + } + }catch(IOException e){ + throw new IOError(e); + } + + } + + @Override + public void recordDelete(long recid){ + try{ + writeLock_lock(); + final long oldIndexVal = indexValGet(recid); + indexValPut(recid, 0L); + freeRecidPut(recid); + if(oldIndexVal!=0) + freePhysRecPut(oldIndexVal); + }finally { + writeLock_unlock(); + } + } + + + + @Override + public void close() { + try{ + writeLock_lock(); +// for(ByteBuffer b : dataBufs){ +// if(b instanceof MappedByteBuffer){ +// ((MappedByteBuffer)b).force(); +// } +// } +// 
for(ByteBuffer b : indexBufs){ +// if(b instanceof MappedByteBuffer){ +// ((MappedByteBuffer)b).force(); +// } +// } + + dataBufs = null; + indexBufs = null; + +// dataFileChannel.force(true); + dataFileChannel.close(); + dataFileChannel = null; +// indexFileChannel.force(true); + indexFileChannel.close(); + indexFileChannel = null; + + }catch(IOException e){ + throw new IOError(e); + }finally { + writeLock_unlock(); + } + } + + + long longStackTake(final long listRecid) { + final long listPhysid = indexValGet(listRecid) &PHYS_OFFSET_MASK; + if(listPhysid == 0) + return 0; //there is no such list, so just return 0 + + writeLock_checkLocked(); + + final int bufOffset = (int) (listPhysid%BUF_SIZE); + final ByteBuffer dataBuf = dataBufs[((int) (listPhysid / BUF_SIZE))]; + + final byte numberOfRecordsInPage = dataBuf.get(bufOffset); + final long ret = dataBuf.getLong (bufOffset+numberOfRecordsInPage*8); + + //was it only record at that page? + if(numberOfRecordsInPage == 1){ + //yes, delete this page + final long previousListPhysid =dataBuf.getLong(bufOffset) &PHYS_OFFSET_MASK; + if(previousListPhysid !=0){ + //update index so it points to previous page + indexValPut(listRecid, previousListPhysid | (((long) LONG_STACK_PAGE_SIZE) << 48)); + }else{ + //zero out index + indexValPut(listRecid, 0L); + } + //put space used by this page into free list + freePhysRecPut(listPhysid | (((long)LONG_STACK_PAGE_SIZE)<<48)); + }else{ + //no, it was not last record at this page, so just decrement the counter + dataBuf.put(bufOffset, (byte)(numberOfRecordsInPage-1)); + } + return ret; + + } + + void longStackPut(final long listRecid, final long offset) { + writeLock_checkLocked(); + + //index position was cleared, put into free index list + final long listPhysid2 = indexValGet(listRecid) &PHYS_OFFSET_MASK; + + if(listPhysid2 == 0){ //empty list? + //yes empty, create new page and fill it with values + final long listPhysid = freePhysRecTake(LONG_STACK_PAGE_SIZE) &PHYS_OFFSET_MASK; + if(CC.ASSERT && listPhysid == 0) throw new InternalError(); + ByteBuffer dataBuf = dataBufs[((int) (listPhysid / BUF_SIZE))]; + //set previous Free Index List page to zero as this is first page + dataBuf.putLong((int) (listPhysid%BUF_SIZE ), 0L); + //set number of free records in this page to 1 + dataBuf.put((int)(listPhysid%BUF_SIZE),(byte)1); + + //set record + dataBuf.putLong((int) (listPhysid%BUF_SIZE + 8), offset); + //and update index file with new page location + indexValPut(listRecid, (((long) LONG_STACK_PAGE_SIZE) << 48) | listPhysid); + }else{ + + final ByteBuffer dataBuf2 = dataBufs[((int) (listPhysid2 / BUF_SIZE))]; + final byte numberOfRecordsInPage = dataBuf2.get((int) (listPhysid2%BUF_SIZE)); + if(numberOfRecordsInPage == LONG_STACK_NUM_OF_RECORDS_PER_PAGE){ //is current page full? 
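+ //(long stack page layout: byte 0 is the record counter, the low six bytes
+ // of the first long hold the 48-bit offset of the previous page, and up to
+ // LONG_STACK_NUM_OF_RECORDS_PER_PAGE eight-byte record slots follow)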
+ //yes it is full, so we need to allocate new page and write our number there + + final long listPhysid = freePhysRecTake(LONG_STACK_PAGE_SIZE) &PHYS_OFFSET_MASK; + if(CC.ASSERT && listPhysid == 0) throw new InternalError(); + final ByteBuffer dataBuf = dataBufs[((int) (listPhysid / BUF_SIZE))]; + final int buffOffset =(int) (listPhysid%BUF_SIZE); + //set location to previous page + dataBuf.putLong(buffOffset, listPhysid2); + //set number of free records in this page to 1 + dataBuf.put(buffOffset,(byte)1); + //set free record + dataBuf.putLong(buffOffset + 8, offset); + //and update index file with new page location + indexValPut(listRecid, (((long) LONG_STACK_PAGE_SIZE) << 48) | listPhysid); + }else{ + //there is space on page, so just write released recid and increase the counter + dataBuf2.putLong((int) (listPhysid2%BUF_SIZE + 8 + 8 * numberOfRecordsInPage), offset); + dataBuf2.put((int) (listPhysid2%BUF_SIZE), (byte) (numberOfRecordsInPage+1)); + } + } + } + + + final int freePhysRecSize2FreeSlot(final int size){ + if(CC.ASSERT && size>MAX_RECORD_SIZE) throw new IllegalArgumentException("too big record"); + if(CC.ASSERT && size<0) throw new IllegalArgumentException("negative size"); + + if(size<1535) + return size-1; + else if(size == MAX_RECORD_SIZE) + return NUMBER_OF_PHYS_FREE_SLOT-1; + else + return 1535 -1 + (size-1535)/64; + } + + + final long freePhysRecTake(final int requiredSize){ + writeLock_checkLocked(); + + + int slot = freePhysRecSize2FreeSlot(requiredSize); + //check if this slot can contain smaller records, + if(requiredSize>1 && slot==freePhysRecSize2FreeSlot(requiredSize-1)) + slot ++; //yes, in this case we have to start at next slot with bigger record and divide it + + while(slot< NUMBER_OF_PHYS_FREE_SLOT){ + + final long v = longStackTake(RECID_FREE_PHYS_RECORDS_START +slot); + if(v!=0){ + //we found it, check if we need to split record + final int foundRecSize = (int) (v>>>48); + if(foundRecSize!=requiredSize){ + + //yes we need split + final long newIndexValue = + ((long)(foundRecSize - requiredSize)<<48) | //encode size into new free record + (v & PHYS_OFFSET_MASK) + requiredSize; //and encode new free record phys offset + freePhysRecPut(newIndexValue); + } + + //return offset combined with required size + return (v & PHYS_OFFSET_MASK) | + (((long)requiredSize)<<48); + }else{ + slot++; + } + } + + try{ + + //No free records found, so lets increase the file size. + //We need to take case of growing ByteBuffers. 
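+ //(the data file is mapped as a series of BUF_SIZE = 1GB chunks, and each
+ // chunk grows in BUF_GROWTH = 8MB increments)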
+ // Also max size of ByteBuffer is 2GB, so we need to use multiple ones + + final long physFileSize = indexValGet(RECID_CURRENT_PHYS_FILE_SIZE); + if(CC.ASSERT && physFileSize <=0) throw new InternalError(); + + if(physFileSize%BUF_SIZE+requiredSizedataBuf.capacity()){ + //TODO optimize remap to grow slower + int newCapacity = dataBuf.capacity(); + while(physFileSize%BUF_SIZE+requiredSize>newCapacity){ + newCapacity+=BUF_GROWTH; + } + + newCapacity = Math.min(BUF_SIZE, newCapacity); + + final ByteBuffer dataBuf2 = + dataFileChannel.map(FileChannel.MapMode.READ_WRITE, + ((physFileSize/BUF_SIZE)*BUF_SIZE), + newCapacity); + dataBufs[((int) (physFileSize / BUF_SIZE))] = dataBuf2; + +// //force old buffer to be written +// if(dataBuf instanceof MappedByteBuffer){ +// ((MappedByteBuffer)dataBuf).force(); +// } + + } + + //and return this + return (((long)requiredSize)<<48) | physFileSize; + }else{ + //new size is overlapping 2GB ByteBuffer, so map second ByteBuffer + final ByteBuffer dataBuf1 = dataBufs[((int) (physFileSize / BUF_SIZE))]; + if(CC.ASSERT && dataBuf1.capacity()!=BUF_SIZE) throw new InternalError(); + + //required size does not fit into remaining chunk at dataBuf1, so lets create an free records + final long freeSizeToCreate = BUF_SIZE - physFileSize%BUF_SIZE; + if(CC.ASSERT && freeSizeToCreate == 0) throw new InternalError(); + + final long nextBufferStartOffset = physFileSize + freeSizeToCreate; + if(CC.ASSERT && nextBufferStartOffset%BUF_SIZE!=0) throw new InternalError(); + if(CC.ASSERT && dataBufs[((int) (nextBufferStartOffset / BUF_SIZE))]!=null) throw new InternalError(); + + //allocate next ByteBuffer in row + final ByteBuffer dataBuf2 = dataFileChannel.map( + FileChannel.MapMode.READ_WRITE, + nextBufferStartOffset, + BUF_GROWTH + ); + + //grow array if necessary + final int bufSlot = (int) (nextBufferStartOffset / BUF_SIZE); + if(dataBufs.length == bufSlot){ + dataBufs = Arrays.copyOf(dataBufs,dataBufs.length*2); + } + dataBufs[bufSlot] = dataBuf2; + + + //increase the disk size + indexValPut(RECID_CURRENT_PHYS_FILE_SIZE, physFileSize + freeSizeToCreate + requiredSize); + + //previous buffer was not fully filled, so mark it as free record + freePhysRecPut(freeSizeToCreate<<48|physFileSize); + + //and finally return position at beginning of new buffer + return (((long)requiredSize)<<48) | nextBufferStartOffset; + } + }catch(IOException e){ + throw new IOError(e); + } + + } + + + final void freePhysRecPut(final long indexValue){ + if(CC.ASSERT && (indexValue &PHYS_OFFSET_MASK)==0) throw new InternalError("zero indexValue: "); + final int size = (int) (indexValue>>>48); + + final long listRecid = RECID_FREE_PHYS_RECORDS_START + freePhysRecSize2FreeSlot(size); + longStackPut(listRecid, indexValue); + } + + final long indexValGet(final long recid) { + return indexBufs[((int) (recid / BUF_SIZE_RECID))].getLong( (int) (recid%BUF_SIZE_RECID) * 8); + } + + final void indexValPut(final long recid, final long val) { + indexBufs[((int) (recid / BUF_SIZE_RECID))].putLong((int) ((recid % BUF_SIZE_RECID) * 8), val); + } + + + protected void writeLock_lock() { + lock.writeLock().lock(); + } + + protected void writeLock_unlock() { + lock.writeLock().unlock(); + } + + protected void writeLock_checkLocked() { + if(CC.ASSERT && !lock.writeLock().isHeldByCurrentThread()) throw new IllegalAccessError("no write lock"); + } + + + + protected void readLock_unlock() { + lock.readLock().unlock(); + } + + protected void readLock_lock() { + lock.readLock().lock(); + } + + + protected void 
forceRecordUpdateOnGivenRecid(final long recid, final byte[] value) { + try{ + writeLock_lock(); + //check file size + final long currentIndexFileSize = indexValGet(RECID_CURRENT_INDEX_FILE_SIZE); + if(recid * 8 >currentIndexFileSize){ + //TODO grow index file with buffers overflow + long newIndexFileSize = recid*8; + indexValPut(RECID_CURRENT_INDEX_FILE_SIZE, newIndexFileSize); + } + //size has changed, so write into new location + final long newIndexValue = freePhysRecTake(value.length); + final long dataPos = newIndexValue&PHYS_OFFSET_MASK; + final ByteBuffer dataBuf = dataBufs[((int) (dataPos / BUF_SIZE))]; + dataBuf.position((int) (dataPos%BUF_SIZE)); + dataBuf.put(value); + + long oldIndexValue = indexValGet(recid); + //update index file with new location + indexValPut(recid,newIndexValue); + + //and set old phys record as free + if(oldIndexValue!=0) + freePhysRecPut(oldIndexValue); + }finally { + writeLock_unlock(); + } + } +} \ No newline at end of file diff --git a/src/main/java/net/kotek/jdbm/RecordStoreAsyncWrite.java b/src/main/java/net/kotek/jdbm/RecordStoreAsyncWrite.java new file mode 100644 index 000000000..88d16b5b6 --- /dev/null +++ b/src/main/java/net/kotek/jdbm/RecordStoreAsyncWrite.java @@ -0,0 +1,260 @@ +package net.kotek.jdbm; + +import java.io.IOError; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Iterator; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.logging.Level; + +/** + * RecordStore which stores all modifications in memory. + * All changes are written into store asynchronously in background thread. + * This store is nearly lock free and provides high concurrent scalability. + *

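+ * <p>
+ * Reads are consistent with queued writes: {@code recordGet} consults the
+ * in-memory map of pending modifications first, so a record is visible
+ * immediately after {@code recordPut} or {@code recordUpdate} returns, even
+ * before the background thread has persisted it.
+ *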
+ * This store does not provide cache. Changes are stored in memory only for + * queue and are written to store ASAP. + * + * + * @author Jan Kotek + */ +public class RecordStoreAsyncWrite extends RecordStore{ + + + private long allocatedIndexFileSize; + + private final boolean lazySerialization; + + /** indicates deleted record */ + protected static final Object DELETED = new Object(); + + /** stores writes */ + final protected LongConcurrentHashMap writes = new LongConcurrentHashMap(); + + private boolean shutdownSignal = false; + private CountDownLatch shutdownResponse = new CountDownLatch(1); + + final protected Object writerNotify = new Object(); + + protected final Thread writerThread = new Thread("JDBM writter"){ + public void run(){ + writerThreadRun(); + } + }; + + private void writerThreadRun() { + while(true)try{ + while(writes.isEmpty() && newRecids.remainingCapacity()==0){ + if(writes.isEmpty() && shutdownSignal){ + //store closed, shutdown this thread + shutdownResponse.countDown(); + return; + } + + //TODO this just sucks, proper notify() + Thread.sleep(100); + } + + + LongMap.LongMapIterator iter = writes.longMapIterator(); + while(iter.moveToNext()){ + final long recid = iter.key(); + final Object value = iter.value(); + if(value==DELETED){ + RecordStoreAsyncWrite.super.recordDelete(recid); + }else{ + byte[] data = lazySerialization ? ((SerRec)value).serialize() : (byte[]) value; + RecordStoreAsyncWrite.super.forceRecordUpdateOnGivenRecid(recid, data); + } + //Record will be only removed if value was not updated. + //If value was updated during write, equality check will fail, and it will stay there + //We just collect it at next round + writes.remove(recid, value); + } + + int toFetch = newRecids.remainingCapacity(); + try{ + writeLock_lock(); + for(int i=0;i newRecids = new ArrayBlockingQueue(128); + + + public RecordStoreAsyncWrite(String fileName, boolean lazySerialization) { + super(fileName); + this.lazySerialization = lazySerialization; + allocatedIndexFileSize = indexValGet(RECID_CURRENT_INDEX_FILE_SIZE); + + writerThread.setDaemon(true); + writerThread.start(); + + } + + @Override + public void recordUpdate(long recid, A value, Serializer serializer) { + Object v; + if(lazySerialization){ + v = new SerRec(value, serializer); + }else{ + DataOutput2 out = new DataOutput2(); + try { + serializer.serialize(out, value); + } catch (IOException e) { + throw new IOError(e); + } + v = out.copyBytes(); + } + Object previous = writes.put(recid,v); + + if(previous== DELETED){ + throw new IllegalArgumentException("Recid was deleted: "+recid); + } + } + + @Override + public void recordDelete(long recid) { + if(CC.ASSERT&& recid == 0) throw new InternalError(); + writes.put(recid, DELETED); + } + + @Override + public long recordPut(A value, Serializer serializer) { + try{ + Object v; + if(lazySerialization){ + v = new SerRec(value,serializer); + }else{ + DataOutput2 out = new DataOutput2(); + serializer.serialize(out, value); + v= out.copyBytes(); + } + + final long newRecid = newRecids.take(); + writes.put(newRecid, v); + return newRecid; + } catch (IOException e) { + throw new IOError(e); + }catch(InterruptedException e){ + throw new RuntimeException(e); + } + + + } + + @Override + public A recordGet(long recid, Serializer serializer) { + Object d = writes.get(recid); + if(d == DELETED){ + return null; + }else if(d!=null){ + if(lazySerialization) + return (A) ((SerRec)d).value; + try { + byte[] b = (byte[]) d; + return serializer.deserialize(new 
DataInput2(ByteBuffer.wrap(b),0),b.length); + } catch (IOException e) { + throw new IOError(e); + } + } + + return super.recordGet(recid, serializer); + } + + @Override + public void close() { + shutdownSignal = true; + //wait until writer thread finishes and exits + try { + shutdownResponse.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + //put all remaining unused recids into free list + try{ + writeLock_lock(); + for(long recid:newRecids){ + freeRecidPut(recid); + } + }finally { + writeLock_unlock(); + } + + + super.close(); + } + + private final AtomicInteger writeLocksCounter = CC.ASSERT? new AtomicInteger(0) : null; + + @Override + protected void writeLock_lock() { + if(CC.ASSERT &&writeLocksCounter!=null){ + int c = writeLocksCounter.incrementAndGet(); + if(c!=1) throw new InternalError("more then one writer"); + } + } + + @Override + protected void writeLock_unlock() { + if(CC.ASSERT &&writeLocksCounter!=null){ + int c = writeLocksCounter.decrementAndGet(); + if(c!=0) throw new InternalError("more then one writer"); + } + + } + + @Override + protected void writeLock_checkLocked() { + if(CC.ASSERT &&writeLocksCounter!=null){ + if(writeLocksCounter.get()!=1) + throw new InternalError("more then one writer"); + } + } + + @Override + protected void readLock_unlock() { + //do nothing, background thread and cache takes care of write synchronization + } + + @Override + protected void readLock_lock() { + //do nothing, background thread and cache takes care of write synchronization + } + + + protected static class SerRec { + + final Object value; + final Serializer serializer; + + private SerRec(Object value, Serializer serializer) { + this.value = value; + this.serializer = serializer; + } + + byte[] serialize(){ + DataOutput2 out = new DataOutput2(); + try { + serializer.serialize(out, value); + } catch (IOException e) { + throw new IOError(e); + } + return out.copyBytes(); + } + + } + +} diff --git a/src/main/java/net/kotek/jdbm/RecordStoreCache.java b/src/main/java/net/kotek/jdbm/RecordStoreCache.java new file mode 100644 index 000000000..4ef7126cc --- /dev/null +++ b/src/main/java/net/kotek/jdbm/RecordStoreCache.java @@ -0,0 +1,66 @@ +package net.kotek.jdbm; + + + +/** + * Store which caches created objects using hard reference. + * It auto-clears on low memory to prevent OutOfMemoryException. 
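+ * <p>
+ * Records are cached by recid on every get, put and update. A private
+ * {@code NULL} sentinel is cached for missing records, so repeated lookups
+ * of an absent recid also skip deserialization.
+ *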
+ * + * @author Jan Kotek + */ +public class RecordStoreCache extends RecordStoreAsyncWrite{ + + protected final LongConcurrentHashMap cache = new LongConcurrentHashMap(); + + protected static final Object NULL = new Object(); + + protected final Runnable lowMemoryListener = new Runnable() { + @Override + public void run() { + cache.clear(); + //TODO clear() may have high overhead, maybe just create new map instance + } + }; + + public RecordStoreCache(String fileName, boolean lazySerialization) { + super(fileName, lazySerialization); + MemoryLowWarningSystem.addListener(lowMemoryListener); + } + + @Override + public void recordUpdate(long recid, A value, Serializer serializer) { + cache.put(recid, value!=null?value:NULL); + super.recordUpdate(recid, value, serializer); + } + + @Override + public void recordDelete(long recid) { + cache.remove(recid); + super.recordDelete(recid); + } + + @Override + public long recordPut(A value, Serializer serializer) { + final long recid = super.recordPut(value, serializer); + cache.put(recid,value!=null?value:NULL); + return recid; + } + + @Override + public A recordGet(long recid, Serializer serializer) { + A v = (A) cache.get(recid); + if(v==NULL) return null; + if(v!=null) return v; + v = super.recordGet(recid, serializer); + cache.put(recid, v!=null?v:NULL); + return v; + } + + @Override + public void close() { + MemoryLowWarningSystem.removeListener(lowMemoryListener); + super.close(); + } + + +} diff --git a/src/main/java/net/kotek/jdbm/SerializationHeader.java b/src/main/java/net/kotek/jdbm/SerializationHeader.java new file mode 100644 index 000000000..81c787733 --- /dev/null +++ b/src/main/java/net/kotek/jdbm/SerializationHeader.java @@ -0,0 +1,130 @@ +package net.kotek.jdbm; + +/** + * Header byte, is used at start of each record to indicate data type + * WARNING !!! values bellow must be unique !!!!! 
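+ * For example, {@code Integer.valueOf(0)} is written as the single header
+ * byte {@link #INTEGER_0}, while a large positive int is written as
+ * {@link #INTEGER_PACK} followed by its packed value.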
+ */ +interface SerializationHeader { + + int NULL = 0; + int POJO = 1; + int BOOLEAN_TRUE = 2; + int BOOLEAN_FALSE = 3; + int INTEGER_MINUS_1 = 4; + int INTEGER_0 = 5; + int INTEGER_1 = 6; + int INTEGER_2 = 7; + int INTEGER_3 = 8; + int INTEGER_4 = 9; + int INTEGER_5 = 10; + int INTEGER_6 = 11; + int INTEGER_7 = 12; + int INTEGER_8 = 13; + int INTEGER_255 = 14; + int INTEGER_PACK_NEG = 15; + int INTEGER_PACK = 16; + int LONG_MINUS_1 = 17; + int LONG_0 = 18; + int LONG_1 = 19; + int LONG_2 = 20; + int LONG_3 = 21; + int LONG_4 = 22; + int LONG_5 = 23; + int LONG_6 = 24; + int LONG_7 = 25; + int LONG_8 = 26; + int LONG_PACK_NEG = 27; + int LONG_PACK = 28; + int LONG_255 = 29; + int LONG_MINUS_MAX = 30; + int SHORT_MINUS_1 = 31; + int SHORT_0 = 32; + int SHORT_1 = 33; + int SHORT_255 = 34; + int SHORT_FULL = 35; + int BYTE_MINUS_1 = 36; + int BYTE_0 = 37; + int BYTE_1 = 38; + int BYTE_FULL = 39; + int CHAR = 40; + int FLOAT_MINUS_1 = 41; + int FLOAT_0 = 42; + int FLOAT_1 = 43; + int FLOAT_255 = 44; + int FLOAT_SHORT = 45; + int FLOAT_FULL = 46; + int DOUBLE_MINUS_1 = 47; + int DOUBLE_0 = 48; + int DOUBLE_1 = 49; + int DOUBLE_255 = 50; + int DOUBLE_SHORT = 51; + int DOUBLE_FULL = 52; + int DOUBLE_ARRAY = 53; + int BIGDECIMAL = 54; + int BIGINTEGER = 55; + int FLOAT_ARRAY = 56; + int INTEGER_MINUS_MAX = 57; + int SHORT_ARRAY = 58; + int BOOLEAN_ARRAY = 59; + + int ARRAY_INT_B_255 = 60; + int ARRAY_INT_B_INT = 61; + int ARRAY_INT_S = 62; + int ARRAY_INT_I = 63; + int ARRAY_INT_PACKED = 64; + + int ARRAY_LONG_B = 65; + int ARRAY_LONG_S = 66; + int ARRAY_LONG_I = 67; + int ARRAY_LONG_L = 68; + int ARRAY_LONG_PACKED = 69; + + int CHAR_ARRAY = 70; + int ARRAY_BYTE_INT = 71; + + int ARRAY_OBJECT = 73; + //special cases for BTree values which stores references + int ARRAY_OBJECT_PACKED_LONG = 74; + int ARRAYLIST_PACKED_LONG = 75; + + int STRING_EMPTY = 101; + int STRING = 103; + + int ARRAYLIST = 105; + + + int TREEMAP = 107; + int NOTUSED_HASHMAP_255 = 108; + int HASHMAP = 109; + + int LINKEDHASHMAP = 111; + + + int TREESET = 113; + + int HASHSET = 115; + + int LINKEDHASHSET = 117; + + int LINKEDLIST = 119; + + + int VECTOR = 121; + int IDENTITYHASHMAP = 122; + int HASHTABLE = 123; + int LOCALE = 124; + int PROPERTIES = 125; + + int CLASS = 126; + int DATE = 127; + + + /** + * used for reference to already serialized object in object graph + */ + int OBJECT_STACK = 166; + + int JAVA_SERIALIZATION = 172; + + +} diff --git a/src/main/java/net/kotek/jdbm/Serializer.java b/src/main/java/net/kotek/jdbm/Serializer.java new file mode 100644 index 000000000..c8bcbd1a7 --- /dev/null +++ b/src/main/java/net/kotek/jdbm/Serializer.java @@ -0,0 +1,143 @@ +package net.kotek.jdbm; + +import javax.swing.plaf.basic.BasicInternalFrameTitlePane; +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; +import java.util.Arrays; + +/** + * Interface used to provide a serialization mechanism other than a class' normal + * serialization. + * + * @author Alex Boisvert + */ +public interface Serializer { + + + + + /** + * Serialize the content of an object into a byte array. + * + * @param out ObjectOutput to save object into + * @param value Object to serialize + */ + public void serialize(DataOutput out, A value) + throws IOException; + + + /** + * Deserialize the content of an object from a byte array. 
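+ * Implementations must consume the serialized form exactly: when
+ * {@code CC.ASSERT} is enabled the store checks that precisely
+ * {@code available} bytes were read back.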
+ * + * @param in to read serialized data from + * @param available how many bytes are available in DataInput for reading + * @return deserialized object + * @throws java.io.IOException + * @throws ClassNotFoundException + */ + public A deserialize(DataInput in, int available) + throws IOException; + + + + + /** + * Serializes strings using UTF8 encoding. + * Used mainly for testing. + * Does not handle null values. + */ + Serializer STRING_SERIALIZER = new Serializer() { + + public void serialize(DataOutput out, String value) throws IOException { + final byte[] bytes = value.getBytes(JdbmUtil.UTF8); + out.write(bytes); + } + + + public String deserialize(DataInput in, int available) throws IOException { + byte[] bytes = new byte[available]; + in.readFully(bytes); + return new String(bytes,JdbmUtil.UTF8); + } + }; + + + + + /** Serializes Long into 8 bytes, used mainly for testing. + * Does not handle null values.*/ + Serializer LONG_SERIALIZER = new Serializer() { + @Override + public void serialize(DataOutput out, Long value) throws IOException { + out.writeLong(value); + } + + @Override + public Long deserialize(DataInput in, int available) throws IOException { + return in.readLong(); + } + }; + + /** Serializes Integer into 4 bytes, used mainly for testing. + * Does not handle null values.*/ + Serializer INTEGER_SERIALIZER = new Serializer() { + @Override + public void serialize(DataOutput out, Integer value) throws IOException { + out.writeInt(value); + } + + @Override + public Integer deserialize(DataInput in, int available) throws IOException { + return in.readInt(); + } + }; + + + Serializer BYTE_ARRAY_SERIALIZER = new Serializer() { + + @Override + public void serialize(DataOutput out, byte[] value) throws IOException { + out.write(value); + } + + @Override + public byte[] deserialize(DataInput in, int available) throws IOException { + byte[] ret = new byte[available]; + in.readFully(ret); + return ret; + } + } ; + + + /** deserialises byte[] into integer hash, usefull for debuging */ + Serializer HASH_DESERIALIZER = new Serializer() { + @Override + public void serialize(DataOutput out, Integer value) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public Integer deserialize(DataInput in, int available) throws IOException { + byte[] b = new byte[available]; + in.readFully(b); + return Arrays.hashCode(b); + } + }; + + /** always writes zero length data, and always deserializes it as null */ + Serializer NULL_SERIALIZER = new Serializer() { + @Override + public void serialize(DataOutput out, Object value) throws IOException { + } + + @Override + public Object deserialize(DataInput in, int available) throws IOException { + return null; + } + }; + + /** basic serializer for most classes in 'java.lang' and 'java.util' packages*/ + Serializer BASIC_SERIALIZER = new SerializerBase(); +} + diff --git a/src/main/java/net/kotek/jdbm/SerializerBase.java b/src/main/java/net/kotek/jdbm/SerializerBase.java new file mode 100644 index 000000000..5ee876c88 --- /dev/null +++ b/src/main/java/net/kotek/jdbm/SerializerBase.java @@ -0,0 +1,1218 @@ +package net.kotek.jdbm; + +import java.io.*; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.*; + +import static net.kotek.jdbm.SerializationHeader.*; + +/** + * Serializer which uses 'header byte' to serialize/deserialize + * most of classes from 'java.lang' and 'java.util' packages. 
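+ * <p>
+ * A round-trip sketch (small constants such as {@code Integer.valueOf(1)}
+ * cost a single header byte):
+ * <pre>{@code
+ * Serializer s = Serializer.BASIC_SERIALIZER;
+ * DataOutput2 out = new DataOutput2();
+ * s.serialize(out, "hello");
+ * byte[] bytes = out.copyBytes();
+ * Object back = s.deserialize(new DataInput2(ByteBuffer.wrap(bytes), 0), bytes.length);
+ * // back.equals("hello") is true
+ * }</pre>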
+ * + * @author Jan Kotek + */ +public class SerializerBase implements Serializer{ + + /** + * print statistics to STDOUT + */ + static final boolean DEBUG = false; + + /** + * Utility class similar to ArrayList, but with fast identity search. + */ + static class FastArrayList { + + private int size = 0; + private K[] elementData = (K[]) new Object[8]; + + K get(int index) { + if (index >= size) throw new IndexOutOfBoundsException(); + return elementData[index]; + } + + void add(K o) { + if (elementData.length == size) { + //grow array if necessary + elementData = Arrays.copyOf(elementData, elementData.length * 2); + } + + elementData[size] = o; + size++; + } + + int size() { + return size; + } + + + /** + * This method is reason why ArrayList is not used. + * Search an item in list and returns its index. + * It uses identity rather than 'equalsTo' + * One could argue that TreeMap should be used instead, + * but we do not expect large object trees. + * This search is VERY FAST compared to Maps, it does not allocate + * new instances or uses method calls. + * + * @param obj + * @return index of object in list or -1 if not found + */ + int identityIndexOf(Object obj) { + for (int i = 0; i < size; i++) { + if (obj == elementData[i]) + return i; + } + return -1; + } + + } + + + + + @Override + public void serialize(final DataOutput out, final Object obj) throws IOException { + serialize(out, obj, null); + } + + + public void serialize(final DataOutput out, final Object obj, FastArrayList objectStack) throws IOException { + + /**try to find object on stack if it exists*/ + if (objectStack != null) { + int indexInObjectStack = objectStack.identityIndexOf(obj); + if (indexInObjectStack != -1) { + //object was already serialized, just write reference to it and return + out.write(OBJECT_STACK); + JdbmUtil.packInt(out, indexInObjectStack); + return; + } + //add this object to objectStack + objectStack.add(obj); + } + + final Class clazz = obj != null ? 
obj.getClass() : null; + + /** first try to serialize object without initializing object stack*/ + if (obj == null) { + out.write(NULL); + return; + } else if (clazz == Boolean.class) { + if (((Boolean) obj).booleanValue()) + out.write(BOOLEAN_TRUE); + else + out.write(BOOLEAN_FALSE); + return; + } else if (clazz == Integer.class) { + final int val = (Integer) obj; + writeInteger(out, val); + return; + } else if (clazz == Double.class) { + double v = (Double) obj; + if (v == -1d) + out.write(DOUBLE_MINUS_1); + else if (v == 0d) + out.write(DOUBLE_0); + else if (v == 1d) + out.write(DOUBLE_1); + else if (v >= 0 && v <= 255 && (int) v == v) { + out.write(DOUBLE_255); + out.write((int) v); + } else if (v >= Short.MIN_VALUE && v <= Short.MAX_VALUE && (short) v == v) { + out.write(DOUBLE_SHORT); + out.writeShort((int) v); + } else { + out.write(DOUBLE_FULL); + out.writeDouble(v); + } + return; + } else if (clazz == Float.class) { + float v = (Float) obj; + if (v == -1f) + out.write(FLOAT_MINUS_1); + else if (v == 0f) + out.write(FLOAT_0); + else if (v == 1f) + out.write(FLOAT_1); + else if (v >= 0 && v <= 255 && (int) v == v) { + out.write(FLOAT_255); + out.write((int) v); + } else if (v >= Short.MIN_VALUE && v <= Short.MAX_VALUE && (short) v == v) { + out.write(FLOAT_SHORT); + out.writeShort((int) v); + + } else { + out.write(FLOAT_FULL); + out.writeFloat(v); + } + return; + } else if (clazz == BigInteger.class) { + out.write(BIGINTEGER); + byte[] buf = ((BigInteger) obj).toByteArray(); + serializeByteArrayInt(out, buf); + return; + } else if (clazz == BigDecimal.class) { + out.write(BIGDECIMAL); + BigDecimal d = (BigDecimal) obj; + serializeByteArrayInt(out, d.unscaledValue().toByteArray()); + JdbmUtil.packInt(out, d.scale()); + return; + } else if (clazz == Long.class) { + final long val = (Long) obj; + writeLong(out, val); + return; + } else if (clazz == Short.class) { + short val = (Short) obj; + if (val == -1) + out.write(SHORT_MINUS_1); + else if (val == 0) + out.write(SHORT_0); + else if (val == 1) + out.write(SHORT_1); + else if (val > 0 && val < 255) { + out.write(SHORT_255); + out.write(val); + } else { + out.write(SHORT_FULL); + out.writeShort(val); + } + return; + } else if (clazz == Byte.class) { + byte val = (Byte) obj; + if (val == -1) + out.write(BYTE_MINUS_1); + else if (val == 0) + out.write(BYTE_0); + else if (val == 1) + out.write(BYTE_1); + else { + out.write(SHORT_FULL); + out.writeByte(val); + } + return; + } else if (clazz == Character.class) { + out.write(CHAR); + out.writeChar((Character) obj); + return; + } else if (clazz == String.class) { + String s = (String) obj; + if (s.length() == 0) { + out.write(STRING_EMPTY); + } else { + out.write(STRING); + serializeString(out, s); + } + return; + } else if (obj instanceof Class) { + out.write(CLASS); + out.writeUTF(((Class) obj).getName()); + return; + } else if (obj instanceof int[]) { + writeIntArray(out, (int[]) obj); + return; + } else if (obj instanceof long[]) { + writeLongArray(out, (long[]) obj); + return; + } else if (obj instanceof short[]) { + out.write(SHORT_ARRAY); + short[] a = (short[]) obj; + JdbmUtil.packInt(out,a.length); + for(short s:a) out.writeShort(s); + return; + } else if (obj instanceof boolean[]) { + out.write(BOOLEAN_ARRAY); + boolean[] a = (boolean[]) obj; + JdbmUtil.packInt(out,a.length); + for(boolean s:a) out.writeBoolean(s); //TODO pack 8 booleans to single byte + return; + } else if (obj instanceof double[]) { + out.write(DOUBLE_ARRAY); + double[] a = (double[]) obj; + 
JdbmUtil.packInt(out,a.length); + for(double s:a) out.writeDouble(s); + return; + } else if (obj instanceof float[]) { + out.write(FLOAT_ARRAY); + float[] a = (float[]) obj; + JdbmUtil.packInt(out,a.length); + for(float s:a) out.writeFloat(s); + return; + } else if (obj instanceof char[]) { + out.write(CHAR_ARRAY); + char[] a = (char[]) obj; + JdbmUtil.packInt(out,a.length); + for(char s:a) out.writeChar(s); + return; + } else if (obj instanceof byte[]) { + byte[] b = (byte[]) obj; + out.write(ARRAY_BYTE_INT); + serializeByteArrayInt(out, b); + return; + } else if (clazz == Date.class) { + out.write(DATE); + out.writeLong(((Date) obj).getTime()); + return; + } + + + +// /** classes bellow need object stack, so initialize it if not alredy initialized*/ +// if (objectStack == null) { +// objectStack = new FastArrayList(); +// objectStack.add(obj); +// } + + + if (obj instanceof Object[]) { + Object[] b = (Object[]) obj; + boolean packableLongs = b.length <= 255; + if (packableLongs) { + //check if it contains packable longs + for (Object o : b) { + if (o != null && (o.getClass() != Long.class || (((Long) o).longValue() < 0 && ((Long) o).longValue() != Long.MAX_VALUE))) { + packableLongs = false; + break; + } + } + } + + if (packableLongs) { + //packable Longs is special case, it is often used in JDBM to reference fields + out.write(ARRAY_OBJECT_PACKED_LONG); + out.write(b.length); + for (Object o : b) { + if (o == null) + JdbmUtil.packLong(out, 0); + else + JdbmUtil.packLong(out, ((Long) o).longValue() + 1); + } + + } else { + out.write(ARRAY_OBJECT); + JdbmUtil.packInt(out, b.length); + +// // Write class id for components +// Class componentType = obj.getClass().getComponentType(); +// registerClass(componentType); +// //write class header +// int classId = getClassId(componentType); +// JdbmUtil.packInt(out, classId); + + for (Object o : b) + serialize(out, o, objectStack); + + } + + } else if (clazz == ArrayList.class) { + ArrayList l = (ArrayList) obj; + boolean packableLongs = l.size() < 255; + if (packableLongs) { + //packable Longs is special case, it is often used in JDBM to reference fields + for (Object o : l) { + if (o != null && (o.getClass() != Long.class || (((Long) o).longValue() < 0 && ((Long) o).longValue() != Long.MAX_VALUE))) { + packableLongs = false; + break; + } + } + } + if (packableLongs) { + out.write(ARRAYLIST_PACKED_LONG); + out.write(l.size()); + for (Object o : l) { + if (o == null) + JdbmUtil.packLong(out, 0); + else + JdbmUtil.packLong(out, ((Long) o).longValue() + 1); + } + } else { + serializeCollection(ARRAYLIST, out, obj, objectStack); + } + + } else if (clazz == java.util.LinkedList.class) { + serializeCollection(LINKEDLIST, out, obj, objectStack); + } else if (clazz == Vector.class) { + serializeCollection(VECTOR, out, obj, objectStack); + } else if (clazz == TreeSet.class) { + TreeSet l = (TreeSet) obj; + out.write(TREESET); + JdbmUtil.packInt(out, l.size()); + serialize(out, l.comparator(), objectStack); + for (Object o : l) + serialize(out, o, objectStack); + } else if (clazz == HashSet.class) { + serializeCollection(HASHSET, out, obj, objectStack); + } else if (clazz == LinkedHashSet.class) { + serializeCollection(LINKEDHASHSET, out, obj, objectStack); + } else if (clazz == TreeMap.class) { + TreeMap l = (TreeMap) obj; + out.write(TREEMAP); + JdbmUtil.packInt(out, l.size()); + serialize(out, l.comparator(), objectStack); + for (Object o : l.keySet()) { + serialize(out, o, objectStack); + serialize(out, l.get(o), objectStack); + } + } else if (clazz 
== HashMap.class) {
+            serializeMap(HASHMAP, out, obj, objectStack);
+        } else if (clazz == IdentityHashMap.class) {
+            serializeMap(IDENTITYHASHMAP, out, obj, objectStack);
+        } else if (clazz == LinkedHashMap.class) {
+            serializeMap(LINKEDHASHMAP, out, obj, objectStack);
+        } else if (clazz == Hashtable.class) {
+            serializeMap(HASHTABLE, out, obj, objectStack);
+        } else if (clazz == Properties.class) {
+            serializeMap(PROPERTIES, out, obj, objectStack);
+        } else if (clazz == Locale.class){
+            out.write(LOCALE);
+            Locale l = (Locale) obj;
+            out.writeUTF(l.getLanguage());
+            out.writeUTF(l.getCountry());
+            out.writeUTF(l.getVariant());
+        } else {
+            out.write(POJO);
+            throw new InternalError("POJO serialization not supported yet");
+            //writeObject(out, obj, objectStack);
+        }
+
+    }
+
+
+    static void serializeString(DataOutput out, String obj) throws IOException {
+        final int len = obj.length();
+        JdbmUtil.packInt(out, len);
+        for (int i = 0; i < len; i++) {
+            int c = (int) obj.charAt(i); //TODO investigate if c could be negative here
+            JdbmUtil.packInt(out, c);
+        }
+
+    }
+
+    private void serializeMap(int header, DataOutput out, Object obj, FastArrayList objectStack) throws IOException {
+        Map l = (Map) obj;
+        out.write(header);
+        JdbmUtil.packInt(out, l.size());
+        for (Object o : l.keySet()) {
+            serialize(out, o, objectStack);
+            serialize(out, l.get(o), objectStack);
+        }
+    }
+
+    private void serializeCollection(int header, DataOutput out, Object obj, FastArrayList objectStack) throws IOException {
+        Collection l = (Collection) obj;
+        out.write(header);
+        JdbmUtil.packInt(out, l.size());
+
+        for (Object o : l)
+            serialize(out, o, objectStack);
+
+    }
+
+    private void serializeByteArrayInt(DataOutput out, byte[] b) throws IOException {
+        JdbmUtil.packInt(out, b.length);
+        out.write(b);
+    }
+
+
+    private void writeLongArray(DataOutput da, long[] obj) throws IOException {
+        long max = Long.MIN_VALUE;
+        long min = Long.MAX_VALUE;
+        for (long i : obj) {
+            max = Math.max(max, i);
+            min = Math.min(min, i);
+        }
+
+        if (0 <= min && max <= 255) {
+            da.write(ARRAY_LONG_B);
+            JdbmUtil.packInt(da, obj.length);
+            for (long l : obj)
+                da.write((int) l);
+        } else if (0 <= min && max <= Long.MAX_VALUE) {
+            da.write(ARRAY_LONG_PACKED);
+            JdbmUtil.packInt(da, obj.length);
+            for (long l : obj)
+                JdbmUtil.packLong(da, l);
+        } else if (Short.MIN_VALUE <= min && max <= Short.MAX_VALUE) {
+            da.write(ARRAY_LONG_S);
+            JdbmUtil.packInt(da, obj.length);
+            for (long l : obj)
+                da.writeShort((short) l);
+        } else if (Integer.MIN_VALUE <= min && max <= Integer.MAX_VALUE) {
+            da.write(ARRAY_LONG_I);
+            JdbmUtil.packInt(da, obj.length);
+            for (long l : obj)
+                da.writeInt((int) l);
+        } else {
+            da.write(ARRAY_LONG_L);
+            JdbmUtil.packInt(da, obj.length);
+            for (long l : obj)
+                da.writeLong(l);
+        }
+
+    }
+
+
+    private void writeIntArray(DataOutput da, int[] obj) throws IOException {
+        int max = Integer.MIN_VALUE;
+        int min = Integer.MAX_VALUE;
+        for (int i : obj) {
+            max = Math.max(max, i);
+            min = Math.min(min, i);
+        }
+
+        boolean fitsInByte = 0 <= min && max <= 255;
+        boolean fitsInShort = Short.MIN_VALUE <= min && max <= Short.MAX_VALUE;
+
+
+        if (obj.length <= 255 && fitsInByte) {
+            da.write(ARRAY_INT_B_255);
+            da.write(obj.length);
+            for (int i : obj)
+                da.write(i);
+        } else if (fitsInByte) {
+            da.write(ARRAY_INT_B_INT);
+            JdbmUtil.packInt(da, obj.length);
+            for (int i : obj)
+                da.write(i);
+        } else if (0 <= min && max <= Integer.MAX_VALUE) {
+            da.write(ARRAY_INT_PACKED);
+            JdbmUtil.packInt(da, obj.length);
+            for (int l : obj)
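+                //each non-negative value is packed into 1 to 5 bytes, so small ints stay small on disk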
JdbmUtil.packInt(da, l); + } else if (fitsInShort) { + da.write(ARRAY_INT_S); + JdbmUtil.packInt(da, obj.length); + for (int i : obj) + da.writeShort(i); + } else { + da.write(ARRAY_INT_I); + JdbmUtil.packInt(da, obj.length); + for (int i : obj) + da.writeInt(i); + } + + } + + + private void writeInteger(DataOutput da, final int val) throws IOException { + if (val == -1) + da.write(INTEGER_MINUS_1); + else if (val == 0) + da.write(INTEGER_0); + else if (val == 1) + da.write(INTEGER_1); + else if (val == 2) + da.write(INTEGER_2); + else if (val == 3) + da.write(INTEGER_3); + else if (val == 4) + da.write(INTEGER_4); + else if (val == 5) + da.write(INTEGER_5); + else if (val == 6) + da.write(INTEGER_6); + else if (val == 7) + da.write(INTEGER_7); + else if (val == 8) + da.write(INTEGER_8); + else if (val == Integer.MIN_VALUE) + da.write(INTEGER_MINUS_MAX); + else if (val > 0 && val < 255) { + da.write(INTEGER_255); + da.write(val); + } else if (val < 0) { + da.write(INTEGER_PACK_NEG); + JdbmUtil.packInt(da, -val); + } else { + da.write(INTEGER_PACK); + JdbmUtil.packInt(da, val); + } + } + + private void writeLong(DataOutput da, final long val) throws IOException { + if (val == -1) + da.write(LONG_MINUS_1); + else if (val == 0) + da.write(LONG_0); + else if (val == 1) + da.write(LONG_1); + else if (val == 2) + da.write(LONG_2); + else if (val == 3) + da.write(LONG_3); + else if (val == 4) + da.write(LONG_4); + else if (val == 5) + da.write(LONG_5); + else if (val == 6) + da.write(LONG_6); + else if (val == 7) + da.write(LONG_7); + else if (val == 8) + da.write(LONG_8); + else if (val == Long.MIN_VALUE) + da.write(LONG_MINUS_MAX); + else if (val > 0 && val < 255) { + da.write(LONG_255); + da.write((int) val); + } else if (val < 0) { + da.write(LONG_PACK_NEG); + JdbmUtil.packLong(da, -val); + } else { + da.write(LONG_PACK); + JdbmUtil.packLong(da, val); + } + } + + + + static String deserializeString(DataInput buf) throws IOException { + int len = JdbmUtil.unpackInt(buf); + char[] b = new char[len]; + for (int i = 0; i < len; i++) + b[i] = (char) JdbmUtil.unpackInt(buf); + + return new String(b); + } + + + @Override + public Object deserialize(DataInput is, int capacity) throws IOException { + return deserialize(is, null); + } + + public Object deserialize(DataInput is, FastArrayList objectStack) throws IOException { + + Object ret = null; + + final int head = is.readUnsignedByte(); + + /** first try to deserialize object without allocating object stack*/ + switch (head) { + case NULL: + break; + case BOOLEAN_TRUE: + ret = Boolean.TRUE; + break; + case BOOLEAN_FALSE: + ret = Boolean.FALSE; + break; + case INTEGER_MINUS_1: + ret = Integer.valueOf(-1); + break; + case INTEGER_0: + ret = Integer.valueOf(0); + break; + case INTEGER_1: + ret = Integer.valueOf(1); + break; + case INTEGER_2: + ret = Integer.valueOf(2); + break; + case INTEGER_3: + ret = Integer.valueOf(3); + break; + case INTEGER_4: + ret = Integer.valueOf(4); + break; + case INTEGER_5: + ret = Integer.valueOf(5); + break; + case INTEGER_6: + ret = Integer.valueOf(6); + break; + case INTEGER_7: + ret = Integer.valueOf(7); + break; + case INTEGER_8: + ret = Integer.valueOf(8); + break; + case INTEGER_MINUS_MAX: + ret = Integer.valueOf(Integer.MIN_VALUE); + break; + case INTEGER_255: + ret = Integer.valueOf(is.readUnsignedByte()); + break; + case INTEGER_PACK_NEG: + ret = Integer.valueOf(-JdbmUtil.unpackInt(is)); + break; + case INTEGER_PACK: + ret = Integer.valueOf(JdbmUtil.unpackInt(is)); + break; + case LONG_MINUS_1: + ret = 
Long.valueOf(-1); + break; + case LONG_0: + ret = Long.valueOf(0); + break; + case LONG_1: + ret = Long.valueOf(1); + break; + case LONG_2: + ret = Long.valueOf(2); + break; + case LONG_3: + ret = Long.valueOf(3); + break; + case LONG_4: + ret = Long.valueOf(4); + break; + case LONG_5: + ret = Long.valueOf(5); + break; + case LONG_6: + ret = Long.valueOf(6); + break; + case LONG_7: + ret = Long.valueOf(7); + break; + case LONG_8: + ret = Long.valueOf(8); + break; + case LONG_255: + ret = Long.valueOf(is.readUnsignedByte()); + break; + case LONG_PACK_NEG: + ret = Long.valueOf(-JdbmUtil.unpackLong(is)); + break; + case LONG_PACK: + ret = Long.valueOf(JdbmUtil.unpackLong(is)); + break; + case LONG_MINUS_MAX: + ret = Long.valueOf(Long.MIN_VALUE); + break; + case SHORT_MINUS_1: + ret = Short.valueOf((short) -1); + break; + case SHORT_0: + ret = Short.valueOf((short) 0); + break; + case SHORT_1: + ret = Short.valueOf((short) 1); + break; + case SHORT_255: + ret = Short.valueOf((short) is.readUnsignedByte()); + break; + case SHORT_FULL: + ret = Short.valueOf(is.readShort()); + break; + case BYTE_MINUS_1: + ret = Byte.valueOf((byte) -1); + break; + case BYTE_0: + ret = Byte.valueOf((byte) 0); + break; + case BYTE_1: + ret = Byte.valueOf((byte) 1); + break; + case BYTE_FULL: + ret = Byte.valueOf(is.readByte()); + break; + case SHORT_ARRAY: + int size = JdbmUtil.unpackInt(is); + ret = new short[size]; + for(int i=0;i deserializeArrayList(DataInput is, FastArrayList objectStack) throws IOException { + int size = JdbmUtil.unpackInt(is); + ArrayList s = new ArrayList(size); + objectStack.add(s); + for (int i = 0; i < size; i++) { + s.add(deserialize(is, objectStack)); + } + return s; + } + + private ArrayList deserializeArrayListPackedLong(DataInput is) throws IOException { + int size = is.readUnsignedByte(); + if (size < 0) + throw new EOFException(); + + ArrayList s = new ArrayList(size); + for (int i = 0; i < size; i++) { + long l = JdbmUtil.unpackLong(is); + if (l == 0) + s.add(null); + else + s.add(Long.valueOf(l - 1)); + } + return s; + } + + + private java.util.LinkedList deserializeLinkedList(DataInput is, FastArrayList objectStack) throws IOException { + int size = JdbmUtil.unpackInt(is); + java.util.LinkedList s = new java.util.LinkedList(); + objectStack.add(s); + for (int i = 0; i < size; i++) + s.add(deserialize(is, objectStack)); + return s; + } + + + private Vector deserializeVector(DataInput is, FastArrayList objectStack) throws IOException { + int size = JdbmUtil.unpackInt(is); + Vector s = new Vector(size); + objectStack.add(s); + for (int i = 0; i < size; i++) + s.add(deserialize(is, objectStack)); + return s; + } + + + private HashSet deserializeHashSet(DataInput is, FastArrayList objectStack) throws IOException { + int size = JdbmUtil.unpackInt(is); + HashSet s = new HashSet(size); + objectStack.add(s); + for (int i = 0; i < size; i++) + s.add(deserialize(is, objectStack)); + return s; + } + + + private LinkedHashSet deserializeLinkedHashSet(DataInput is, FastArrayList objectStack) throws IOException { + int size = JdbmUtil.unpackInt(is); + LinkedHashSet s = new LinkedHashSet(size); + objectStack.add(s); + for (int i = 0; i < size; i++) + s.add(deserialize(is, objectStack)); + return s; + } + + + private TreeSet deserializeTreeSet(DataInput is, FastArrayList objectStack) throws IOException { + int size = JdbmUtil.unpackInt(is); + TreeSet s = new TreeSet(); + objectStack.add(s); + Comparator comparator = (Comparator) deserialize(is, objectStack); + if (comparator != null) + s = new 
TreeSet(comparator); + + for (int i = 0; i < size; i++) + s.add(deserialize(is, objectStack)); + return s; + } + + + private TreeMap deserializeTreeMap(DataInput is, FastArrayList objectStack) throws IOException { + int size = JdbmUtil.unpackInt(is); + + TreeMap s = new TreeMap(); + objectStack.add(s); + Comparator comparator = (Comparator) deserialize(is, objectStack); + if (comparator != null) + s = new TreeMap(comparator); + for (int i = 0; i < size; i++) + s.put(deserialize(is, objectStack), deserialize(is, objectStack)); + return s; + } + + + private HashMap deserializeHashMap(DataInput is, FastArrayList objectStack) throws IOException { + int size = JdbmUtil.unpackInt(is); + + HashMap s = new HashMap(size); + objectStack.add(s); + for (int i = 0; i < size; i++) + s.put(deserialize(is, objectStack), deserialize(is, objectStack)); + return s; + } + + private IdentityHashMap deserializeIdentityHashMap(DataInput is, FastArrayList objectStack) throws IOException { + int size = JdbmUtil.unpackInt(is); + + IdentityHashMap s = new IdentityHashMap(size); + objectStack.add(s); + for (int i = 0; i < size; i++) + s.put(deserialize(is, objectStack), deserialize(is, objectStack)); + return s; + } + + private LinkedHashMap deserializeLinkedHashMap(DataInput is, FastArrayList objectStack) throws IOException { + int size = JdbmUtil.unpackInt(is); + + LinkedHashMap s = new LinkedHashMap(size); + objectStack.add(s); + for (int i = 0; i < size; i++) + s.put(deserialize(is, objectStack), deserialize(is, objectStack)); + return s; + } + + + private Hashtable deserializeHashtable(DataInput is, FastArrayList objectStack) throws IOException { + int size = JdbmUtil.unpackInt(is); + + Hashtable s = new Hashtable(size); + objectStack.add(s); + for (int i = 0; i < size; i++) + s.put(deserialize(is, objectStack), deserialize(is, objectStack)); + return s; + } + + + private Properties deserializeProperties(DataInput is, FastArrayList objectStack) throws IOException { + int size = JdbmUtil.unpackInt(is); + + Properties s = new Properties(); + objectStack.add(s); + for (int i = 0; i < size; i++) + s.put(deserialize(is, objectStack), deserialize(is, objectStack)); + return s; + } + + + + +} diff --git a/src/test/java/net/kotek/jdbm/ConcurrentMapInterfaceTest.java b/src/test/java/net/kotek/jdbm/ConcurrentMapInterfaceTest.java new file mode 100644 index 000000000..70ce81947 --- /dev/null +++ b/src/test/java/net/kotek/jdbm/ConcurrentMapInterfaceTest.java @@ -0,0 +1,809 @@ +/* + * Copyright (C) 2008 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package net.kotek.jdbm; + +import java.util.concurrent.ConcurrentMap; + +/** + * Tests representing the contract of {@link ConcurrentMap}. Concrete + * subclasses of this base class test conformance of concrete + * {@link ConcurrentMap} subclasses to that contract. + * + *
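+ * A concrete subclass only has to supply map construction and sample
+ * keys/values; {@code HashMap3Test} elsewhere in this change is one such
+ * subclass.
+ *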

The tests in this class for null keys and values only check maps for + * which null keys and values are not allowed. There are currently no + * {@link ConcurrentMap} implementations that support nulls. + * + * @author Jared Levy + */ +public abstract class ConcurrentMapInterfaceTest + extends MapInterfaceTest { + + protected ConcurrentMapInterfaceTest(boolean allowsNullKeys, + boolean allowsNullValues, boolean supportsPut, boolean supportsRemove, + boolean supportsClear, boolean supportsIteratorRemove) { + super(allowsNullKeys, allowsNullValues, supportsPut, supportsRemove, + supportsClear,supportsIteratorRemove); + } + + /** + * Creates a new value that is not expected to be found in + * {@link #makePopulatedMap()} and differs from the value returned by + * {@link #getValueNotInPopulatedMap()}. + * + * @return a value + * @throws UnsupportedOperationException if it's not possible to make a value + * that will not be found in the map + */ + protected abstract V getSecondValueNotInPopulatedMap() + throws UnsupportedOperationException; + + @Override protected abstract ConcurrentMap makeEmptyMap() + throws UnsupportedOperationException; + + @Override protected abstract ConcurrentMap makePopulatedMap() + throws UnsupportedOperationException; + + @Override protected ConcurrentMap makeEitherMap() { + try { + return makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return makeEmptyMap(); + } + } + + public void testPutIfAbsentNewKey() { + final ConcurrentMap map; + final K keyToPut; + final V valueToPut; + try { + map = makeEitherMap(); + keyToPut = getKeyNotInPopulatedMap(); + valueToPut = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + if (supportsPut) { + int initialSize = map.size(); + V oldValue = map.putIfAbsent(keyToPut, valueToPut); + assertEquals(valueToPut, map.get(keyToPut)); + assertTrue(map.containsKey(keyToPut)); + assertTrue(map.containsValue(valueToPut)); + + assertEquals(initialSize + 1, map.size()); + assertNull(oldValue); + } else { + try { + map.putIfAbsent(keyToPut, valueToPut); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testPutIfAbsentExistingKey() { + final ConcurrentMap map; + final K keyToPut; + final V valueToPut; + try { + map = makePopulatedMap(); + valueToPut = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToPut = map.keySet().iterator().next(); + if (supportsPut) { + V oldValue = map.get(keyToPut); + int initialSize = map.size(); + assertEquals(oldValue, map.putIfAbsent(keyToPut, valueToPut)); + assertEquals(oldValue, map.get(keyToPut)); + assertTrue(map.containsKey(keyToPut)); + assertTrue(map.containsValue(oldValue)); + assertFalse(map.containsValue(valueToPut)); + assertEquals(initialSize, map.size()); + } else { + try { + map.putIfAbsent(keyToPut, valueToPut); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. 
+ } + } + assertInvariants(map); + } + + public void testPutIfAbsentNullKey() { + if (allowsNullKeys) { + return; // Not yet implemented + } + final ConcurrentMap map; + final V valueToPut; + try { + map = makeEitherMap(); + valueToPut = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + int initialSize = map.size(); + if (supportsPut) { + try { + map.putIfAbsent(null, valueToPut); + fail("Expected NullPointerException"); + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + map.putIfAbsent(null, valueToPut); + fail("Expected UnsupportedOperationException or NullPointerException"); + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. + } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testPutIfAbsentNewKeyNullValue() { + if (allowsNullValues) { + return; // Not yet implemented + } + final ConcurrentMap map; + final K keyToPut; + try { + map = makeEitherMap(); + keyToPut = getKeyNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + int initialSize = map.size(); + if (supportsPut) { + try { + map.putIfAbsent(keyToPut, null); + fail("Expected NullPointerException"); + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + map.putIfAbsent(keyToPut, null); + fail("Expected UnsupportedOperationException or NullPointerException"); + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. + } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + + public void testRemoveKeyValueExisting() { + final ConcurrentMap map; + final K keyToRemove; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToRemove = map.keySet().iterator().next(); + V oldValue = map.get(keyToRemove); + if (supportsRemove) { + int initialSize = map.size(); + assertTrue(map.remove(keyToRemove, oldValue)); + assertFalse(map.containsKey(keyToRemove)); + assertEquals(initialSize - 1, map.size()); + } else { + try { + map.remove(keyToRemove, oldValue); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testRemoveKeyValueMissingKey() { + final ConcurrentMap map; + final K keyToRemove; + final V valueToRemove; + try { + map = makePopulatedMap(); + keyToRemove = getKeyNotInPopulatedMap(); + valueToRemove = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + if (supportsRemove) { + int initialSize = map.size(); + assertFalse(map.remove(keyToRemove, valueToRemove)); + assertEquals(initialSize, map.size()); + } else { + try { + map.remove(keyToRemove, valueToRemove); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. 
+ } + } + assertInvariants(map); + } + + public void testRemoveKeyValueDifferentValue() { + final ConcurrentMap map; + final K keyToRemove; + final V valueToRemove; + try { + map = makePopulatedMap(); + valueToRemove = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToRemove = map.keySet().iterator().next(); + if (supportsRemove) { + int initialSize = map.size(); + V oldValue = map.get(keyToRemove); + assertFalse(map.remove(keyToRemove, valueToRemove)); + assertEquals(oldValue, map.get(keyToRemove)); + assertTrue(map.containsKey(keyToRemove)); + assertEquals(initialSize, map.size()); + } else { + try { + map.remove(keyToRemove, valueToRemove); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testRemoveKeyValueNullKey() { + if (allowsNullKeys) { + return; // Not yet implemented + } + final ConcurrentMap map; + final V valueToRemove; + try { + map = makeEitherMap(); + valueToRemove = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + int initialSize = map.size(); + if (supportsRemove) { + try { + assertFalse(map.remove(null, valueToRemove)); + } catch (NullPointerException e) { + // Optional. + } + } else { + try { + assertFalse(map.remove(null, valueToRemove)); + } catch (UnsupportedOperationException e) { + // Optional. + } catch (NullPointerException e) { + // Optional. + } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testRemoveKeyValueExistingKeyNullValue() { + if (allowsNullValues) { + return; // Not yet implemented + } + final ConcurrentMap map; + final K keyToRemove; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToRemove = map.keySet().iterator().next(); + int initialSize = map.size(); + if (supportsRemove) { + try { + assertFalse(map.remove(keyToRemove, null)); + } catch (NullPointerException e) { + // Optional. + } + } else { + try { + assertFalse(map.remove(keyToRemove, null)); + } catch (UnsupportedOperationException e) { + // Optional. + } catch (NullPointerException e) { + // Optional. + } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testRemoveKeyValueMissingKeyNullValue() { + if (allowsNullValues) { + return; // Not yet implemented + } + final ConcurrentMap map; + final K keyToRemove; + try { + map = makeEitherMap(); + keyToRemove = getKeyNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + int initialSize = map.size(); + if (supportsRemove) { + try { + assertFalse(map.remove(keyToRemove, null)); + } catch (NullPointerException e) { + // Optional. + } + } else { + try { + assertFalse(map.remove(keyToRemove, null)); + } catch (UnsupportedOperationException e) { + // Optional. + } catch (NullPointerException e) { + // Optional. 
+ } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + /* Replace2 tests call 2-parameter replace(key, value) */ + + public void testReplace2ExistingKey() { + final ConcurrentMap map; + final K keyToReplace; + final V newValue; + try { + map = makePopulatedMap(); + newValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToReplace = map.keySet().iterator().next(); + if (supportsPut) { + V oldValue = map.get(keyToReplace); + int initialSize = map.size(); + assertEquals(oldValue, map.replace(keyToReplace, newValue)); + assertEquals(newValue, map.get(keyToReplace)); + assertTrue(map.containsKey(keyToReplace)); + assertTrue(map.containsValue(newValue)); + assertEquals(initialSize, map.size()); + } else { + try { + map.replace(keyToReplace, newValue); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testReplace2MissingKey() { + final ConcurrentMap map; + final K keyToReplace; + final V newValue; + try { + map = makeEitherMap(); + keyToReplace = getKeyNotInPopulatedMap(); + newValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + if (supportsPut) { + int initialSize = map.size(); + assertNull(map.replace(keyToReplace, newValue)); + assertNull(map.get(keyToReplace)); + assertFalse(map.containsKey(keyToReplace)); + assertFalse(map.containsValue(newValue)); + assertEquals(initialSize, map.size()); + } else { + try { + map.replace(keyToReplace, newValue); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testReplace2NullKey() { + if (allowsNullKeys) { + return; // Not yet implemented + } + final ConcurrentMap map; + final V valueToReplace; + try { + map = makeEitherMap(); + valueToReplace = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + int initialSize = map.size(); + if (supportsPut) { + try { + assertNull(map.replace(null, valueToReplace)); + } catch (NullPointerException e) { + // Optional. + } + } else { + try { + assertNull(map.replace(null, valueToReplace)); + } catch (UnsupportedOperationException e) { + // Optional. + } catch (NullPointerException e) { + // Optional. + } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testReplace2ExistingKeyNullValue() { + if (allowsNullValues) { + return; // Not yet implemented + } + final ConcurrentMap map; + final K keyToReplace; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToReplace = map.keySet().iterator().next(); + int initialSize = map.size(); + if (supportsPut) { + try { + map.replace(keyToReplace, null); + fail("Expected NullPointerException"); + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + map.replace(keyToReplace, null); + fail("Expected UnsupportedOperationException or NullPointerException"); + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. 
+ } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testReplace2MissingKeyNullValue() { + if (allowsNullValues) { + return; // Not yet implemented + } + final ConcurrentMap map; + final K keyToReplace; + try { + map = makeEitherMap(); + keyToReplace = getKeyNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + int initialSize = map.size(); + if (supportsPut) { + try { + assertNull(map.replace(keyToReplace, null)); + } catch (NullPointerException e) { + // Optional. + } + } else { + try { + assertNull(map.replace(keyToReplace, null)); + } catch (UnsupportedOperationException e) { + // Optional. + } catch (NullPointerException e) { + // Optional. + } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + /* + * Replace3 tests call 3-parameter replace(key, oldValue, newValue) + */ + + public void testReplace3ExistingKeyValue() { + final ConcurrentMap map; + final K keyToReplace; + final V oldValue; + final V newValue; + try { + map = makePopulatedMap(); + newValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToReplace = map.keySet().iterator().next(); + oldValue = map.get(keyToReplace); + if (supportsPut) { + int initialSize = map.size(); + assertTrue(map.replace(keyToReplace, oldValue, newValue)); + assertEquals(newValue, map.get(keyToReplace)); + assertTrue(map.containsKey(keyToReplace)); + assertTrue(map.containsValue(newValue)); + assertFalse(map.containsValue(oldValue)); + assertEquals(initialSize, map.size()); + } else { + try { + map.replace(keyToReplace, oldValue, newValue); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testReplace3ExistingKeyDifferentValue() { + final ConcurrentMap map; + final K keyToReplace; + final V oldValue; + final V newValue; + try { + map = makePopulatedMap(); + oldValue = getValueNotInPopulatedMap(); + newValue = getSecondValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToReplace = map.keySet().iterator().next(); + final V originalValue = map.get(keyToReplace); + int initialSize = map.size(); + if (supportsPut) { + assertFalse(map.replace(keyToReplace, oldValue, newValue)); + } else { + try { + map.replace(keyToReplace, oldValue, newValue); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertTrue(map.containsKey(keyToReplace)); + assertFalse(map.containsValue(newValue)); + assertFalse(map.containsValue(oldValue)); + assertEquals(originalValue, map.get(keyToReplace)); + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testReplace3MissingKey() { + final ConcurrentMap map; + final K keyToReplace; + final V oldValue; + final V newValue; + try { + map = makeEitherMap(); + keyToReplace = getKeyNotInPopulatedMap(); + oldValue = getValueNotInPopulatedMap(); + newValue = getSecondValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + int initialSize = map.size(); + if (supportsPut) { + assertFalse(map.replace(keyToReplace, oldValue, newValue)); + } else { + try { + map.replace(keyToReplace, oldValue, newValue); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. 
+ } + } + assertFalse(map.containsKey(keyToReplace)); + assertFalse(map.containsValue(newValue)); + assertFalse(map.containsValue(oldValue)); + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testReplace3NullKey() { + if (allowsNullKeys) { + return; // Not yet implemented + } + final ConcurrentMap map; + final V oldValue; + final V newValue; + try { + map = makeEitherMap(); + oldValue = getValueNotInPopulatedMap(); + newValue = getSecondValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + int initialSize = map.size(); + if (supportsPut) { + try { + assertFalse(map.replace(null, oldValue, newValue)); + } catch (NullPointerException e) { + // Optional. + } + } else { + try { + assertFalse(map.replace(null, oldValue, newValue)); + } catch (UnsupportedOperationException e) { + // Optional. + } catch (NullPointerException e) { + // Optional. + } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testReplace3ExistingKeyNullOldValue() { + if (allowsNullValues) { + return; // Not yet implemented + } + final ConcurrentMap map; + final K keyToReplace; + final V newValue; + try { + map = makePopulatedMap(); + newValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToReplace = map.keySet().iterator().next(); + final V originalValue = map.get(keyToReplace); + int initialSize = map.size(); + if (supportsPut) { + try { + assertFalse(map.replace(keyToReplace, null, newValue)); + } catch (NullPointerException e) { + // Optional. + } + } else { + try { + assertFalse(map.replace(keyToReplace, null, newValue)); + } catch (UnsupportedOperationException e) { + // Optional. + } catch (NullPointerException e) { + // Optional. + } + } + assertEquals(initialSize, map.size()); + assertEquals(originalValue, map.get(keyToReplace)); + assertInvariants(map); + } + + public void testReplace3MissingKeyNullOldValue() { + if (allowsNullValues) { + return; // Not yet implemented + } + final ConcurrentMap map; + final K keyToReplace; + final V newValue; + try { + map = makeEitherMap(); + keyToReplace = getKeyNotInPopulatedMap(); + newValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + int initialSize = map.size(); + if (supportsPut) { + try { + assertFalse(map.replace(keyToReplace, null, newValue)); + } catch (NullPointerException e) { + // Optional. + } + } else { + try { + assertFalse(map.replace(keyToReplace, null, newValue)); + } catch (UnsupportedOperationException e) { + // Optional. + } catch (NullPointerException e) { + // Optional. + } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testReplace3MissingKeyNullNewValue() { + if (allowsNullValues) { + return; // Not yet implemented + } + final ConcurrentMap map; + final K keyToReplace; + final V oldValue; + try { + map = makeEitherMap(); + keyToReplace = getKeyNotInPopulatedMap(); + oldValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + int initialSize = map.size(); + if (supportsPut) { + try { + map.replace(keyToReplace, oldValue, null); + } catch (NullPointerException e) { + // Optional. + } + } else { + try { + map.replace(keyToReplace, oldValue, null); + } catch (UnsupportedOperationException e) { + // Optional. + } catch (NullPointerException e) { + // Optional. 
+ } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testReplace3ExistingKeyValueNullNewValue() { + if (allowsNullValues) { + return; // Not yet implemented + } + final ConcurrentMap map; + final K keyToReplace; + final V oldValue; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToReplace = map.keySet().iterator().next(); + oldValue = map.get(keyToReplace); + int initialSize = map.size(); + if (supportsPut) { + try { + map.replace(keyToReplace, oldValue, null); + fail("Expected NullPointerException"); + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + map.replace(keyToReplace, oldValue, null); + fail("Expected UnsupportedOperationException or NullPointerException"); + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. + } + } + assertEquals(initialSize, map.size()); + assertEquals(oldValue, map.get(keyToReplace)); + assertInvariants(map); + } +} diff --git a/src/test/java/net/kotek/jdbm/HashMap2Test.java b/src/test/java/net/kotek/jdbm/HashMap2Test.java new file mode 100644 index 000000000..9f54ef756 --- /dev/null +++ b/src/test/java/net/kotek/jdbm/HashMap2Test.java @@ -0,0 +1,328 @@ +package net.kotek.jdbm; + +import org.junit.Test; + +import java.io.IOException; +import java.util.*; + +import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; + +public class HashMap2Test extends JdbmTestCase { + + + void printMap(HashMap2 m){ + System.out.println(toString(m.segmentRecids, recman)); + } + + static String toString(long[] rootRecids, RecordStore recman){ + String s = "Arrays.asList(\n"; + for(long r:rootRecids){ + s+= (r==0)?null:recursiveToString(r,"", recman); + } + //s=s.substring(0,s.length()-2); + s+=");"; + return s; + } + + static private String recursiveToString(long r, String prefix, RecordStore recman) { + prefix+=" "; + String s=""; + long[][] nn = recman.recordGet(r, HashMap2.DIR_SERIALIZER); + if(nn==null){ + s+=prefix+"null,\n"; + }else{ + s+= prefix+"Arrays.asList(\n"; + for(long[] n:nn){ + if(n==null){ + s+=prefix+" null,\n"; + }else{ + s+=prefix+" Arrays.asList(\n"; + for(long r2:n){ + if(r2==0){ + s+=prefix+" "+"null,\n"; + }else{ + if((r2&1)==0){ + s+=recursiveToString(r2>>>1, prefix+" ", recman); + }else{ + s+=prefix+" "+"Array.asList("; + TreeMap m = new TreeMap(); + HashMap2.LinkedNode node = + (HashMap2.LinkedNode) recman.recordGet + (r2>>>1,new HashMap2(recman,0L).LN_SERIALIZER ); + while(node!=null){ + m.put(node.key, node.value); + node = (HashMap2.LinkedNode) recman.recordGet (node.next,new HashMap2(recman,0L).LN_SERIALIZER ); + } + for(Object k:m.keySet()){ + s+= k+","+m.get(k)+","; + } + //s=s.substring(0,s.length()-1); + s+="),\n"; + } + } + } + s+=prefix+" ),\n"; + } + } +// s=s.substring(0,s.length()-2); + s+=prefix+"),\n"; + } + return s; + } + + + @Test public void testDirSerializer() throws IOException { + + long[][] l = new long[16][]; + l[3] = new long[] {0,0,12,13,14,0,Long.MAX_VALUE,0}; + l[6] = new long[] {1,2,3,4,5,6,7,8}; + + DataOutput2 out = new DataOutput2(); + HashMap2.DIR_SERIALIZER.serialize(out,l); + + DataInput2 in = swap(out); + + long[][] b = HashMap2.DIR_SERIALIZER.deserialize(in, -1); + + assertEquals(null, b[0]); + assertEquals(null, b[1]); + assertEquals(null, b[2]); + assertEquals(Arrays.toString(new long[] {0,0,12,13,14,0,Long.MAX_VALUE,0}), Arrays.toString(b[3])); + assertEquals(null, b[4]); + assertEquals(null, b[5]); + 
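+        //the populated slot must round-trip element-exact, while untouched slots stay null: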
assertEquals(Arrays.toString(new long[] {1,2,3,4,5,6,7,8}), Arrays.toString(b[6])); + assertEquals(null, b[7]); + + } + + @Test public void ln_serialization() throws IOException { + HashMap2.LinkedNode n = new HashMap2.LinkedNode(123456, 123L, 456L); + + DataOutput2 out = new DataOutput2(); + + Serializer ln_serializer = new HashMap2(recman, 0).LN_SERIALIZER; + ln_serializer.serialize(out, n); + + DataInput2 in = swap(out); + + HashMap2.LinkedNode n2 = (HashMap2.LinkedNode) ln_serializer.deserialize(in, -1); + + assertEquals(123456, n2.next); + assertEquals(123L,n2.key); + assertEquals(456L,n2.value); + } + + @Test public void test_simple_put(){ + + HashMap2 m = new HashMap2(recman,0L); + + m.put(111L, 222L); + m.put(333L, 444L); + assertTrue(m.containsKey(111L)); + assertTrue(!m.containsKey(222L)); + assertTrue(m.containsKey(333L)); + assertTrue(!m.containsKey(444L)); + + assertEquals(222L, m.get(111L)); + assertEquals(null, m.get(222L)); + assertEquals(444l, m.get(333L)); + } + + @Test public void test_hash_collision(){ + HashMap2 m = new HashMap2(recman,0L){ + @Override + protected int hash(Object key) { + return 0; + } + }; + + for(long i = 0;i<20;i++){ + m.put(i,i+100); + } + + for(long i = 0;i<20;i++){ + assertTrue(m.containsKey(i)); + assertEquals(i+100, m.get(i)); + } + + m.put(11L, 1111L); + assertEquals(1111L,m.get(11L) ); + } + + @Test public void test_hash_dir_expand(){ + HashMap2 m = new HashMap2(recman,0L){ + @Override + protected int hash(Object key) { + return 0; + } + }; + + for(long i = 0;i>>1; + + for(long i = HashMap2.BUCKET_OVERFLOW -1; i>=0; i--){ + assertTrue(recid!=0); + HashMap2.LinkedNode n = (HashMap2.LinkedNode) recman.recordGet(recid, m.LN_SERIALIZER); + assertEquals(i, n.key); + assertEquals(i, n.value); + recid = n.next; + } + + //adding one more item should trigger dir expansion to next level + m.put((long)HashMap2.BUCKET_OVERFLOW, (long)HashMap2.BUCKET_OVERFLOW); + + recid = m.segmentRecids[0]; + + l = recman.recordGet(recid, HashMap2.DIR_SERIALIZER); + assertNotNull(l[0]); + for(int j=1;j<8;j++){ //all others should be null + assertEquals(null, l[j]); + } + + assertEquals(0, l[0][0]&1); //last bite indicates leaf + for(int j=1;j<8;j++){ //all others should be zero + assertEquals(0, l[0][j]); + } + + recid = l[0][0]>>>1; + + + l = recman.recordGet(recid, HashMap2.DIR_SERIALIZER); + assertNotNull(l[0]); + for(int j=1;j<8;j++){ //all others should be null + assertEquals(null, l[j]); + } + + assertEquals(1, l[0][0]&1); //last bite indicates leaf + for(int j=1;j<8;j++){ //all others should be zero + assertEquals(0, l[0][j]); + } + + recid = l[0][0]>>>1; + + for(long i = 0; i<=HashMap2.BUCKET_OVERFLOW; i++){ + assertTrue(recid!=0); + HashMap2.LinkedNode n = (HashMap2.LinkedNode) recman.recordGet(recid, m.LN_SERIALIZER); + + assertNotNull(n); + assertEquals(i, n.key); + assertEquals(i, n.value); + recid = n.next; + } + + } + + + @Test public void test_delete(){ + HashMap2 m = new HashMap2(recman,0L){ + @Override + protected int hash(Object key) { + return 0; + } + }; + + for(long i = 0;i<20;i++){ + m.put(i,i+100); + } + + for(long i = 0;i<20;i++){ + assertTrue(m.containsKey(i)); + assertEquals(i+100, m.get(i)); + } + + + for(long i = 0;i<20;i++){ + m.remove(i); + } + + for(long i = 0;i<20;i++){ + assertTrue(!m.containsKey(i)); + assertEquals(null, m.get(i)); + } + } + + @Test public void clear(){ + HashMap2 m = new HashMap2(recman,0L); + for(Integer i=0;i<1e5;i++){ + m.put(i,i); + } + m.clear(); + assertTrue(m.isEmpty()); + assertEquals(0, m.size()); + } + + 
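+    //minimal sketch of update semantics: put() on an existing key should replace the
+    //value and return the previous one; this assumes HashMap2 follows the plain
+    //java.util.Map contract (the MapInterfaceTest suite below exercises the same rules)
+    @Test public void test_simple_update(){
+        HashMap2 m = new HashMap2(recman,0L);
+
+        m.put(111L, 222L);
+        assertEquals(222L, m.put(111L, 333L)); //previous value comes back
+        assertEquals(333L, m.get(111L));       //new value replaced it
+        assertTrue(m.containsKey(111L));
+    }
+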
@Test //(timeout = 10000) + public void testIteration(){ + HashMap2 m = new HashMap2(recman, 0L){ + @Override + protected int hash(Object key) { + return ((Integer)key).intValue(); + } + }; + + final int max = 140; + final int inc = 111111; + + for(Integer i=0;i keys = m.keySet().iterator(); + for(Integer i=0;i vals = m.values().iterator(); + for(Integer i=inc;i { + + public HashMap3Test() { + super(false, false, true, true, true, true); + } + + RecordStore r; + + public void setUp() throws Exception { + r = new RecordStoreCache("testdb/"+System.currentTimeMillis(), false); + } + + @Override + protected Integer getKeyNotInPopulatedMap() throws UnsupportedOperationException { + return -100; + } + + @Override + protected String getValueNotInPopulatedMap() throws UnsupportedOperationException { + return "XYZ"; + } + + @Override + protected String getSecondValueNotInPopulatedMap() throws UnsupportedOperationException { + return "AAAA"; + } + + @Override + protected ConcurrentMap makeEmptyMap() throws UnsupportedOperationException { + return new HashMap2(r,0L); + } + + @Override + protected ConcurrentMap makePopulatedMap() throws UnsupportedOperationException { + ConcurrentMap map = makeEmptyMap(); + for (int i = 0; i < 100; i++) + map.put(i, "aa" + i); + return map; + } + +} diff --git a/src/test/java/net/kotek/jdbm/JdbmTestCase.java b/src/test/java/net/kotek/jdbm/JdbmTestCase.java new file mode 100644 index 000000000..eed682044 --- /dev/null +++ b/src/test/java/net/kotek/jdbm/JdbmTestCase.java @@ -0,0 +1,124 @@ +package net.kotek.jdbm; + + +import org.junit.After; +import org.junit.Before; + +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +/** + * JUnit test case which provides JDBM specific staff + */ +abstract public class JdbmTestCase { + + + String fileName; + + RecordStore recman; + + @Before + public void setUp() throws Exception { + new File("testdb").mkdirs(); + fileName = "testdb/test"+Math.random(); + + recman = openRecordManager(); + } + + protected RecordStore openRecordManager() { + return new RecordStore(fileName); + } + + @After + public void tearDown() throws Exception { + for(File f:new File("testdb").listFiles()){ + if(!f.delete())f.deleteOnExit(); + } + } + + + void reopenStore() { + recman.close(); + recman = openRecordManager(); + } + + + DataInput2 swap(DataOutput2 d){ + byte[] b = d.copyBytes(); + return new DataInput2(ByteBuffer.wrap(b),0); + } + + + int countIndexRecords(){ + int ret = 0; + final long indexFileSize = recman.indexBufs[0].getLong(RecordStore.RECID_CURRENT_INDEX_FILE_SIZE*8); + for(int pos = RecordStore.INDEX_OFFSET_START * 8; + pos getLongStack(long recid){ + ArrayList ret =new ArrayList(); + + long pagePhysid = recman.indexValGet(recid) & RecordStore.PHYS_OFFSET_MASK; + + ByteBuffer dataBuf = recman.dataBufs[((int) (pagePhysid / RecordStore.BUF_SIZE))]; + + while(pagePhysid!=0){ + final byte numberOfRecordsInPage = dataBuf.get((int) (pagePhysid% RecordStore.BUF_SIZE)); + + for(int rec = numberOfRecordsInPage; rec>0;rec--){ + final long l = dataBuf.getLong((int) (pagePhysid% RecordStore.BUF_SIZE+ rec*8)); + ret.add(l); + } + + //read location of previous page + pagePhysid = dataBuf.getLong((int)(pagePhysid% RecordStore.BUF_SIZE)) & RecordStore.PHYS_OFFSET_MASK; + } + + + return ret; + } + + int readUnsignedShort(ByteBuffer buf, long pos) throws IOException { + return (((int) (buf.get((int) pos) & 0xff) << 8) | + ((int) 
(buf.get((int) (pos+1)) & 0xff))); + } + + + final List arrayList(long... vals){ + ArrayList ret = new ArrayList(); + for(Long l:vals){ + ret.add(l); + } + return ret; + } + + final Map getDataContent(){ + Map ret = new TreeMap(); + final long indexFileSize = recman.indexBufs[0].getLong(RecordStore.RECID_CURRENT_INDEX_FILE_SIZE*8); + for(long recid = RecordStore.INDEX_OFFSET_START ; + recid*8-1; i = i + 1 + i/1111){ //overflow is expected + out.pos = 0; + + JdbmUtil.packInt(out, i); + in.pos = 0; + in.buf.clear(); + + int i2 = JdbmUtil.unpackInt(in); + + Assert.assertEquals(i, i2); + + } + + } + + public void testPackLong() throws Exception { + + DataOutput2 out = new DataOutput2(); + DataInput2 in = new DataInput2(ByteBuffer.wrap(out.buf,0, out.pos),0); + for(long i = 0;i>-1L ; i=i+1 + i/111){ //overflow is expected + out.pos = 0; + + JdbmUtil.packLong(out, i); + in.pos = 0; + in.buf.clear(); + + long i2 = JdbmUtil.unpackLong(in); + Assert.assertEquals(i, i2); + + } + + } + +} diff --git a/src/test/java/net/kotek/jdbm/JunkTest.java b/src/test/java/net/kotek/jdbm/JunkTest.java new file mode 100644 index 000000000..338b07fbb --- /dev/null +++ b/src/test/java/net/kotek/jdbm/JunkTest.java @@ -0,0 +1,40 @@ +package net.kotek.jdbm; + +import org.junit.Assert; +import org.junit.Assume; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.MappedByteBuffer; +import java.nio.channels.FileChannel; + +import static org.junit.Assert.assertEquals; + +public class JunkTest extends JdbmTestCase{ + + + @Test + public void test_mapped_byte_buffer_reopen() throws IOException { + + Assume.assumeTrue(false); + + File f = File.createTempFile("whatever","ads"); + FileChannel c = new RandomAccessFile(f,"rw").getChannel(); + + MappedByteBuffer b = c.map(FileChannel.MapMode.READ_WRITE, 0, 8); + b.putLong(0, 111L); + + c.close(); + + c = new RandomAccessFile(f,"rw").getChannel(); + b = c.map(FileChannel.MapMode.READ_WRITE, 0, 8); + long l = b.getLong(0); + + Assert.assertEquals(111L, l); + + } + + +} diff --git a/src/test/java/net/kotek/jdbm/LongConcurrentHashMapTest.java b/src/test/java/net/kotek/jdbm/LongConcurrentHashMapTest.java new file mode 100644 index 000000000..3005b4e19 --- /dev/null +++ b/src/test/java/net/kotek/jdbm/LongConcurrentHashMapTest.java @@ -0,0 +1,270 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/licenses/publicdomain + * Other contributors include Andrew Wright, Jeffrey Hayes, + * Pat Fisher, Mike Judd. + */ + +package net.kotek.jdbm; + +import junit.framework.*; +import java.util.*; +import java.io.*; + +public class LongConcurrentHashMapTest extends TestCase{ + + + + /** + * Create a map from Integers 1-5 to Strings "A"-"E". 
+ */ + private static LongConcurrentHashMap map5() { + LongConcurrentHashMap map = new LongConcurrentHashMap(5); + assertTrue(map.isEmpty()); + map.put(1, "A"); + map.put(2, "B"); + map.put(3, "C"); + map.put(4, "D"); + map.put(5, "E"); + assertFalse(map.isEmpty()); + assertEquals(5, map.size()); + return map; + } + + /** + * clear removes all pairs + */ + public void testClear() { + LongConcurrentHashMap map = map5(); + map.clear(); + assertEquals(map.size(), 0); + } + + + + /** + * contains returns true for contained value + */ + public void testContains() { + LongConcurrentHashMap map = map5(); + assertTrue(map.contains("A")); + assertFalse(map.contains("Z")); + } + + /** + * containsKey returns true for contained key + */ + public void testContainsKey() { + LongConcurrentHashMap map = map5(); + assertTrue(map.containsKey(1)); + assertFalse(map.containsKey(0)); + } + + /** + * containsValue returns true for held values + */ + public void testContainsValue() { + LongConcurrentHashMap map = map5(); + assertTrue(map.containsValue("A")); + assertFalse(map.containsValue("Z")); + } + + /** + * enumeration returns an enumeration containing the correct + * elements + */ + public void testEnumeration() { + LongConcurrentHashMap map = map5(); + Iterator e = map.valuesIterator(); + int count = 0; + while(e.hasNext()){ + count++; + e.next(); + } + assertEquals(5, count); + } + + /** + * get returns the correct element at the given key, + * or null if not present + */ + public void testGet() { + LongConcurrentHashMap map = map5(); + assertEquals("A", (String)map.get(1)); + LongConcurrentHashMap empty = new LongConcurrentHashMap(); + assertNull(map.get(-1)); + } + + /** + * isEmpty is true of empty map and false for non-empty + */ + public void testIsEmpty() { + LongConcurrentHashMap empty = new LongConcurrentHashMap(); + LongConcurrentHashMap map = map5(); + assertTrue(empty.isEmpty()); + assertFalse(map.isEmpty()); + } + + + + + /** + * putIfAbsent works when the given key is not present + */ + public void testPutIfAbsent() { + LongConcurrentHashMap map = map5(); + map.putIfAbsent(6, "Z"); + assertTrue(map.containsKey(6)); + } + + /** + * putIfAbsent does not add the pair if the key is already present + */ + public void testPutIfAbsent2() { + LongConcurrentHashMap map = map5(); + assertEquals("A", map.putIfAbsent(1, "Z")); + } + + /** + * replace fails when the given key is not present + */ + public void testReplace() { + LongConcurrentHashMap map = map5(); + assertNull(map.replace(6, "Z")); + assertFalse(map.containsKey(6)); + } + + /** + * replace succeeds if the key is already present + */ + public void testReplace2() { + LongConcurrentHashMap map = map5(); + assertNotNull(map.replace(1, "Z")); + assertEquals("Z", map.get(1)); + } + + + /** + * replace value fails when the given key not mapped to expected value + */ + public void testReplaceValue() { + LongConcurrentHashMap map = map5(); + assertEquals("A", map.get(1)); + assertFalse(map.replace(1, "Z", "Z")); + assertEquals("A", map.get(1)); + } + + /** + * replace value succeeds when the given key mapped to expected value + */ + public void testReplaceValue2() { + LongConcurrentHashMap map = map5(); + assertEquals("A", map.get(1)); + assertTrue(map.replace(1, "A", "Z")); + assertEquals("Z", map.get(1)); + } + + + /** + * remove removes the correct key-value pair from the map + */ + public void testRemove() { + LongConcurrentHashMap map = map5(); + map.remove(5); + assertEquals(4, map.size()); + assertFalse(map.containsKey(5)); + } + + /** + * 
remove(key,value) removes only if pair present + */ + public void testRemove2() { + LongConcurrentHashMap map = map5(); + map.remove(5, "E"); + assertEquals(4, map.size()); + assertFalse(map.containsKey(5)); + map.remove(4, "A"); + assertEquals(4, map.size()); + assertTrue(map.containsKey(4)); + + } + + /** + * size returns the correct values + */ + public void testSize() { + LongConcurrentHashMap map = map5(); + LongConcurrentHashMap empty = new LongConcurrentHashMap(); + assertEquals(0, empty.size()); + assertEquals(5, map.size()); + } + + + // Exception tests + + /** + * Cannot create with negative capacity + */ + public void testConstructor1() { + try { + new LongConcurrentHashMap(-1,0,1); + shouldThrow(); + } catch(IllegalArgumentException e){} + } + + /** + * Cannot create with negative concurrency level + */ + public void testConstructor2() { + try { + new LongConcurrentHashMap(1,0,-1); + shouldThrow(); + } catch(IllegalArgumentException e){} + } + + /** + * Cannot create with only negative capacity + */ + public void testConstructor3() { + try { + new LongConcurrentHashMap(-1); + shouldThrow(); + } catch(IllegalArgumentException e){} + } + + + + /** + * containsValue(null) throws NPE + */ + public void testContainsValue_NullPointerException() { + try { + LongConcurrentHashMap c = new LongConcurrentHashMap(5); + c.containsValue(null); + shouldThrow(); + } catch(NullPointerException e){} + } + + /** + * contains(null) throws NPE + */ + public void testContains_NullPointerException() { + try { + LongConcurrentHashMap c = new LongConcurrentHashMap(5); + c.contains(null); + shouldThrow(); + } catch(NullPointerException e){} + } + + + + + /** + * fail with message "should throw exception" + */ + public void shouldThrow() { + fail("Should throw exception"); + } + +} \ No newline at end of file diff --git a/src/test/java/net/kotek/jdbm/LongHashMapTest.java b/src/test/java/net/kotek/jdbm/LongHashMapTest.java new file mode 100644 index 000000000..b541ab674 --- /dev/null +++ b/src/test/java/net/kotek/jdbm/LongHashMapTest.java @@ -0,0 +1,133 @@ +/******************************************************************************* + * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ ******************************************************************************/ +package net.kotek.jdbm; + +import java.util.Iterator; +import java.util.Random; +import java.util.TreeMap; + +import junit.framework.TestCase; + +public class LongHashMapTest extends TestCase { + + public void testAll() { + LongHashMap t = new LongHashMap(); + t.put(1, "aa"); + t.put(2, "bb"); + t.put(2, "bb"); + t.put(4, "cc"); + t.put(9, "FF"); + assertEquals(4, t.size()); + t.remove(1); + assertEquals(3, t.size()); + assertEquals(t.get(1), null); + assertEquals(t.get(2), "bb"); + assertEquals(t.get(3), null); + assertEquals(t.get(4), "cc"); + assertEquals(t.get(5), null); + assertEquals(t.get(-1), null); + assertEquals(t.get(9), "FF"); + + Iterator vals = t.valuesIterator(); + assertTrue(vals.hasNext()); + assertEquals(vals.next(), "FF"); + assertTrue(vals.hasNext()); + assertEquals(vals.next(), "cc"); + assertTrue(vals.hasNext()); + assertEquals(vals.next(), "bb"); + + assertFalse(vals.hasNext()); + + t.clear(); + assertEquals(0, t.size()); + t.put(2, "bb"); + assertEquals(1, t.size()); + assertEquals(t.get(1), null); + assertEquals(t.get(2), "bb"); + assertEquals(t.get(3), null); + + } + + public void testRandomCompare() { + LongHashMap v1 = new LongHashMap(); + TreeMap v2 = new TreeMap(); + Random d = new Random(); + for (int i = 0; i < 1000; i++) { + long key = d.nextInt() % 100; + double random = d.nextDouble(); + if (random < 0.8) { +// System.out.println("put "+key); + v1.put(key, "" + key); + v2.put(key, "" + key); + } else { +// System.out.println("remove "+key); + v1.remove(key); + v2.remove(key); + } + checkEquals(v1, v2); + + } + } + + public void checkEquals(LongMap v1, TreeMap v2) { + assertEquals(v1.size(), v2.size()); + for (long k : v2.keySet()) { + assertEquals(v1.get(k), v2.get(k)); + } + + int counter = 0; + Iterator it = v1.valuesIterator(); + while (it.hasNext()) { + String v = it.next(); + long key = Long.valueOf(v); + assertEquals(v1.get(key), v); + assertEquals("" + key, v); + counter++; + } + assertEquals(counter, v2.size()); + } + + + public void test2() { + LongHashMap v1 = new LongHashMap(); + v1.put(1611, "1611"); + v1.put(15500, "15500"); + v1.put(9446, "9446"); + System.out.println(v1.get(9446)); + System.out.println(v1.toString()); + assertEquals(3, v1.size()); + assertEquals(v1.get(9446), "9446"); + + } + + public void testMemoryConsuptio() { + System.out.println("Memory available: " + (Runtime.getRuntime().maxMemory() / 1e6) + "MB"); + System.out.println("Memory used: " + ((Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1e6) + "MB"); + long counter = 0; + LongHashMap e = new LongHashMap(); + //LongKeyChainedHashMap e = new LongKeyChainedHashMap(); + //LongTreeMap e = new LongTreeMap(); + while (counter < 1e6) { + counter++; + e.put(counter, ""); + } + System.out.println(counter + " items"); + System.out.println("Memory used: " + ((Runtime.getRuntime().totalMemory() - Runtime.getRuntime().freeMemory()) / 1e6) + "MB"); + + + } + +} diff --git a/src/test/java/net/kotek/jdbm/MapInterfaceTest.java b/src/test/java/net/kotek/jdbm/MapInterfaceTest.java new file mode 100644 index 000000000..3189f0cba --- /dev/null +++ b/src/test/java/net/kotek/jdbm/MapInterfaceTest.java @@ -0,0 +1,1528 @@ +/* + * Copyright (C) 2008 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package net.kotek.jdbm; + +import static java.util.Collections.singleton; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Set; +import java.util.Map.Entry; + +import junit.framework.TestCase; + +/** + * Tests representing the contract of {@link Map}. Concrete subclasses of this + * base class test conformance of concrete {@link Map} subclasses to that + * contract. + *

+ * TODO: Descriptive assertion messages, with hints as to probable + * fixes. + * TODO: Add another constructor parameter indicating whether the + * class under test is ordered, and check the order if so. + * TODO: Refactor to share code with SetTestBuilder &c. + * + * @param the type of keys used by the maps under test + * @param the type of mapped values used the maps under test + * @author George van den Driessche + */ +public abstract class MapInterfaceTest extends TestCase { + protected final boolean supportsPut; + protected final boolean supportsRemove; + protected final boolean supportsClear; + protected final boolean allowsNullKeys; + protected final boolean allowsNullValues; + protected final boolean supportsIteratorRemove; + + /** + * Creates a new, empty instance of the class under test. + * + * @return a new, empty map instance. + * @throws UnsupportedOperationException if it's not possible to make an + * empty instance of the class under test. + */ + protected abstract Map makeEmptyMap() + throws UnsupportedOperationException; + + /** + * Creates a new, non-empty instance of the class under test. + * + * @return a new, non-empty map instance. + * @throws UnsupportedOperationException if it's not possible to make a + * non-empty instance of the class under test. + */ + protected abstract Map makePopulatedMap() + throws UnsupportedOperationException; + + /** + * Creates a new key that is not expected to be found + * in {@link #makePopulatedMap()}. + * + * @return a key. + * @throws UnsupportedOperationException if it's not possible to make a key + * that will not be found in the map. + */ + protected abstract K getKeyNotInPopulatedMap() + throws UnsupportedOperationException; + + /** + * Creates a new value that is not expected to be found + * in {@link #makePopulatedMap()}. + * + * @return a value. + * @throws UnsupportedOperationException if it's not possible to make a value + * that will not be found in the map. + */ + protected abstract V getValueNotInPopulatedMap() + throws UnsupportedOperationException; + + + /** + * Constructor with an explicit {@code supportsIteratorRemove} parameter. + */ + protected MapInterfaceTest( + boolean allowsNullKeys, + boolean allowsNullValues, + boolean supportsPut, + boolean supportsRemove, + boolean supportsClear, + boolean supportsIteratorRemove) { + this.supportsPut = supportsPut; + this.supportsRemove = supportsRemove; + this.supportsClear = supportsClear; + this.allowsNullKeys = allowsNullKeys; + this.allowsNullValues = allowsNullValues; + this.supportsIteratorRemove = supportsIteratorRemove; + } + + /** + * Used by tests that require a map, but don't care whether it's + * populated or not. + * + * @return a new map instance. + */ + protected Map makeEitherMap() { + try { + return makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return makeEmptyMap(); + } + } + + protected final boolean supportsValuesHashCode(Map map) { + // get the first non-null value + Collection values = map.values(); + for (V value : values) { + if (value != null) { + try { + value.hashCode(); + } catch (Exception e) { + return false; + } + return true; + } + } + return true; + } + + /** + * Checks all the properties that should always hold of a map. Also calls + * {@link #assertMoreInvariants} to check invariants that are peculiar to + * specific implementations. + * + * @param map the map to check. 
+ * @see #assertMoreInvariants + */ + protected final void assertInvariants(Map map) { + Set keySet = map.keySet(); + Collection valueCollection = map.values(); + Set> entrySet = map.entrySet(); + + assertEquals(map.size() == 0, map.isEmpty()); + assertEquals(map.size(), keySet.size()); + assertEquals(keySet.size() == 0, keySet.isEmpty()); + assertEquals(!keySet.isEmpty(), keySet.iterator().hasNext()); + + int expectedKeySetHash = 0; + for (K key : keySet) { + V value = map.get(key); + expectedKeySetHash += key != null ? key.hashCode() : 0; + assertTrue(map.containsKey(key)); + assertTrue(map.containsValue(value)); + assertTrue(valueCollection.contains(value)); + assertTrue(valueCollection.containsAll(Collections.singleton(value))); + assertTrue(entrySet.contains(mapEntry(key, value))); + assertTrue(allowsNullKeys || (key != null)); + } + assertEquals(expectedKeySetHash, keySet.hashCode()); + + assertEquals(map.size(), valueCollection.size()); + assertEquals(valueCollection.size() == 0, valueCollection.isEmpty()); + assertEquals( + !valueCollection.isEmpty(), valueCollection.iterator().hasNext()); + for (V value : valueCollection) { + assertTrue(map.containsValue(value)); + assertTrue(allowsNullValues || (value != null)); + } + + assertEquals(map.size(), entrySet.size()); + assertEquals(entrySet.size() == 0, entrySet.isEmpty()); + assertEquals(!entrySet.isEmpty(), entrySet.iterator().hasNext()); + assertTrue(!entrySet.contains("foo")); + + boolean supportsValuesHashCode = supportsValuesHashCode(map); + if (supportsValuesHashCode) { + int expectedEntrySetHash = 0; + for (Entry entry : entrySet) { + assertTrue(map.containsKey(entry.getKey())); + assertTrue(map.containsValue(entry.getValue())); + int expectedHash = + (entry.getKey() == null ? 0 : entry.getKey().hashCode()) ^ + (entry.getValue() == null ? 0 : entry.getValue().hashCode()); + assertEquals(expectedHash, entry.hashCode()); + expectedEntrySetHash += expectedHash; + } + assertEquals(expectedEntrySetHash, entrySet.hashCode()); + assertTrue(entrySet.containsAll(new HashSet>(entrySet))); + assertTrue(entrySet.equals(new HashSet>(entrySet))); + } + + Object[] entrySetToArray1 = entrySet.toArray(); + assertEquals(map.size(), entrySetToArray1.length); + assertTrue(Arrays.asList(entrySetToArray1).containsAll(entrySet)); + + Entry[] entrySetToArray2 = new Entry[map.size() + 2]; + entrySetToArray2[map.size()] = mapEntry("foo", 1); + assertSame(entrySetToArray2, entrySet.toArray(entrySetToArray2)); + assertNull(entrySetToArray2[map.size()]); + assertTrue(Arrays.asList(entrySetToArray2).containsAll(entrySet)); + + Object[] valuesToArray1 = valueCollection.toArray(); + assertEquals(map.size(), valuesToArray1.length); + assertTrue(Arrays.asList(valuesToArray1).containsAll(valueCollection)); + + Object[] valuesToArray2 = new Object[map.size() + 2]; + valuesToArray2[map.size()] = "foo"; + assertSame(valuesToArray2, valueCollection.toArray(valuesToArray2)); + assertNull(valuesToArray2[map.size()]); + assertTrue(Arrays.asList(valuesToArray2).containsAll(valueCollection)); + + if (supportsValuesHashCode) { + int expectedHash = 0; + for (Entry entry : entrySet) { + expectedHash += entry.hashCode(); + } + assertEquals(expectedHash, map.hashCode()); + } + + assertMoreInvariants(map); + } + + /** + * Override this to check invariants which should hold true for a particular + * implementation, but which are not generally applicable to every instance + * of Map. + * + * @param map the map whose additional invariants to check. 
+ */ + protected void assertMoreInvariants(Map map) { + } + + public void testClear() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + if (supportsClear) { + map.clear(); + assertTrue(map.isEmpty()); + } else { + try { + map.clear(); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testContainsKey() { + final Map map; + final K unmappedKey; + try { + map = makePopulatedMap(); + unmappedKey = getKeyNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertTrue(!map.containsKey(unmappedKey)); + assertTrue(map.containsKey(map.keySet().iterator().next())); + if (allowsNullKeys) { + map.containsKey(null); + } else { + try { + map.containsKey(null); + } catch (NullPointerException optional) { + } + } + assertInvariants(map); + } + + public void testContainsValue() { + final Map map; + final V unmappedValue; + try { + map = makePopulatedMap(); + unmappedValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertTrue(!map.containsValue(unmappedValue)); + assertTrue(map.containsValue(map.values().iterator().next())); + if (allowsNullValues) { + map.containsValue(null); + } else { + try { + map.containsKey(null); + } catch (NullPointerException optional) { + } + } + assertInvariants(map); + } + + public void testEntrySet() { + final Map map; + final Set> entrySet; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + + entrySet = map.entrySet(); + final K unmappedKey; + final V unmappedValue; + try { + unmappedKey = getKeyNotInPopulatedMap(); + unmappedValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + for (Entry entry : entrySet) { + assertTrue(!unmappedKey.equals(entry.getKey())); + assertTrue(!unmappedValue.equals(entry.getValue())); + } + } + + public void testEntrySetForEmptyMap() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + } + + public void testEntrySetContainsEntryNullKeyPresent() { + if (!allowsNullKeys || !supportsPut) { + return; + } + final Map map; + final Set> entrySet; + try { + map = makeEitherMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + + entrySet = map.entrySet(); + final V unmappedValue; + try { + unmappedValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + map.put(null, unmappedValue); + Entry entry = mapEntry(null, unmappedValue); + assertTrue(entrySet.contains(entry)); + assertTrue(!entrySet.contains(mapEntry(null, null))); + } + + public void testEntrySetContainsEntryNullKeyMissing() { + final Map map; + final Set> entrySet; + try { + map = makeEitherMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + + entrySet = map.entrySet(); + final V unmappedValue; + try { + unmappedValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + Entry entry = mapEntry(null, unmappedValue); + assertTrue(!entrySet.contains(entry)); + assertTrue(!entrySet.contains(mapEntry(null, null))); + } + + public void testEntrySetIteratorRemove() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + 
Set> entrySet = map.entrySet(); + Iterator> iterator = entrySet.iterator(); + if (supportsIteratorRemove) { + int initialSize = map.size(); + Entry entry = iterator.next(); + iterator.remove(); + assertEquals(initialSize - 1, map.size()); + assertTrue(!entrySet.contains(entry)); + assertInvariants(map); + try { + iterator.remove(); + fail("Expected IllegalStateException."); + } catch (IllegalStateException e) { + // Expected. + } + } else { + try { + iterator.next(); + iterator.remove(); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testEntrySetRemove() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + if (supportsRemove) { + int initialSize = map.size(); + boolean didRemove = entrySet.remove(entrySet.iterator().next()); + assertTrue(didRemove); + assertEquals(initialSize - 1, map.size()); + } else { + try { + entrySet.remove(entrySet.iterator().next()); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testEntrySetRemoveMissingKey() { + final Map map; + final K key; + try { + map = makeEitherMap(); + key = getKeyNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + Entry entry + = mapEntry(key, getValueNotInPopulatedMap()); + int initialSize = map.size(); + if (supportsRemove) { + boolean didRemove = entrySet.remove(entry); + assertTrue(!didRemove); + } else { + try { + boolean didRemove = entrySet.remove(entry); + assertTrue(!didRemove); + } catch (UnsupportedOperationException optional) { + } + } + assertEquals(initialSize, map.size()); + assertTrue(!map.containsKey(key)); + assertInvariants(map); + } + + public void testEntrySetRemoveDifferentValue() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + K key = map.keySet().iterator().next(); + Entry entry + = mapEntry(key, getValueNotInPopulatedMap()); + int initialSize = map.size(); + if (supportsRemove) { + boolean didRemove = entrySet.remove(entry); + assertTrue(!didRemove); + } else { + try { + boolean didRemove = entrySet.remove(entry); + assertTrue(!didRemove); + } catch (UnsupportedOperationException optional) { + } + } + assertEquals(initialSize, map.size()); + assertTrue(map.containsKey(key)); + assertInvariants(map); + } + + public void testEntrySetRemoveNullKeyPresent() { + if (!allowsNullKeys || !supportsPut || !supportsRemove) { + return; + } + final Map map; + final Set> entrySet; + try { + map = makeEitherMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + + entrySet = map.entrySet(); + final V unmappedValue; + try { + unmappedValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + map.put(null, unmappedValue); + assertEquals(unmappedValue, map.get(null)); + assertTrue(map.containsKey(null)); + Entry entry = mapEntry(null, unmappedValue); + assertTrue(entrySet.remove(entry)); + assertNull(map.get(null)); + assertTrue(!map.containsKey(null)); + } + + public void testEntrySetRemoveNullKeyMissing() { + final Map map; + try { + map = makeEitherMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + 
Entry entry + = mapEntry(null, getValueNotInPopulatedMap()); + int initialSize = map.size(); + if (supportsRemove) { + boolean didRemove = entrySet.remove(entry); + assertTrue(!didRemove); + } else { + try { + boolean didRemove = entrySet.remove(entry); + assertTrue(!didRemove); + } catch (UnsupportedOperationException optional) { + } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testEntrySetRemoveAll() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + Set> entriesToRemove = + singleton(entrySet.iterator().next()); + if (supportsRemove) { + int initialSize = map.size(); + boolean didRemove = entrySet.removeAll(entriesToRemove); + assertTrue(didRemove); + assertEquals(initialSize - entriesToRemove.size(), map.size()); + for (Entry entry : entriesToRemove) { + assertTrue(!entrySet.contains(entry)); + } + } else { + try { + entrySet.removeAll(entriesToRemove); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testEntrySetRemoveAllNullFromEmpty() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + if (supportsRemove) { + try { + entrySet.removeAll(null); + fail("Expected NullPointerException."); + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + entrySet.removeAll(null); + fail("Expected UnsupportedOperationException or NullPointerException."); + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testEntrySetRetainAll() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + Set> entriesToRetain = + singleton(entrySet.iterator().next()); + if (supportsRemove) { + boolean shouldRemove = (entrySet.size() > entriesToRetain.size()); + boolean didRemove = entrySet.retainAll(entriesToRetain); + assertEquals(shouldRemove, didRemove); + assertEquals(entriesToRetain.size(), map.size()); + for (Entry entry : entriesToRetain) { + assertTrue(entrySet.contains(entry)); + } + } else { + try { + entrySet.retainAll(entriesToRetain); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testEntrySetRetainAllNullFromEmpty() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + if (supportsRemove) { + try { + entrySet.retainAll(null); + // Returning successfully is not ideal, but tolerated. + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + entrySet.retainAll(null); + // We have to tolerate a successful return (Sun bug 4802647) + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. 
+ } + } + assertInvariants(map); + } + + public void testEntrySetClear() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + if (supportsClear) { + entrySet.clear(); + assertTrue(entrySet.isEmpty()); + } else { + try { + entrySet.clear(); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testEntrySetAddAndAddAll() { + final Map map = makeEitherMap(); + + Set> entrySet = map.entrySet(); + final Entry entryToAdd = mapEntry(null, null); + try { + entrySet.add(entryToAdd); + fail("Expected UnsupportedOperationException or NullPointerException."); + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. + } + assertInvariants(map); + + try { + entrySet.addAll(singleton(entryToAdd)); + fail("Expected UnsupportedOperationException or NullPointerException."); + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. + } + assertInvariants(map); + } + + public void testEntrySetSetValue() { + // TODO: Investigate the extent to which, in practice, maps that support + // put() also support Entry.setValue(). + if (!supportsPut) { + return; + } + + final Map map; + final V valueToSet; + try { + map = makePopulatedMap(); + valueToSet = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + Entry entry = entrySet.iterator().next(); + final V oldValue = entry.getValue(); + final V returnedValue = entry.setValue(valueToSet); + assertEquals(oldValue, returnedValue); + assertTrue(entrySet.contains( + mapEntry(entry.getKey(), valueToSet))); + assertEquals(valueToSet, map.get(entry.getKey())); + assertInvariants(map); + } + + public void testEntrySetSetValueSameValue() { + // TODO: Investigate the extent to which, in practice, maps that support + // put() also support Entry.setValue(). 
+ if (!supportsPut) { + return; + } + + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set> entrySet = map.entrySet(); + Entry entry = entrySet.iterator().next(); + final V oldValue = entry.getValue(); + final V returnedValue = entry.setValue(oldValue); + assertEquals(oldValue, returnedValue); + assertTrue(entrySet.contains( + mapEntry(entry.getKey(), oldValue))); + assertEquals(oldValue, map.get(entry.getKey())); + assertInvariants(map); + } + + public void testEqualsForEqualMap() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + assertEquals(map, map); + assertEquals(makePopulatedMap(), map); + assertTrue(!map.equals(Collections.emptyMap())); + //no-inspection ObjectEqualsNull + assertTrue(!map.equals(null)); + } + + public void testEqualsForLargerMap() { + if (!supportsPut) { + return; + } + + final Map map; + final Map largerMap; + try { + map = makePopulatedMap(); + largerMap = makePopulatedMap(); + largerMap.put(getKeyNotInPopulatedMap(), getValueNotInPopulatedMap()); + } catch (UnsupportedOperationException e) { + return; + } + + assertTrue(!map.equals(largerMap)); + } + + public void testEqualsForSmallerMap() { + if (!supportsRemove) { + return; + } + + final Map map; + final Map smallerMap; + try { + map = makePopulatedMap(); + smallerMap = new LinkedHashMap(map); +// smallerMap = makePopulatedMap(); + smallerMap.remove(smallerMap.keySet().iterator().next()); + } catch (UnsupportedOperationException e) { + return; + } + + assertTrue(!map.equals(smallerMap)); + } + + public void testEqualsForEmptyMap() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + + assertEquals(map, map); + assertEquals(makeEmptyMap(), map); + assertEquals(Collections.emptyMap(), map); + assertTrue(!map.equals(Collections.emptySet())); + //noinspection ObjectEqualsNull + assertTrue(!map.equals(null)); + } + + public void testGet() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + for (Entry entry : map.entrySet()) { + assertEquals(entry.getValue(), map.get(entry.getKey())); + } + + K unmappedKey = null; + try { + unmappedKey = getKeyNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertNull(map.get(unmappedKey)); + } + + public void testGetForEmptyMap() { + final Map map; + K unmappedKey = null; + try { + map = makeEmptyMap(); + unmappedKey = getKeyNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertNull(map.get(unmappedKey)); + } + + public void testGetNull() { + Map map = makeEitherMap(); + if (allowsNullKeys) { + if (allowsNullValues) { + // TODO: decide what to test here. 
+ } else { + assertEquals(map.containsKey(null), map.get(null) != null); + } + } else { + try { + map.get(null); + } catch (NullPointerException optional) { + } + } + assertInvariants(map); + } + + public void testHashCode() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + } + + public void testHashCodeForEmptyMap() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + } + + public void testPutNewKey() { + final Map map = makeEitherMap(); + final K keyToPut; + final V valueToPut; + try { + keyToPut = getKeyNotInPopulatedMap(); + valueToPut = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + if (supportsPut) { + int initialSize = map.size(); + V oldValue = map.put(keyToPut, valueToPut); + assertEquals(valueToPut, map.get(keyToPut)); + assertTrue(map.containsKey(keyToPut)); + assertTrue(map.containsValue(valueToPut)); + assertEquals(initialSize + 1, map.size()); + assertNull(oldValue); + } else { + try { + map.put(keyToPut, valueToPut); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testPutExistingKey() { + final Map map; + final K keyToPut; + final V valueToPut; + try { + map = makePopulatedMap(); + valueToPut = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToPut = map.keySet().iterator().next(); + if (supportsPut) { + int initialSize = map.size(); + map.put(keyToPut, valueToPut); + assertEquals(valueToPut, map.get(keyToPut)); + assertTrue(map.containsKey(keyToPut)); + assertTrue(map.containsValue(valueToPut)); + assertEquals(initialSize, map.size()); + } else { + try { + map.put(keyToPut, valueToPut); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testPutNullKey() { + if (!supportsPut) { + return; + } + final Map map = makeEitherMap(); + final V valueToPut; + try { + valueToPut = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + if (allowsNullKeys) { + final V oldValue = map.get(null); + final V returnedValue = map.put(null, valueToPut); + assertEquals(oldValue, returnedValue); + assertEquals(valueToPut, map.get(null)); + assertTrue(map.containsKey(null)); + assertTrue(map.containsValue(valueToPut)); + } else { + try { + map.put(null, valueToPut); + fail("Expected RuntimeException"); + } catch (RuntimeException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testPutNullValue() { + if (!supportsPut) { + return; + } + final Map map = makeEitherMap(); + final K keyToPut; + try { + keyToPut = getKeyNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + if (allowsNullValues) { + int initialSize = map.size(); + final V oldValue = map.get(keyToPut); + final V returnedValue = map.put(keyToPut, null); + assertEquals(oldValue, returnedValue); + assertNull(map.get(keyToPut)); + assertTrue(map.containsKey(keyToPut)); + assertTrue(map.containsValue(null)); + assertEquals(initialSize + 1, map.size()); + } else { + try { + map.put(keyToPut, null); + fail("Expected RuntimeException"); + } catch (RuntimeException e) { + // Expected. 
+ } + } + assertInvariants(map); + } + + public void testPutNullValueForExistingKey() { + if (!supportsPut) { + return; + } + final Map map; + final K keyToPut; + try { + map = makePopulatedMap(); + keyToPut = map.keySet().iterator().next(); + } catch (UnsupportedOperationException e) { + return; + } + if (allowsNullValues) { + int initialSize = map.size(); + final V oldValue = map.get(keyToPut); + final V returnedValue = map.put(keyToPut, null); + assertEquals(oldValue, returnedValue); + assertNull(map.get(keyToPut)); + assertTrue(map.containsKey(keyToPut)); + assertTrue(map.containsValue(null)); + assertEquals(initialSize, map.size()); + } else { + try { + map.put(keyToPut, null); + fail("Expected RuntimeException"); + } catch (RuntimeException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testPutAllNewKey() { + final Map map = makeEitherMap(); + final K keyToPut; + final V valueToPut; + try { + keyToPut = getKeyNotInPopulatedMap(); + valueToPut = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + final Map mapToPut = Collections.singletonMap(keyToPut, valueToPut); + if (supportsPut) { + int initialSize = map.size(); + map.putAll(mapToPut); + assertEquals(valueToPut, map.get(keyToPut)); + assertTrue(map.containsKey(keyToPut)); + assertTrue(map.containsValue(valueToPut)); + assertEquals(initialSize + 1, map.size()); + } else { + try { + map.putAll(mapToPut); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testPutAllExistingKey() { + final Map map; + final K keyToPut; + final V valueToPut; + try { + map = makePopulatedMap(); + valueToPut = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToPut = map.keySet().iterator().next(); + final Map mapToPut = Collections.singletonMap(keyToPut, valueToPut); + int initialSize = map.size(); + if (supportsPut) { + map.putAll(mapToPut); + assertEquals(valueToPut, map.get(keyToPut)); + assertTrue(map.containsKey(keyToPut)); + assertTrue(map.containsValue(valueToPut)); + } else { + try { + map.putAll(mapToPut); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testRemove() { + final Map map; + final K keyToRemove; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + keyToRemove = map.keySet().iterator().next(); + if (supportsRemove) { + int initialSize = map.size(); + V expectedValue = map.get(keyToRemove); + V oldValue = map.remove(keyToRemove); + assertEquals(expectedValue, oldValue); + assertTrue(!map.containsKey(keyToRemove)); + assertEquals(initialSize - 1, map.size()); + } else { + try { + map.remove(keyToRemove); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. 
+ } + } + assertInvariants(map); + } + + public void testRemoveMissingKey() { + final Map map; + final K keyToRemove; + try { + map = makePopulatedMap(); + keyToRemove = getKeyNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + if (supportsRemove) { + int initialSize = map.size(); + assertNull(map.remove(keyToRemove)); + assertEquals(initialSize, map.size()); + } else { + try { + map.remove(keyToRemove); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testSize() { + assertInvariants(makeEitherMap()); + } + + public void testKeySetClear() { + final Map map; + try { + map = makeEitherMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set keySet = map.keySet(); + if (supportsClear) { + keySet.clear(); + assertTrue(keySet.isEmpty()); + } else { + try { + keySet.clear(); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testKeySetRemoveAllNullFromEmpty() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set keySet = map.keySet(); + if (supportsRemove) { + try { + keySet.removeAll(null); + fail("Expected NullPointerException."); + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + keySet.removeAll(null); + fail("Expected UnsupportedOperationException or NullPointerException."); + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testKeySetRetainAllNullFromEmpty() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Set keySet = map.keySet(); + if (supportsRemove) { + try { + keySet.retainAll(null); + // Returning successfully is not ideal, but tolerated. + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + keySet.retainAll(null); + // We have to tolerate a successful return (Sun bug 4802647) + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testValues() { + final Map map; + final Collection valueCollection; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + assertInvariants(map); + + valueCollection = map.values(); + final V unmappedValue; + try { + unmappedValue = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + for (V value : valueCollection) { + assertTrue(!unmappedValue.equals(value)); + } + } + + public void testValuesIteratorRemove() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection valueCollection = map.values(); + Iterator iterator = valueCollection.iterator(); + if (supportsIteratorRemove) { + int initialSize = map.size(); + iterator.next(); + iterator.remove(); + assertEquals(initialSize - 1, map.size()); + // (We can't assert that the values collection no longer contains the + // removed value, because the underlying map can have multiple mappings + // to the same value.) + assertInvariants(map); + try { + iterator.remove(); + fail("Expected IllegalStateException."); + } catch (IllegalStateException e) { + // Expected. 
+ } + } else { + try { + iterator.next(); + iterator.remove(); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testValuesRemove() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection valueCollection = map.values(); + if (supportsRemove) { + int initialSize = map.size(); + valueCollection.remove(valueCollection.iterator().next()); + assertEquals(initialSize - 1, map.size()); + // (We can't assert that the values collection no longer contains the + // removed value, because the underlying map can have multiple mappings + // to the same value.) + } else { + try { + valueCollection.remove(valueCollection.iterator().next()); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testValuesRemoveMissing() { + final Map map; + final V valueToRemove; + try { + map = makeEitherMap(); + valueToRemove = getValueNotInPopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection valueCollection = map.values(); + int initialSize = map.size(); + if (supportsRemove) { + assertTrue(!valueCollection.remove(valueToRemove)); + } else { + try { + assertTrue(!valueCollection.remove(valueToRemove)); + } catch (UnsupportedOperationException e) { + // Tolerated. + } + } + assertEquals(initialSize, map.size()); + assertInvariants(map); + } + + public void testValuesRemoveAll() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection valueCollection = map.values(); + Set valuesToRemove = singleton(valueCollection.iterator().next()); + if (supportsRemove) { + valueCollection.removeAll(valuesToRemove); + for (V value : valuesToRemove) { + assertTrue(!valueCollection.contains(value)); + } + for (V value : valueCollection) { + assertTrue(!valuesToRemove.contains(value)); + } + } else { + try { + valueCollection.removeAll(valuesToRemove); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testValuesRemoveAllNullFromEmpty() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection values = map.values(); + if (supportsRemove) { + try { + values.removeAll(null); + // Returning successfully is not ideal, but tolerated. + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + values.removeAll(null); + // We have to tolerate a successful return (Sun bug 4802647) + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. 
+ } + } + assertInvariants(map); + } + + public void testValuesRetainAll() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection valueCollection = map.values(); + Set valuesToRetain = singleton(valueCollection.iterator().next()); + if (supportsRemove) { + valueCollection.retainAll(valuesToRetain); + for (V value : valuesToRetain) { + assertTrue(valueCollection.contains(value)); + } + for (V value : valueCollection) { + assertTrue(valuesToRetain.contains(value)); + } + } else { + try { + valueCollection.retainAll(valuesToRetain); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testValuesRetainAllNullFromEmpty() { + final Map map; + try { + map = makeEmptyMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection values = map.values(); + if (supportsRemove) { + try { + values.retainAll(null); + // Returning successfully is not ideal, but tolerated. + } catch (NullPointerException e) { + // Expected. + } + } else { + try { + values.retainAll(null); + // We have to tolerate a successful return (Sun bug 4802647) + } catch (UnsupportedOperationException e) { + // Expected. + } catch (NullPointerException e) { + // Expected. + } + } + assertInvariants(map); + } + + public void testValuesClear() { + final Map map; + try { + map = makePopulatedMap(); + } catch (UnsupportedOperationException e) { + return; + } + + Collection valueCollection = map.values(); + if (supportsClear) { + valueCollection.clear(); + assertTrue(valueCollection.isEmpty()); + } else { + try { + valueCollection.clear(); + fail("Expected UnsupportedOperationException."); + } catch (UnsupportedOperationException e) { + // Expected. 
+ } + } + assertInvariants(map); + } + + private static Entry mapEntry(K key, V value) { + return Collections.singletonMap(key, value).entrySet().iterator().next(); + } +} diff --git a/src/test/java/net/kotek/jdbm/RecordStoreAsyncWriteTest.java b/src/test/java/net/kotek/jdbm/RecordStoreAsyncWriteTest.java new file mode 100644 index 000000000..52befb887 --- /dev/null +++ b/src/test/java/net/kotek/jdbm/RecordStoreAsyncWriteTest.java @@ -0,0 +1,84 @@ +package net.kotek.jdbm; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; + +/** + * @author Jan Kotek + */ +public class RecordStoreAsyncWriteTest extends JdbmTestCase{ + + @Override + protected RecordStore openRecordManager() { +// return new RecordStoreAsyncWrite(fileName, false); + return new RecordStoreCache(fileName,true); + } + + + @Test public void write_fetch_update_delete(){ + + + long recid = recman.recordPut("aaa",Serializer.STRING_SERIALIZER); + Assert.assertEquals("aaa",recman.recordGet(recid, Serializer.STRING_SERIALIZER)); + reopenStore(); + Assert.assertEquals("aaa",recman.recordGet(recid, Serializer.STRING_SERIALIZER)); + recman.recordUpdate(recid,"bbb",Serializer.STRING_SERIALIZER); + Assert.assertEquals("bbb",recman.recordGet(recid, Serializer.STRING_SERIALIZER)); + reopenStore(); + Assert.assertEquals("bbb",recman.recordGet(recid, Serializer.STRING_SERIALIZER)); + + } + + + @Test(timeout = 0xFFFF) + public void concurrent_updates_test() throws InterruptedException { + + + final int threadNum = 64; + final int updates = 10000; + final CountDownLatch latch = new CountDownLatch(threadNum); + final Map recids = new ConcurrentHashMap(); + + + for(int i = 0;i recids= new ArrayList(); + for(int i = 0;i recids2= new ArrayList(); + for(int i = 0;i0;i--){ + assertEquals(i, recman.longStackTake(RecordStore.RECID_USER_WHOTEVER)); + } + + assertEquals(0, getLongStack(RecordStore.RECID_USER_WHOTEVER).size()); + + } + + @Test public void test_long_buffer_put_take_simple(){ + recman.lock.writeLock().lock(); + recman.longStackPut(RecordStore.RECID_USER_WHOTEVER, 111); + assertEquals(111L, recman.longStackTake(RecordStore.RECID_USER_WHOTEVER)); + } + + + @Test public void test_basic_long_buffer(){ + + //dirty hack to make sure we have lock + recman.lock.writeLock().lock(); + final long max = 150; + ArrayList list = new ArrayList(); + for(long i=1;i= 0); + assertTrue(oldSlot <= slot); + assertTrue(slot - oldSlot <= 1); + oldSlot= slot; + } + assertEquals(RecordStore.NUMBER_OF_PHYS_FREE_SLOT - 1, oldSlot); + } + + @Test public void test_freePhysRecSize2FreeSlot_max_size_has_unique_slot(){ + int slotMax = recman.freePhysRecSize2FreeSlot(RecordStore.MAX_RECORD_SIZE); + int slotMaxMinus1 = recman.freePhysRecSize2FreeSlot(RecordStore.MAX_RECORD_SIZE - 1); + assertEquals(slotMax, slotMaxMinus1 + 1); + } + + @Test public void test_freePhys_PutAndTake(){ + recman.lock.writeLock().lock(); + + final long offset = 1111000; + final int size = 344; + final long indexVal =(((long)size) <<48) |offset; + + recman.freePhysRecPut(indexVal); + + assertEquals(indexVal, recman.freePhysRecTake(size)); + assertEquals(arrayList(), getLongStack(RecordStore.RECID_FREE_PHYS_RECORDS_START + recman.freePhysRecSize2FreeSlot(size))); + } + + @Test public void test_freePhys_Put_and_Take_2(){ + + byte[] zero = new byte[RecordStore.NUMBER_OF_PHYS_FREE_SLOT*8]; + + final int inc = CC.FULL_TEST ? 
1 : 111; + + + recman.lock.writeLock().lock(); + + for(int origSize =1;origSize<=1500;origSize+=inc){ + for(int newSize = 1;newSize<=origSize;newSize+=inc){ + + final long offset = 111111; + final long indexVal = ((long)origSize)<<48 | offset; + + recman.freePhysRecPut(indexVal); + + //check it is in list + assertEquals(arrayList(indexVal), + getLongStack(RecordStore.RECID_FREE_PHYS_RECORDS_START + + recman.freePhysRecSize2FreeSlot(origSize))); + + //require new record + final long newIndexVal = recman.freePhysRecTake(newSize); + + assertEquals(((long) newSize) << 48 | offset, newIndexVal); + + //find remaining free record if any + if(origSize!=newSize){ + final int rsize = origSize - newSize; + final int rslot = recman.freePhysRecSize2FreeSlot(rsize); + final long rIndexVal = (offset + newSize) | (((long)rsize)<<48); + assertEquals(new Long(rIndexVal), + getLongStack(RecordStore.RECID_FREE_PHYS_RECORDS_START + rslot).get(0)); + } + + + + //zero out all records + recman.indexBufs[0].position(RecordStore.RECID_FREE_PHYS_RECORDS_START*8); + recman.indexBufs[0].put(zero); + recman.indexBufs[0].putLong(RecordStore.RECID_CURRENT_PHYS_FILE_SIZE * 8, 8); + } + } + } + + + + @Test public void test_2GB_over() throws IOException { + Assume.assumeTrue(CC.FULL_TEST); + + byte[] data = new byte[51111]; + Integer dataHash = Arrays.hashCode(data); + + Set recids = new TreeSet(); + + for(int i = 0; i<1e5;i++){ + long recid = recman.recordPut(data, Serializer.BYTE_ARRAY_SERIALIZER); + recids.add(recid); + } + + Map m1 = new TreeMap(); + for(Long l:recids){ + m1.put(l,dataHash); + } + + Map m2 = getDataContent(); + + assertEquals(m1.size(), m2.size()); + assertTrue(m1.equals(m2)); + + + } + + + @Test public void test_store_reopen(){ + long recid = recman.recordPut("aaa", Serializer.STRING_SERIALIZER); + + reopenStore(); + + String aaa = recman.recordGet(recid, Serializer.STRING_SERIALIZER); + assertEquals("aaa",aaa); + } + + @Test public void test_store_reopen_over_2GB(){ + Assume.assumeTrue(CC.FULL_TEST); + + byte[] data = new byte[11111]; + final long max = RecordStore.BUF_SIZE*2L/data.length; + final Integer hash = Arrays.hashCode(data); + + List recids = new ArrayList(); + + for(int i = 0;i s = new TreeSet(); + for (Field f : c.getDeclaredFields()) { + f.setAccessible(true); + int value = f.getInt(null); + + assertTrue("Value already used: " + value, !s.contains(value)); + s.add(value); + } + assertTrue(!s.isEmpty()); + } +} diff --git a/src/test/java/net/kotek/jdbm/SerializerBaseTest.java b/src/test/java/net/kotek/jdbm/SerializerBaseTest.java new file mode 100644 index 000000000..d14efddeb --- /dev/null +++ b/src/test/java/net/kotek/jdbm/SerializerBaseTest.java @@ -0,0 +1,456 @@ +/******************************************************************************* + * Copyright 2010 Cees De Groot, Alex Boisvert, Jan Kotek + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ ******************************************************************************/ +package net.kotek.jdbm; + +import junit.framework.TestCase; + +import java.io.*; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.nio.ByteBuffer; +import java.util.AbstractMap.SimpleEntry; +import java.util.*; + +@SuppressWarnings("unchecked") +public class SerializerBaseTest extends TestCase { + + SerializerBase ser = new SerializerBase(); + + private byte[] serialize(Object i) throws IOException { + DataOutput2 in = new DataOutput2(); + ser.serialize(in, i); + return in.copyBytes(); + } + + private Object deserialize(byte[] buf) throws IOException { + return ser.deserialize(new DataInput2(ByteBuffer.wrap(buf),0),-1); + } + + + + public void testInt() throws IOException{ + int[] vals = { + Integer.MIN_VALUE, + -Short.MIN_VALUE * 2, + -Short.MIN_VALUE + 1, + -Short.MIN_VALUE, + -10, -9, -8, -7, -6, -5, -4, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 127, 254, 255, 256, Short.MAX_VALUE, Short.MAX_VALUE + 1, + Short.MAX_VALUE * 2, Integer.MAX_VALUE + }; + for (int i : vals) { + byte[] buf = serialize(i); + Object l2 = deserialize(buf); + assertTrue(l2.getClass() == Integer.class); + assertEquals(l2, i); + } + } + + + + public void testShort() throws IOException{ + short[] vals = { + (short) (-Short.MIN_VALUE + 1), + (short) -Short.MIN_VALUE, + -10, -9, -8, -7, -6, -5, -4, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 127, 254, 255, 256, Short.MAX_VALUE, Short.MAX_VALUE - 1, + Short.MAX_VALUE + }; + for (short i : vals) { + byte[] buf = serialize(i); + Object l2 = deserialize(buf); + assertTrue(l2.getClass() == Short.class); + assertEquals(l2, i); + } + } + + public void testDouble() throws IOException{ + double[] vals = { + 1f, 0f, -1f, Math.PI, 255, 256, Short.MAX_VALUE, Short.MAX_VALUE + 1, -100 + }; + for (double i : vals) { + byte[] buf = serialize(i); + Object l2 = deserialize(buf); + assertTrue(l2.getClass() == Double.class); + assertEquals(l2, i); + } + } + + + public void testFloat() throws IOException{ + float[] vals = { + 1f, 0f, -1f, (float) Math.PI, 255, 256, Short.MAX_VALUE, Short.MAX_VALUE + 1, -100 + }; + for (float i : vals) { + byte[] buf = serialize(i); + Object l2 = deserialize(buf); + assertTrue(l2.getClass() == Float.class); + assertEquals(l2, i); + } + } + + public void testChar() throws IOException{ + char[] vals = { + 'a', ' ' + }; + for (char i : vals) { + byte[] buf = serialize(i); + Object l2 = deserialize(buf); + assertEquals(l2.getClass(), Character.class); + assertEquals(l2, i); + } + } + + + public void testLong() throws IOException{ + long[] vals = { + Long.MIN_VALUE, + Integer.MIN_VALUE, Integer.MIN_VALUE - 1, Integer.MIN_VALUE + 1, + -Short.MIN_VALUE * 2, + -Short.MIN_VALUE + 1, + -Short.MIN_VALUE, + -10, -9, -8, -7, -6, -5, -4, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + 127, 254, 255, 256, Short.MAX_VALUE, Short.MAX_VALUE + 1, + Short.MAX_VALUE * 2, Integer.MAX_VALUE, Integer.MAX_VALUE + 1, Long.MAX_VALUE + }; + for (long i : vals) { + byte[] buf = serialize(i); + Object l2 = deserialize(buf); + assertTrue(l2.getClass() == Long.class); + assertEquals(l2, i); + } + } + + public void testBoolean1() throws IOException{ + byte[] buf = serialize(true); + Object l2 = deserialize(buf); + assertTrue(l2.getClass() == Boolean.class); + assertEquals(l2, true); + + byte[] buf2 = serialize(false); + Object l22 = deserialize(buf2); + assertTrue(l22.getClass() == Boolean.class); + assertEquals(l22, false); + + } + + public void testString() throws IOException{ + byte[] buf = 
serialize("Abcd"); + String l2 = (String) deserialize(buf); + assertEquals(l2, "Abcd"); + } + + public void testBigString() throws IOException{ + String bigString = ""; + for (int i = 0; i < 1e4; i++) + bigString += i % 10; + byte[] buf = serialize(bigString); + String l2 = (String) deserialize(buf); + assertEquals(l2, bigString); + } + + +// public void testObject() throws ClassNotFoundException, IOException { +// SimpleEntry a = new SimpleEntry(1, "11"); +// byte[] buf = serialize(a); +// SimpleEntry l2 = (SimpleEntry) deserialize(buf); +// assertEquals(l2, a); +// } + + public void testNoArgumentConstructorInJavaSerialization() throws ClassNotFoundException, IOException { + SimpleEntry a = new SimpleEntry(1, "11"); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + new ObjectOutputStream(out).writeObject(a); + ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(out.toByteArray())); + SimpleEntry a2 = (SimpleEntry) in.readObject(); + assertEquals(a, a2); + } + + + public void testArrayList() throws ClassNotFoundException, IOException { + Collection c = new ArrayList(); + for (int i = 0; i < 200; i++) + c.add(i); + assertEquals(c, deserialize(serialize(c))); + for (int i = 0; i < 2000; i++) + c.add(i); + assertEquals(c, deserialize(serialize(c))); + } + + public void testLinkedList() throws ClassNotFoundException, IOException { + Collection c = new java.util.LinkedList(); + for (int i = 0; i < 200; i++) + c.add(i); + assertEquals(c, deserialize(serialize(c))); + for (int i = 0; i < 2000; i++) + c.add(i); + assertEquals(c, deserialize(serialize(c))); + } + + public void testVector() throws ClassNotFoundException, IOException { + Collection c = new Vector(); + for (int i = 0; i < 200; i++) + c.add(i); + assertEquals(c, deserialize(serialize(c))); + for (int i = 0; i < 2000; i++) + c.add(i); + assertEquals(c, deserialize(serialize(c))); + } + + + public void testTreeSet() throws ClassNotFoundException, IOException { + Collection c = new TreeSet(); + for (int i = 0; i < 200; i++) + c.add(i); + assertEquals(c, deserialize(serialize(c))); + for (int i = 0; i < 2000; i++) + c.add(i); + assertEquals(c, deserialize(serialize(c))); + } + + public void testHashSet() throws ClassNotFoundException, IOException { + Collection c = new HashSet(); + for (int i = 0; i < 200; i++) + c.add(i); + assertEquals(c, deserialize(serialize(c))); + for (int i = 0; i < 2000; i++) + c.add(i); + assertEquals(c, deserialize(serialize(c))); + } + + public void testLinkedHashSet() throws ClassNotFoundException, IOException { + Collection c = new LinkedHashSet(); + for (int i = 0; i < 200; i++) + c.add(i); + assertEquals(c, deserialize(serialize(c))); + for (int i = 0; i < 2000; i++) + c.add(i); + assertEquals(c, deserialize(serialize(c))); + } + + public void testHashMap() throws ClassNotFoundException, IOException { + Map c = new HashMap(); + for (int i = 0; i < 200; i++) + c.put(i, i + 10000); + assertEquals(c, deserialize(serialize(c))); + for (int i = 0; i < 2000; i++) + c.put(i, i + 10000); + assertEquals(c, deserialize(serialize(c))); + } + + public void testTreeMap() throws ClassNotFoundException, IOException { + Map c = new TreeMap(); + for (int i = 0; i < 200; i++) + c.put(i, i + 10000); + assertEquals(c, deserialize(serialize(c))); + for (int i = 0; i < 2000; i++) + c.put(i, i + 10000); + assertEquals(c, deserialize(serialize(c))); + } + + public void testLinkedHashMap() throws ClassNotFoundException, IOException { + Map c = new LinkedHashMap(); + for (int i = 0; i < 200; i++) + 
c.put(i, i + 10000); + assertEquals(c, deserialize(serialize(c))); + for (int i = 0; i < 2000; i++) + c.put(i, i + 10000); + assertEquals(c, deserialize(serialize(c))); + } + + public void testHashtable() throws ClassNotFoundException, IOException { + Map c = new Hashtable(); + for (int i = 0; i < 200; i++) + c.put(i, i + 10000); + assertEquals(c, deserialize(serialize(c))); + for (int i = 0; i < 2000; i++) + c.put(i, i + 10000); + assertEquals(c, deserialize(serialize(c))); + } + + public void testProperties() throws ClassNotFoundException, IOException { + Properties c = new Properties(); + for (int i = 0; i < 200; i++) + c.put(i, i + 10000); + assertEquals(c, deserialize(serialize(c))); + for (int i = 0; i < 2000; i++) + c.put(i, i + 10000); + assertEquals(c, deserialize(serialize(c))); + } + + + public void testClass() throws IOException{ + byte[] buf = serialize(String.class); + Class l2 = (Class) deserialize(buf); + assertEquals(l2, String.class); + } + + public void testClass2() throws IOException{ + byte[] buf = serialize(long[].class); + Class l2 = (Class) deserialize(buf); + assertEquals(l2, long[].class); + } + + + public void testUnicodeString() throws ClassNotFoundException, IOException { + String s = "Ciudad Bolíva"; + byte[] buf = serialize(s); + assertTrue("text is not unicode", buf.length != s.length()); + Object l2 = deserialize(buf); + assertEquals(l2, s); + } + + public void testPackedLongCollection() throws ClassNotFoundException, IOException { + ArrayList l1 = new ArrayList(); + l1.add(0L); + l1.add(1L); + l1.add(0L); + assertEquals(l1, deserialize(serialize(l1))); + l1.add(-1L); + assertEquals(l1, deserialize(serialize(l1))); + } + + public void testNegativeLongsArray() throws ClassNotFoundException, IOException { + long[] l = new long[] { -12 }; + Object deserialize = deserialize(serialize(l)); + assertTrue(Arrays.equals(l, (long[]) deserialize)); + } + + + public void testNegativeIntArray() throws ClassNotFoundException, IOException { + int[] l = new int[] { -12 }; + Object deserialize = deserialize(serialize(l)); + assertTrue(Arrays.equals(l, (int[]) deserialize)); + } + + + public void testNegativeShortArray() throws ClassNotFoundException, IOException { + short[] l = new short[] { -12 }; + Object deserialize = deserialize(serialize(l)); + assertTrue(Arrays.equals(l, (short[]) deserialize)); + } + + public void testBooleanArray() throws ClassNotFoundException, IOException { + boolean[] l = new boolean[] { true,false }; + Object deserialize = deserialize(serialize(l)); + assertTrue(Arrays.equals(l, (boolean[]) deserialize)); + } + + public void testDoubleArray() throws ClassNotFoundException, IOException { + double[] l = new double[] { Math.PI, 1D }; + Object deserialize = deserialize(serialize(l)); + assertTrue(Arrays.equals(l, (double[]) deserialize)); + } + + public void testFloatArray() throws ClassNotFoundException, IOException { + float[] l = new float[] { 1F, 1.234235F }; + Object deserialize = deserialize(serialize(l)); + assertTrue(Arrays.equals(l, (float[]) deserialize)); + } + + public void testByteArray() throws ClassNotFoundException, IOException { + byte[] l = new byte[] { 1,34,-5 }; + Object deserialize = deserialize(serialize(l)); + assertTrue(Arrays.equals(l, (byte[]) deserialize)); + } + + public void testCharArray() throws ClassNotFoundException, IOException { + char[] l = new char[] { '1','a','&' }; + Object deserialize = deserialize(serialize(l)); + assertTrue(Arrays.equals(l, (char[]) deserialize)); + } + + + public void testDate() throws 
IOException{
+        Date d = new Date(6546565565656L);
+        assertEquals(d, deserialize(serialize(d)));
+        d = new Date(System.currentTimeMillis());
+        assertEquals(d, deserialize(serialize(d)));
+    }
+
+    public void testBigDecimal() throws IOException{
+        BigDecimal d = new BigDecimal("445656.7889889895165654423236");
+        assertEquals(d, deserialize(serialize(d)));
+        d = new BigDecimal("-53534534534534445656.7889889895165654423236");
+        assertEquals(d, deserialize(serialize(d)));
+    }
+
+    public void testBigInteger() throws IOException{
+        BigInteger d = new BigInteger("4456567889889895165654423236");
+        assertEquals(d, deserialize(serialize(d)));
+        d = new BigInteger("-535345345345344456567889889895165654423236");
+        assertEquals(d, deserialize(serialize(d)));
+    }
+
+
+    public void testLocale() throws Exception{
+        assertEquals(Locale.FRANCE, deserialize(serialize(Locale.FRANCE)));
+        assertEquals(Locale.CANADA_FRENCH, deserialize(serialize(Locale.CANADA_FRENCH)));
+        assertEquals(Locale.SIMPLIFIED_CHINESE, deserialize(serialize(Locale.SIMPLIFIED_CHINESE)));
+
+    }
+
+//    enum Order
+//    {
+//        ASCENDING,
+//        DESCENDING
+//    }
+//    public void testEnum() throws Exception{
+//        Order o = Order.ASCENDING;
+//        o = (Order) deserialize(serialize(o));
+//        assertEquals(o,Order.ASCENDING );
+//        assertEquals(o.ordinal(),Order.ASCENDING .ordinal());
+//        assertEquals(o.name(),Order.ASCENDING .name());
+//
+//        o = Order.DESCENDING;
+//        o = (Order) deserialize(serialize(o));
+//        assertEquals(o,Order.DESCENDING );
+//        assertEquals(o.ordinal(),Order.DESCENDING .ordinal());
+//        assertEquals(o.name(),Order.DESCENDING .name());
+//
+//    }
+
+
+//    static class Extr implements Externalizable{
+//
+//        int aaa = 11;
+//        String l = "agfa";
+//
+//        public void writeExternal(ObjectOutput out) throws IOException {
+//            out.writeObject(l);
+//            out.writeInt(aaa);
+//
+//        }
+//
+//        public void readExternal(ObjectInput in) throws IOException{
+//            l = (String) in.readObject();
+//            aaa = in.readInt()+1;
+//
+//        }
+//    }
+//
+//    public void testExternalizable() throws Exception{
+//        Extr e = new Extr();
+//        e.aaa = 15;
+//        e.l = "pakla";
+//
+//        e = (Extr) deserialize(serialize(e));
+//        assertEquals(e.aaa,16); //was incremented during serialization
+//        assertEquals(e.l,"pakla");
+//
+//    }
+
+}
diff --git a/store-format.md b/store-format.md
new file mode 100644
index 000000000..1c1f00ec3
--- /dev/null
+++ b/store-format.md
@@ -0,0 +1,100 @@
+Store Format
+============
+
+JDBM4 storage consists of two areas: the index file and the data file. The index file is a list of 8-byte addresses pointing into the data file.
+The data file contains the raw record data.
+
+Internal store information, such as the list of free records, is maintained in 'long stacks':
+reverse-linked lists of 8-byte longs holding addresses, stored in the data file like any other record.
+
+The JDBM4 store is minimalistic, with little redundancy. Its design focuses on maximum speed and minimum overhead.
+Many operations (transactions, locking) are performed in memory.
+
+
+Index File
+-----------
+The index file is a list of 8-byte addresses pointing into the data file. Each address is identified by its position in the index file.
+When `RecordManager` returns a `recid`, it is actually returning a position in the index file.
+
+Each address is 8 bytes long, so the index file offset is calculated by multiplying the recid by eight:
+
+    index-file-offset = recid * 8
+
+Some starting index positions are reserved for internal use. Those still contain 8-byte addresses, but point to
+internal data, such as the list of free records. A `recid` returned by `RecordManager` will always be greater than 2555.
+
+    0           - file header and store format version 'JDBMXXXX'
+    1           - current size of the data file (pointer to the end of the data file)
+    2           - data address of the 'long stack' containing free positions in the index file
+    3 to 18     - reserved addresses for JDBM internal objects, such as Serializer or Name Table
+    19          - unreserved address (feel free to use it for your own stuff)
+    20 to 2555  - data addresses of free data records (see the following chapters)
+    2556 and up - recids of user records
+
+The 8-byte addresses stored in the index file have two parts: the first 2 bytes hold the data record size, the last 6 bytes hold the offset in the data file.
+The JDBM store currently supports records of at most 64KB (2^16); a workaround for bigger records will come in the future.
+The entire store has a maximum size of 256 terabytes (2^48).
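+
+For illustration, the address arithmetic can be sketched in plain Java. The helpers below are
+hypothetical (they are not part of the JDBM4 API); only the bit layout follows the description above:
+
+    //hypothetical helpers mirroring the index file layout described above
+    static long recidToIndexOffset(long recid) {
+        return recid * 8;                    //each index slot is 8 bytes wide
+    }
+
+    //pack record size (high 2 bytes) and data file offset (low 6 bytes) into one long
+    static long packIndexValue(int size, long offset) {
+        return (((long) size) << 48) | offset;
+    }
+
+    static int unpackSize(long indexValue) {
+        return (int) (indexValue >>> 48);    //top 16 bits, hence the 64KB record limit
+    }
+
+    static long unpackOffset(long indexValue) {
+        return indexValue & 0xFFFFFFFFFFFFL; //low 48 bits, hence the 256TB store limit
+    }
+
+Storing the size next to the offset means a record can be fetched with a single index read followed by a single data read.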
+
+
+Data File
+----------
+The data file has no structure; it just contains one record after another. There is no metadata, and there are no checksums or separators.
+All structural information is stored in the index file.
+
+The first 8 bytes of the data file are reserved for the file header and store format version, in the format 'JDBMXXXX'.
+This is to prevent zero from being a valid address.
+
+Long Stack
+-------------
+A Long Stack is a reverse-linked list of 8-byte longs implemented on top of the record store. JDBM uses it to store internal information such as the list of free records.
+It supports only two operations (`put` and `take`), which add a number to, and remove a number from, the head of the list.
+Numbers are inserted and removed in LIFO (Last In, First Out) fashion.
+
+It is very low-level and tightly integrated with the RecordManager, to minimise the number of IO operations. Each operation typically requires only
+9 bytes to be read and written. It also takes nearly constant time and is not affected by store fragmentation.
+
+Each Long Stack has:
+
+**Long Stack Recid** in the Index File, which contains the address of the Head Record. Each Long Stack is identified by this recid.
+
+**Head Record**, which contains the most recently inserted numbers. It is located in the Data File and is referenced from the Index File.
+
+**Previous Records**, which contain previously inserted numbers. They are located in the Data File and form a linked list starting at the Head Record.
+
+Numbers are grouped into records, each holding up to 100 numbers. Records are chained into a reverse-linked list starting at the Head Record.
+
+When a number is inserted and the Head Record already holds 100 numbers, a new Head Record is created. The new Head Record stores the address of the Previous Record,
+and the address in the Index File is updated to point to the new Head Record.
+
+Each Long Stack Record has the following structure:
+
+    byte 0         - how many numbers are stored in this record
+    byte 1         - unused
+    bytes 2 to 7   - offset of the Previous Record in the Data File
+    bytes 8 to 15  - first number in this record
+    bytes 16 to 23 - second number in this record, and so on
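+
+As an illustration of this layout, a `take` operation could look roughly like the sketch below. The `store` object and its
+read/write methods are hypothetical stand-ins for the real IO layer, and `dataOffset` is the unpacking helper sketched earlier:
+
+    // Hypothetical sketch of 'take': pop the most recently inserted number.
+    long take(long longStackRecid) {
+        long head = dataOffset(store.readIndexValue(longStackRecid));
+        if (head == 0) return 0;                     // the stack is empty
+        int count = store.readUnsignedByte(head);    // byte 0: numbers in this record
+        long value = store.readLong(head + 8 + (count - 1) * 8L);
+        if (count > 1) {
+            store.writeByte(head, count - 1);        // just shrink the Head Record
+        } else {
+            // Head Record exhausted: bytes 2 to 7 point to the Previous Record,
+            // which now becomes the new Head Record.
+            long previous = store.readSixByteLong(head + 2);
+            store.writeIndexValue(longStackRecid, previous);
+            // the exhausted record itself can now be released
+        }
+        return value;
+    }
+
+Note how a typical `take` touches only the counter byte and one 8-byte number, which is where the '9 bytes per operation' figure above comes from.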
+
+Free positions in Index File
+--------------------------------
+When a record is deleted, the `RecordManager` sets its position in the Index File to zero. Recently released recids are stored, for later reuse, in the Long Stack at recid 2.
+
+List of free data records
+-----------------------------
+The list of free data records is stored in multiple Long Stacks, selected by record size. When a data record of size N is released, it is added to Long Stack N.
+When a data record of size N is allocated, the store only has to check Long Stack N for a free record to reuse;
+it never has to make a linear scan across the list of all free data records.
+
+TODO describe this more.
+
+Defrag
+--------
+Defragmentation recreates the store in a new file. It traverses the Index File, reads each address and inserts the corresponding data record into a new RecordManager.
+The new file is then moved over the old file.
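+
+The following sketch shows the idea, using the same hypothetical IO helpers as above; it is not JDBM's actual defrag code:
+
+    // Traverse all user recids and copy live records into a fresh store.
+    void defrag(Store oldStore, Store newStore) {
+        long slots = oldStore.indexFileSize() / 8;   // number of index slots
+        for (long recid = 2556; recid < slots; recid++) {
+            long indexValue = oldStore.readIndexValue(recid);
+            if (indexValue == 0) continue;           // deleted or unclaimed slot
+            byte[] data = oldStore.readRecordData(recid);
+            newStore.writeRecordData(recid, data);   // keep the same recid
+        }
+        // the new file is then moved over the old one
+    }
+
+Because records are written into the new file sequentially, the copy also removes any fragmentation accumulated in the old data file.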