diff --git a/create-export-package-metadata-pom.xml b/create-export-package-metadata-pom.xml
index 2cc517951..667a15e06 100644
--- a/create-export-package-metadata-pom.xml
+++ b/create-export-package-metadata-pom.xml
@@ -38,6 +38,14 @@
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <configuration>
+                    <source>8</source>
+                    <target>8</target>
+                </configuration>
+            </plugin>
diff --git a/src/com/sun/jna/Memory.java b/src/com/sun/jna/Memory.java
index cea967e61..b16520a86 100644
--- a/src/com/sun/jna/Memory.java
+++ b/src/com/sun/jna/Memory.java
@@ -22,16 +22,15 @@
  */
 package com.sun.jna;
 
+import com.sun.jna.internal.Cleaner;
+import com.sun.jna.internal.ConcurrentLongHashMap;
+
 import java.io.Closeable;
 import java.lang.ref.Reference;
 import java.lang.ref.WeakReference;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import com.sun.jna.internal.Cleaner;
 
 /**
  * A Pointer to memory obtained from the native heap via a
@@ -53,8 +52,8 @@
  */
 public class Memory extends Pointer implements Closeable {
     /** Keep track of all allocated memory so we can dispose of it before unloading. */
-    private static final Map<Long, Reference<Memory>> allocatedMemory =
-            new ConcurrentHashMap<>();
+    private static final ConcurrentLongHashMap<Reference<Memory>> allocatedMemory =
+            new ConcurrentLongHashMap<>();
 
     private static final WeakMemoryHolder buffers = new WeakMemoryHolder();
 
diff --git a/src/com/sun/jna/internal/ConcurrentLongHashMap.java b/src/com/sun/jna/internal/ConcurrentLongHashMap.java
new file mode 100644
index 000000000..f378904b0
--- /dev/null
+++ b/src/com/sun/jna/internal/ConcurrentLongHashMap.java
@@ -0,0 +1,505 @@
+/*
+ * The contents of this file is dual-licensed under 2
+ * alternative Open Source/Free licenses: LGPL 2.1 or later and
+ * Apache License 2.0. (starting with JNA version 4.0.0).
+ *
+ * You can freely decide which license you want to apply to
+ * the project.
+ *
+ * You may obtain a copy of the LGPL License at:
+ *
+ * http://www.gnu.org/licenses/licenses.html
+ *
+ * A copy is also included in the downloadable source code package
+ * containing JNA, in file "LGPL2.1".
+ *
+ * You may obtain a copy of the Apache License at:
+ *
+ * http://www.apache.org/licenses/
+ *
+ * A copy is also included in the downloadable source code package
+ * containing JNA, in file "AL2.0".
+ */
+package com.sun.jna.internal;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import java.util.concurrent.locks.StampedLock;
+import java.util.function.LongFunction;
+
+/**
+ * Map from long to an Object.
+ * <p>
+ * Provides similar methods as a {@literal ConcurrentMap} with 2 differences:
+ * <ul>
+ *   <li>1. No boxing/unboxing from {@literal long -> Long}
+ *   <li>2. Open hash map with linear probing, no node allocations to store the values
+ * </ul>
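+ * <p>
+ * A minimal usage sketch (illustrative keys and values only):
+ * <pre>{@code
+ * ConcurrentLongHashMap<String> map = new ConcurrentLongHashMap<>();
+ * map.put(42L, "value");                          // key stays a primitive long
+ * String v = map.get(42L);                        // lookup without boxing to Long
+ * String w = map.computeIfAbsent(7L, k -> "new"); // value created only when absent
+ * }</pre>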
+ */
+@SuppressWarnings("unchecked")
+public class ConcurrentLongHashMap<V> {
+
+    // EmptyValue marks a never-used bucket; DeletedValue is the tombstone left behind by remove()
+    private static final Object EmptyValue = null;
+    private static final Object DeletedValue = new Object();
+
+    private static final float MapFillFactor = 0.66f;
+
+    private static final int DefaultExpectedItems = 256;
+    private static final int DefaultConcurrencyLevel = 16;
+
+    private final Section<V>[] sections;
+
+    public ConcurrentLongHashMap() {
+        this(DefaultExpectedItems);
+    }
+
+    public ConcurrentLongHashMap(int expectedItems) {
+        this(expectedItems, DefaultConcurrencyLevel);
+    }
+
+    public ConcurrentLongHashMap(int expectedItems, int numSections) {
+        if (numSections <= 0) {
+            throw new IllegalArgumentException("numSections must be > 0");
+        }
+
+        if (expectedItems < numSections) {
+            expectedItems = numSections;
+        }
+
+        int perSectionExpectedItems = expectedItems / numSections;
+        int perSectionCapacity = (int) (perSectionExpectedItems / MapFillFactor);
+        this.sections = (Section<V>[]) new Section[numSections];
+
+        for (int i = 0; i < numSections; i++) {
+            sections[i] = new Section<>(perSectionCapacity);
+        }
+    }
+
+    public int size() {
+        int size = 0;
+        for (Section<V> s : sections) {
+            // read-acquire s.size that was write-released by s.unlockWrite
+            s.tryOptimisticRead();
+            // a stale value won't hurt: anyway it's subject to concurrent modifications
+            size += s.size;
+        }
+        return size;
+    }
+
+    long getUsedBucketCount() {
+        long usedBucketCount = 0;
+        for (Section<V> s : sections) {
+            usedBucketCount += s.usedBuckets;
+        }
+        return usedBucketCount;
+    }
+
+    public long capacity() {
+        long capacity = 0;
+        for (Section<V> s : sections) {
+            capacity += s.capacity;
+        }
+        return capacity;
+    }
+
+    public boolean isEmpty() {
+        for (Section<V> s : sections) {
+            // read-acquire s.size that was write-released by s.unlockWrite
+            s.tryOptimisticRead();
+            // a stale value won't hurt: anyway it's subject to concurrent modifications
+            if (s.size != 0) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    public V get(long key) {
+        long h = hash(key);
+        return getSection(h).get(key, (int) h);
+    }
+
+    public boolean containsKey(long key) {
+        return get(key) != null;
+    }
+
+    public V put(long key, V value) {
+        Objects.requireNonNull(value);
+        long h = hash(key);
+        return getSection(h).put(key, value, (int) h, false, null);
+    }
+
+    public V putIfAbsent(long key, V value) {
+        Objects.requireNonNull(value);
+        long h = hash(key);
+        return getSection(h).put(key, value, (int) h, true, null);
+    }
+
+    public V computeIfAbsent(long key, LongFunction<V> provider) {
+        Objects.requireNonNull(provider);
+        long h = hash(key);
+        return getSection(h).put(key, null, (int) h, true, provider);
+    }
+
+    public V remove(long key) {
+        long h = hash(key);
+        return getSection(h).remove(key, null, (int) h);
+    }
+
+    public boolean remove(long key, Object value) {
+        Objects.requireNonNull(value);
+        long h = hash(key);
+        return getSection(h).remove(key, value, (int) h) != null;
+    }
+
+    private Section<V> getSection(long hash) {
+        // Use 32 msb out of long to get the section
+        final int sectionIdx = (int) (hash >>> 32) & (sections.length - 1);
+        return sections[sectionIdx];
+    }
+
+    public void clear() {
+        for (Section<V> s : sections) {
+            s.clear();
+        }
+    }
+
+    public void forEach(EntryProcessor<V> processor) {
+        for (Section<V> s : sections) {
+            s.forEach(processor);
+        }
+    }
+
+    /**
+     * @return a new list of all keys (makes a copy)
+     */
+    public List<Long> keys() {
+        List<Long> keys = new ArrayList<>(size());
+        forEach((key, value) -> keys.add(key));
+        return keys;
+    }
+
+    public List<V> values() {
+        List<V> values = new ArrayList<>(size());
+        forEach((key, value) -> values.add(value));
+        return values;
+    }
+
+    public interface EntryProcessor<V> {
+        void accept(long key, V value);
+    }
+
+    // A section is a portion of the hash map that is covered by a single lock
+    private static final class Section<V> extends StampedLock {
+
+        private static final AtomicIntegerFieldUpdater<Section> CAPACITY_UPDATER =
+                AtomicIntegerFieldUpdater.newUpdater(Section.class, "capacity");
+        private long[] keys;
+        private V[] values;
+
+        private volatile int capacity;
+        private int size;
+        private int usedBuckets;
+        private int resizeThreshold;
+
+        Section(int capacity) {
+            this.capacity = alignToPowerOfTwo(capacity);
+            this.keys = new long[this.capacity];
+            this.values = (V[]) new Object[this.capacity];
+            this.size = 0;
+            this.usedBuckets = 0;
+            this.resizeThreshold = (int) (this.capacity * MapFillFactor);
+        }
+
+        @SuppressWarnings("NonAtomicVolatileUpdate")
+        V get(long key, int keyHash) {
+            int bucket = keyHash;
+
+            long stamp = tryOptimisticRead();
+            boolean acquiredLock = false;
+
+            try {
+                while (true) {
+                    int capacity = this.capacity;
+                    bucket = signSafeMod(bucket, capacity);
+
+                    // First try optimistic locking
+                    long storedKey = keys[bucket];
+                    V storedValue = values[bucket];
+
+                    if (!acquiredLock && validate(stamp)) {
+                        // The values we have read are consistent
+                        if (storedKey == key) {
+                            return storedValue != DeletedValue ? storedValue : null;
+                        } else if (storedValue == EmptyValue) {
+                            // Not found
+                            return null;
+                        }
+                    } else {
+                        // Fallback to acquiring read lock
+                        if (!acquiredLock) {
+                            stamp = readLock();
+                            acquiredLock = true;
+                            storedKey = keys[bucket];
+                            storedValue = values[bucket];
+                        }
+
+                        if (capacity != this.capacity) {
+                            // There has been a rehashing. We need to restart the search
+                            bucket = keyHash;
+                            continue;
+                        }
+
+                        if (storedKey == key) {
+                            return storedValue != DeletedValue ? storedValue : null;
+                        } else if (storedValue == EmptyValue) {
+                            // Not found
+                            return null;
+                        }
+                    }
+
+                    ++bucket;
+                }
+            } finally {
+                if (acquiredLock) {
+                    unlockRead(stamp);
+                }
+            }
+        }
+
+        @SuppressWarnings("NonAtomicVolatileUpdate")
+        V put(long key, V value, int keyHash, boolean onlyIfAbsent, LongFunction<V> valueProvider) {
+            int bucket = keyHash;
+
+            long stamp = writeLock();
+            int capacity = this.capacity;
+
+            // Remember where we find the first available spot
+            int firstDeletedKey = -1;
+
+            try {
+                while (true) {
+                    bucket = signSafeMod(bucket, capacity);
+
+                    long storedKey = keys[bucket];
+                    V storedValue = values[bucket];
+
+                    if (storedKey == key) {
+                        if (storedValue == EmptyValue) {
+                            values[bucket] = value != null ? value : (valueProvider != null ? valueProvider.apply(key) : null);
+                            ++size;
+                            ++usedBuckets;
+                            return valueProvider != null ? values[bucket] : null;
+                        } else if (storedValue == DeletedValue) {
+                            values[bucket] = value != null ? value : (valueProvider != null ? valueProvider.apply(key) : null);
+                            ++size;
+                            return valueProvider != null ? values[bucket] : null;
+                        } else if (!onlyIfAbsent) {
+                            // Overwrite the old value for the same key
+                            values[bucket] = value;
+                            return storedValue;
+                        } else {
+                            return storedValue;
+                        }
+                    } else if (storedValue == EmptyValue) {
+                        // Found an empty bucket. This means the key is not in the map. If we've already seen a deleted
+                        // key, we should write at that position
+                        if (firstDeletedKey != -1) {
+                            bucket = firstDeletedKey;
+                        } else {
+                            ++usedBuckets;
+                        }
+
+                        keys[bucket] = key;
+                        values[bucket] = value != null ? value : (valueProvider != null ? valueProvider.apply(key) : null);
+                        ++size;
+                        return valueProvider != null ? values[bucket] : null;
+                    } else if (storedValue == DeletedValue) {
+                        // The bucket contained a different deleted key
+                        if (firstDeletedKey == -1) {
+                            firstDeletedKey = bucket;
+                        }
+                    }
+
+                    ++bucket;
+                }
+            } finally {
+                if (usedBuckets > resizeThreshold) {
+                    try {
+                        rehash();
+                    } finally {
+                        unlockWrite(stamp);
+                    }
+                } else {
+                    unlockWrite(stamp);
+                }
+            }
+        }
+
+        @SuppressWarnings("NonAtomicVolatileUpdate")
+        private V remove(long key, Object value, int keyHash) {
+            int bucket = keyHash;
+            long stamp = writeLock();
+
+            try {
+                while (true) {
+                    int capacity = this.capacity;
+                    bucket = signSafeMod(bucket, capacity);
+
+                    long storedKey = keys[bucket];
+                    V storedValue = values[bucket];
+                    if (storedKey == key) {
+                        if (value == null || value.equals(storedValue)) {
+                            if (storedValue == EmptyValue || storedValue == DeletedValue) {
+                                return null;
+                            }
+
+                            --size;
+                            // If the next bucket is empty, this bucket ends a probe run and can be
+                            // fully reclaimed; otherwise leave a tombstone to keep probing intact
+                            V nextValueInArray = values[signSafeMod(bucket + 1, capacity)];
+                            if (nextValueInArray == EmptyValue) {
+                                values[bucket] = (V) EmptyValue;
+                                --usedBuckets;
+                            } else {
+                                values[bucket] = (V) DeletedValue;
+                            }
+
+                            return storedValue;
+                        } else {
+                            return null;
+                        }
+                    } else if (storedValue == EmptyValue) {
+                        // Key wasn't found
+                        return null;
+                    }
+
+                    ++bucket;
+                }
+
+            } finally {
+                unlockWrite(stamp);
+            }
+        }
+
+        void clear() {
+            long stamp = writeLock();
+
+            try {
+                Arrays.fill(keys, 0);
+                Arrays.fill(values, EmptyValue);
+                this.size = 0;
+                this.usedBuckets = 0;
+            } finally {
+                unlockWrite(stamp);
+            }
+        }
+
+        public void forEach(EntryProcessor<V> processor) {
+            long stamp = tryOptimisticRead();
+
+            int capacity = this.capacity;
+            long[] keys = this.keys;
+            V[] values = this.values;
+
+            boolean acquiredReadLock = false;
+
+            try {
+
+                // Validate no rehashing
+                if (!validate(stamp)) {
+                    // Fallback to read lock
+                    stamp = readLock();
+                    acquiredReadLock = true;
+
+                    capacity = this.capacity;
+                    keys = this.keys;
+                    values = this.values;
+                }
+
+                // Go through all the buckets for this section
+                for (int bucket = 0; bucket < capacity; bucket++) {
+                    long storedKey = keys[bucket];
+                    V storedValue = values[bucket];
+
+                    if (!acquiredReadLock && !validate(stamp)) {
+                        // Fallback to acquiring read lock
+                        stamp = readLock();
+                        acquiredReadLock = true;
+
+                        storedKey = keys[bucket];
+                        storedValue = values[bucket];
+                    }
+
+                    if (storedValue != DeletedValue && storedValue != EmptyValue) {
+                        processor.accept(storedKey, storedValue);
+                    }
+                }
+            } finally {
+                if (acquiredReadLock) {
+                    unlockRead(stamp);
+                }
+            }
+        }
+
+        private void rehash() {
+            // Expand the hashmap
+            int newCapacity = capacity * 2;
+            long[] newKeys = new long[newCapacity];
+            V[] newValues = (V[]) new Object[newCapacity];
+
+            // Re-hash table
+            for (int i = 0; i < keys.length; i++) {
+                long storedKey = keys[i];
+                V storedValue = values[i];
+                if (storedValue != EmptyValue && storedValue != DeletedValue) {
+                    insertKeyValueNoLock(newKeys, newValues, storedKey, storedValue);
+                }
+            }
+
+            keys = newKeys;
+            values = newValues;
+            usedBuckets = size;
+            CAPACITY_UPDATER.lazySet(this, newCapacity);
+            resizeThreshold = (int) (newCapacity * MapFillFactor);
+        }
+
+        private static <V> void insertKeyValueNoLock(long[] keys, V[] values, long key, V value) {
+            int bucket = (int) hash(key);
+
+            while (true) {
+                bucket = signSafeMod(bucket, keys.length);
+
+                V storedValue = values[bucket];
+
+                if (storedValue == EmptyValue) {
+                    // The bucket is empty, so we can use it
+                    keys[bucket] = key;
+                    values[bucket] = value;
+                    return;
+                }
+
+                ++bucket;
+            }
+        }
+    }
+
+    // MurmurHash2 64-bit finalizer constants; mixing spreads consecutive keys across sections and buckets
+    private static final long HashMixer = 0xc6a4a7935bd1e995L;
+    private static final int R = 47;
+
+    static long hash(long key) {
+        long hash = key * HashMixer;
+        hash ^= hash >>> R;
+        hash *= HashMixer;
+        return hash;
+    }
+
+    static int signSafeMod(long n, int max) {
+        // max is always a power of two, so the bitmask is an unsigned modulo
+        return (int) n & (max - 1);
+    }
+
+    static int alignToPowerOfTwo(int n) {
+        return (int) Math.pow(2, 32 - Integer.numberOfLeadingZeros(n - 1));
+    }
+}
\ No newline at end of file
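
For reference, the Memory.java hunk above removes a Long boxing on every allocation and free: the old ConcurrentHashMap<Long, Reference<Memory>> boxed the peer address on each put/get/remove, while the new map keys on the primitive long directly. A minimal sketch of that access pattern (illustrative only, not code from this patch; the variable names are placeholders):

    // Mirrors how Memory can track allocations by raw native address
    ConcurrentLongHashMap<Reference<Memory>> allocated = new ConcurrentLongHashMap<>();

    Memory m = new Memory(64);                   // 64 bytes on the native heap
    long peer = Pointer.nativeValue(m);          // raw address used directly as the key
    allocated.put(peer, new WeakReference<>(m)); // no Long.valueOf(peer) allocation
    allocated.remove(peer);                      // entry dropped when the memory is freed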