#6 added concurrent linked hash map library

1 parent 07bb8e9 commit 738e3a63fbcc6e35f5a9d010d44d130d938f250e @veebs committed May 29, 2012
15 licenses/LICENSE.concurrentlinkedhashmap.txt
@@ -0,0 +1,15 @@
+/*
+ * Copyright 2010 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
11 licenses/README.md
@@ -41,4 +41,15 @@ This product uses the Netty project:
* [http://netty.io/]
+Concurrent Linked Hash Map
+--------------------------
+This product uses the Concurrent Linked Hash Map project:
+
+ * LICENSE FILE:
+ * LICENSE.concurrentlinkedhashmap.txt (Apache 2)
+ * HOMEPAGE:
+ * [http://code.google.com/p/concurrentlinkedhashmap/]
+
+
+
1,695 ...r/src/main/java/org/mashupbots/socko/concurrentlinkedhashmap/ConcurrentLinkedHashMap.java
@@ -0,0 +1,1695 @@
+/*
+ * Copyright 2010 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mashupbots.socko.concurrentlinkedhashmap;
+
+import static org.mashupbots.socko.concurrentlinkedhashmap.ConcurrentLinkedHashMap.DrainStatus.IDLE;
+import static org.mashupbots.socko.concurrentlinkedhashmap.ConcurrentLinkedHashMap.DrainStatus.PROCESSING;
+import static org.mashupbots.socko.concurrentlinkedhashmap.ConcurrentLinkedHashMap.DrainStatus.REQUIRED;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.unmodifiableMap;
+import static java.util.Collections.unmodifiableSet;
+
+import java.io.InvalidObjectException;
+import java.io.ObjectInputStream;
+import java.io.Serializable;
+import java.util.AbstractCollection;
+import java.util.AbstractMap;
+import java.util.AbstractQueue;
+import java.util.AbstractSet;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicIntegerArray;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+/**
+ * A hash table supporting full concurrency of retrievals, adjustable expected
+ * concurrency for updates, and a maximum capacity to bound the map by. This
+ * implementation differs from {@link ConcurrentHashMap} in that it maintains a
+ * page replacement algorithm that is used to evict an entry when the map has
+ * exceeded its capacity. Unlike the <tt>Java Collections Framework</tt>, this
+ * map does not have a publicly visible constructor and instances are created
+ * through a {@link Builder}.
+ * <p>
+ * An entry is evicted from the map when the <tt>weighted capacity</tt> exceeds
+ * its <tt>maximum weighted capacity</tt> threshold. An {@link EntryWeigher}
+ * determines how many units of capacity an entry consumes. The default
+ * weigher assigns each value a weight of <tt>1</tt> to bound the map by the
+ * total number of key-value pairs. A map that holds collections may choose to
+ * weigh values by the number of elements in the collection and bound the map
+ * by the total number of elements that it contains. A change to a value that
+ * modifies its weight requires that an update operation is performed on the
+ * map.
+ * <p>
+ * An {@link EvictionListener} may be supplied for notification when an entry
+ * is evicted from the map. This listener is invoked on a caller's thread and
+ * will not block other threads from operating on the map. An implementation
+ * should be aware that the caller's thread will not expect long execution
+ * times or failures as a side effect of the listener being notified. Execution
+ * safety and a fast turnaround time can be achieved by performing the
+ * operation asynchronously, such as by submitting a task to an
+ * {@link java.util.concurrent.ExecutorService}.
+ * <p>
+ * The <tt>concurrency level</tt> determines the number of threads that can
+ * concurrently modify the table. Using a significantly higher or lower value
+ * than needed can waste space or lead to thread contention, but an estimate
+ * within an order of magnitude of the ideal value does not usually have a
+ * noticeable impact. Because placement in hash tables is essentially random,
+ * the actual concurrency will vary.
+ * <p>
+ * This class and its views and iterators implement all of the
+ * <em>optional</em> methods of the {@link Map} and {@link Iterator}
+ * interfaces.
+ * <p>
+ * Like {@link java.util.Hashtable} but unlike {@link HashMap}, this class
+ * does <em>not</em> allow <tt>null</tt> to be used as a key or value. Unlike
+ * {@link java.util.LinkedHashMap}, this class does <em>not</em> provide
+ * predictable iteration order. A snapshot of the keys and entries may be
+ * obtained in ascending and descending order of retention.
+ *
+ * @author ben.manes@gmail.com (Ben Manes)
+ * @param <K> the type of keys maintained by this map
+ * @param <V> the type of mapped values
+ * @see <a href="http://code.google.com/p/concurrentlinkedhashmap/">
+ * http://code.google.com/p/concurrentlinkedhashmap/</a>
+ */
+@ThreadSafe
+public final class ConcurrentLinkedHashMap<K, V> extends AbstractMap<K, V>
+ implements ConcurrentMap<K, V>, Serializable {
+
+ /*
+ * This class performs a best-effort bounding of a ConcurrentHashMap using a
+ * page-replacement algorithm to determine which entries to evict when the
+ * capacity is exceeded.
+ *
+ * The page replacement algorithm's data structures are kept eventually
+ * consistent with the map. An update to the map and recording of reads may
+ * not be immediately reflected on the algorithm's data structures. These
+ * structures are guarded by a lock and operations are applied in batches to
+ * avoid lock contention. The penalty of applying the batches is spread across
+ * threads so that the amortized cost is slightly higher than performing just
+ * the ConcurrentHashMap operation.
+ *
+ * A memento of the reads and writes that were performed on the map is
+ * recorded in a buffer. These buffers are drained at the first opportunity
+ * after a write or when a buffer exceeds a threshold size. A mostly strict
+ * ordering is achieved by observing that each buffer is in a weakly sorted
+ * order relative to the last drain. This allows the buffers to be merged in
+ * O(n) time so that the operations are run in the expected order.
+ *
+ * Due to a lack of a strict ordering guarantee, a task can be executed
+ * out-of-order, such as a removal followed by its addition. The state of the
+ * entry is encoded within the value's weight.
+ *
+ * Alive: The entry is in both the hash-table and the page replacement policy.
+ * This is represented by a positive weight.
+ *
+ * Retired: The entry is not in the hash-table and is pending removal from the
+ * page replacement policy. This is represented by a negative weight.
+ *
+ * Dead: The entry is not in the hash-table and is not in the page replacement
+ * policy. This is represented by a weight of zero.
+ *
+ * The Least Recently Used page replacement algorithm was chosen due to its
+ * simplicity, high hit rate, and ability to be implemented with O(1) time
+ * complexity.
+ */
+
+ /** The maximum weighted capacity of the map. */
+ static final long MAXIMUM_CAPACITY = Long.MAX_VALUE - Integer.MAX_VALUE;
+
+ /** The maximum number of pending operations per buffer. */
+ static final int MAXIMUM_BUFFER_SIZE = 1 << 20;
+
+ /** The number of pending operations per buffer before attempting to drain. */
+ static final int BUFFER_THRESHOLD = 16;
+
+ /** The number of buffers to use. */
+ static final int NUMBER_OF_BUFFERS;
+
+ /** Mask value for indexing into the buffers. */
+ static final int BUFFER_MASK;
+
+ /** The maximum number of operations to perform per amortized drain. */
+ static final int AMORTIZED_DRAIN_THRESHOLD;
+
+ /** A queue that discards all entries. */
+ static final Queue<?> DISCARDING_QUEUE = new DiscardingQueue();
+
+ static {
+ int buffers = ceilingNextPowerOfTwo(Runtime.getRuntime().availableProcessors());
+ AMORTIZED_DRAIN_THRESHOLD = (1 + buffers) * BUFFER_THRESHOLD;
+ NUMBER_OF_BUFFERS = buffers;
+ BUFFER_MASK = buffers - 1;
+ }
+
+ static int ceilingNextPowerOfTwo(int x) {
+ // From Hacker's Delight, Chapter 3, Harry S. Warren Jr.
+ return 1 << (Integer.SIZE - Integer.numberOfLeadingZeros(x - 1));
+ }
+
+ /** The draining status of the buffers. */
+ enum DrainStatus {
+
+ /** A drain is not taking place. */
+ IDLE,
+
+ /** A drain is required due to a pending write modification. */
+ REQUIRED,
+
+ /** A drain is in progress. */
+ PROCESSING
+ }
+
+ // The backing data store holding the key-value associations
+ final ConcurrentMap<K, Node> data;
+ final int concurrencyLevel;
+
+ // These fields provide support to bound the map by a maximum capacity
+ @GuardedBy("evictionLock")
+ final LinkedDeque<Node> evictionDeque;
+
+ @GuardedBy("evictionLock") // must write under lock
+ volatile long weightedSize;
+ @GuardedBy("evictionLock") // must write under lock
+ volatile long capacity;
+
+ volatile int nextOrder;
+ @GuardedBy("evictionLock")
+ int drainedOrder;
+
+ final Lock evictionLock;
+ final Queue<Task>[] buffers;
+ final AtomicIntegerArray bufferLengths;
+ final AtomicReference<DrainStatus> drainStatus;
+ final EntryWeigher<? super K, ? super V> weigher;
+
+ // These fields provide support for notifying a listener.
+ final Queue<Node> pendingNotifications;
+ final EvictionListener<K, V> listener;
+
+ transient Set<K> keySet;
+ transient Collection<V> values;
+ transient Set<Entry<K, V>> entrySet;
+
+ /**
+ * Creates an instance based on the builder's configuration.
+ */
+ @SuppressWarnings({"unchecked", "cast"})
+ private ConcurrentLinkedHashMap(Builder<K, V> builder) {
+ // The data store and its maximum capacity
+ concurrencyLevel = builder.concurrencyLevel;
+ capacity = Math.min(builder.capacity, MAXIMUM_CAPACITY);
+ data = new ConcurrentHashMap<K, Node>(builder.initialCapacity, 0.75f, concurrencyLevel);
+
+ // The eviction support
+ weigher = builder.weigher;
+ nextOrder = Integer.MIN_VALUE;
+ drainedOrder = Integer.MIN_VALUE;
+ evictionLock = new ReentrantLock();
+ evictionDeque = new LinkedDeque<Node>();
+ drainStatus = new AtomicReference<DrainStatus>(IDLE);
+
+ buffers = (Queue<Task>[]) new Queue[NUMBER_OF_BUFFERS];
+ bufferLengths = new AtomicIntegerArray(NUMBER_OF_BUFFERS);
+ for (int i = 0; i < NUMBER_OF_BUFFERS; i++) {
+ buffers[i] = new ConcurrentLinkedQueue<Task>();
+ }
+
+ // The notification queue and listener
+ listener = builder.listener;
+ pendingNotifications = (listener == DiscardingListener.INSTANCE)
+ ? (Queue<Node>) DISCARDING_QUEUE
+ : new ConcurrentLinkedQueue<Node>();
+ }
+
+ /** Ensures that the object is not null. */
+ static void checkNotNull(Object o) {
+ if (o == null) {
+ throw new NullPointerException();
+ }
+ }
+
+ /** Ensures that the argument expression is true. */
+ static void checkArgument(boolean expression) {
+ if (!expression) {
+ throw new IllegalArgumentException();
+ }
+ }
+
+ /** Ensures that the state expression is true. */
+ static void checkState(boolean expression) {
+ if (!expression) {
+ throw new IllegalStateException();
+ }
+ }
+
+ /* ---------------- Eviction Support -------------- */
+
+ /**
+ * Retrieves the maximum weighted capacity of the map.
+ *
+ * @return the maximum weighted capacity
+ */
+ public long capacity() {
+ return capacity;
+ }
+
+ /**
+ * Sets the maximum weighted capacity of the map and eagerly evicts entries
+ * until it shrinks to the appropriate size.
+ *
+ * @param capacity the maximum weighted capacity of the map
+ * @throws IllegalArgumentException if the capacity is negative
+ */
+ public void setCapacity(long capacity) {
+ checkArgument(capacity >= 0);
+ evictionLock.lock();
+ try {
+ this.capacity = Math.min(capacity, MAXIMUM_CAPACITY);
+ drainBuffers(AMORTIZED_DRAIN_THRESHOLD);
+ evict();
+ } finally {
+ evictionLock.unlock();
+ }
+ notifyListener();
+ }
+
+ /** Determines whether the map has exceeded its capacity. */
+ boolean hasOverflowed() {
+ return weightedSize > capacity;
+ }
+
+ /**
+ * Evicts entries from the map while it exceeds the capacity and appends
+ * evicted entries to the notification queue for processing.
+ */
+ @GuardedBy("evictionLock")
+ void evict() {
+ // Attempts to evict entries from the map if it exceeds the maximum
+ // capacity. If the eviction fails due to a concurrent removal of the
+ // victim, that removal may cancel out the addition that triggered this
+ // eviction. The victim is eagerly unlinked before the removal task so
+ // that if an eviction is still required then a new victim will be chosen
+ // for removal.
+ while (hasOverflowed()) {
+ Node node = evictionDeque.poll();
+
+ // If weighted values are used, then the pending operations will adjust
+ // the size to reflect the correct weight
+ if (node == null) {
+ return;
+ }
+
+ // Notify the listener only if the entry was evicted
+ if (data.remove(node.key, node)) {
+ pendingNotifications.add(node);
+ }
+
+ node.makeDead();
+ }
+ }
+
+ /**
+ * Performs the post-processing work required after the map operation.
+ *
+ * @param task the pending operation to be applied
+ */
+ void afterCompletion(Task task) {
+ boolean delayable = schedule(task);
+ if (shouldDrainBuffers(delayable)) {
+ tryToDrainBuffers(AMORTIZED_DRAIN_THRESHOLD);
+ }
+ notifyListener();
+ }
+
+ /**
+ * Schedules the task to be applied to the page replacement policy.
+ *
+ * @param task the pending operation
+ * @return if the draining of the buffers can be delayed
+ */
+ boolean schedule(Task task) {
+ int index = bufferIndex();
+ int buffered = bufferLengths.incrementAndGet(index);
+
+ if (task.isWrite()) {
+ buffers[index].add(task);
+ drainStatus.set(REQUIRED);
+ return false;
+ }
+
+ // A buffer may discard a read task if its length exceeds a tolerance level
+ if (buffered <= MAXIMUM_BUFFER_SIZE) {
+ buffers[index].add(task);
+ return (buffered <= BUFFER_THRESHOLD);
+ } else { // not optimized for fail-safe scenario
+ bufferLengths.decrementAndGet(index);
+ return false;
+ }
+ }
+
+ /** Returns the index to the buffer that the task should be scheduled on. */
+ static int bufferIndex() {
+ // A buffer is chosen by the thread's id so that tasks are distributed in a
+ // pseudo-even manner. This helps avoid hot entries causing contention due
+ // to other threads trying to append to the same buffer.
+ return (int) Thread.currentThread().getId() & BUFFER_MASK;
+ }
+
+ /** Returns the ordering value to assign to a task. */
+ int nextOrdering() {
+ // The next ordering is acquired in a racy fashion as the increment is not
+ // atomic with the insertion into a buffer. This means that concurrent tasks
+ // can have the same ordering and the buffers are in a weakly sorted order.
+ return nextOrder++;
+ }
+
+ /**
+ * Determines whether the buffers should be drained.
+ *
+ * @param delayable if a drain should be delayed until required
+ * @return if a drain should be attempted
+ */
+ boolean shouldDrainBuffers(boolean delayable) {
+ DrainStatus status = drainStatus.get();
+ return (status != PROCESSING) & (!delayable | (status == REQUIRED));
+ }
+
+ /**
+ * Attempts to acquire the eviction lock and apply the pending operations to
+ * the page replacement policy.
+ *
+ * @param maxToDrain the maximum number of operations to drain
+ */
+ void tryToDrainBuffers(int maxToDrain) {
+ if (evictionLock.tryLock()) {
+ try {
+ drainStatus.set(PROCESSING);
+ drainBuffers(maxToDrain);
+ } finally {
+ drainStatus.compareAndSet(PROCESSING, IDLE);
+ evictionLock.unlock();
+ }
+ }
+ }
+
+ /**
+ * Drains the buffers and applies the pending operations.
+ *
+ * @param maxToDrain the maximum number of operations to drain
+ */
+ @GuardedBy("evictionLock")
+ void drainBuffers(int maxToDrain) {
+ // A mostly strict ordering is achieved by observing that each buffer
+ // contains tasks in a weakly sorted order starting from the last drain.
+ // The buffers can be merged into a sorted list in O(n) time by using
+ // counting sort and chaining on a collision.
+
+ // The output is capped to the expected number of tasks plus additional
+ // slack to optimistically handle the concurrent additions to the buffers.
+ Task[] tasks = new Task[maxToDrain];
+
+ // Moves the tasks into the output array, applies them, and updates the
+ // marker for the starting order of the next drain.
+ int maxTaskIndex = moveTasksFromBuffers(tasks);
+ runTasks(tasks, maxTaskIndex);
+ updateDrainedOrder(tasks, maxTaskIndex);
+ }
+
+ /**
+ * Moves the tasks from the buffers into the output array.
+ *
+ * @param tasks the ordered array of the pending operations
+ * @return the highest index location of a task that was added to the array
+ */
+ @GuardedBy("evictionLock")
+ int moveTasksFromBuffers(Task[] tasks) {
+ int maxTaskIndex = -1;
+ for (int i = 0; i < buffers.length; i++) {
+ int maxIndex = moveTasksFromBuffer(tasks, i);
+ maxTaskIndex = Math.max(maxIndex, maxTaskIndex);
+ }
+ return maxTaskIndex;
+ }
+
+ /**
+ * Moves the tasks from the specified buffer into the output array.
+ *
+ * @param tasks the ordered array of the pending operations
+ * @param bufferIndex the buffer to drain into the tasks array
+ * @return the highest index location of a task that was added to the array
+ */
+ @GuardedBy("evictionLock")
+ int moveTasksFromBuffer(Task[] tasks, int bufferIndex) {
+ // While a buffer is being drained it may be concurrently appended to. The
+ // number of tasks removed is tracked so that the length can be decremented
+ // by the delta rather than set to zero.
+ Queue<Task> buffer = buffers[bufferIndex];
+ int removedFromBuffer = 0;
+
+ Task task;
+ int maxIndex = -1;
+ while ((task = buffer.poll()) != null) {
+ removedFromBuffer++;
+
+ // The index into the output array is determined by calculating the offset
+ // since the last drain
+ int index = task.getOrder() - drainedOrder;
+ if (index < 0) {
+ // The task was missed by the last drain and can be run immediately
+ task.run();
+ } else if (index >= tasks.length) {
+ // Due to concurrent additions, the order exceeds the capacity of the
+ // output array. It is added to the end as overflow and the remaining
+ // tasks in the buffer will be handled by the next drain.
+ maxIndex = tasks.length - 1;
+ addTaskToChain(tasks, task, maxIndex);
+ break;
+ } else {
+ maxIndex = Math.max(index, maxIndex);
+ addTaskToChain(tasks, task, index);
+ }
+ }
+ bufferLengths.addAndGet(bufferIndex, -removedFromBuffer);
+ return maxIndex;
+ }
+
+ /**
+ * Adds the task as the head of the chain at the index location.
+ *
+ * @param tasks the ordered array of the pending operations
+ * @param task the pending operation to add
+ * @param index the array location
+ */
+ @GuardedBy("evictionLock")
+ void addTaskToChain(Task[] tasks, Task task, int index) {
+ task.setNext(tasks[index]);
+ tasks[index] = task;
+ }
+
+ /**
+ * Runs the pending page replacement policy operations.
+ *
+ * @param tasks the ordered array of the pending operations
+ * @param maxTaskIndex the maximum index of the array
+ */
+ @GuardedBy("evictionLock")
+ void runTasks(Task[] tasks, int maxTaskIndex) {
+ for (int i = 0; i <= maxTaskIndex; i++) {
+ runTasksInChain(tasks[i]);
+ }
+ }
+
+ /**
+ * Runs the pending operations on the linked chain.
+ *
+ * @param task the first task in the chain of operations
+ */
+ @GuardedBy("evictionLock")
+ void runTasksInChain(Task task) {
+ while (task != null) {
+ Task current = task;
+ task = task.getNext();
+ current.setNext(null);
+ current.run();
+ }
+ }
+
+ /**
+ * Updates the order to start the next drain from.
+ *
+ * @param tasks the ordered array of operations
+ * @param maxTaskIndex the maximum index of the array
+ */
+ @GuardedBy("evictionLock")
+ void updateDrainedOrder(Task[] tasks, int maxTaskIndex) {
+ if (maxTaskIndex >= 0) {
+ Task task = tasks[maxTaskIndex];
+ drainedOrder = task.getOrder() + 1;
+ }
+ }
+
+ /** Notifies the listener of entries that were evicted. */
+ void notifyListener() {
+ Node node;
+ while ((node = pendingNotifications.poll()) != null) {
+ listener.onEviction(node.key, node.getValue());
+ }
+ }
+
+ /** Updates the node's location in the page replacement policy. */
+ class ReadTask extends AbstractTask {
+ final Node node;
+
+ ReadTask(Node node) {
+ this.node = node;
+ }
+
+ @Override
+ @GuardedBy("evictionLock")
+ public void run() {
+ // An entry may be scheduled for reordering despite having been previously
+ // removed. This can occur when the entry was concurrently read while a
+ // writer was removing it. If the entry is no longer linked then it does
+ // not need to be processed.
+ if (evictionDeque.contains(node)) {
+ evictionDeque.moveToBack(node);
+ }
+ }
+
+ @Override
+ public boolean isWrite() {
+ return false;
+ }
+ }
+
+ /** Adds the node to the page replacement policy. */
+ final class AddTask extends AbstractTask {
+ final Node node;
+ final int weight;
+
+ AddTask(Node node, int weight) {
+ this.weight = weight;
+ this.node = node;
+ }
+
+ @Override
+ @GuardedBy("evictionLock")
+ public void run() {
+ weightedSize += weight;
+
+ // ignore out-of-order write operations
+ if (node.get().isAlive()) {
+ evictionDeque.add(node);
+ evict();
+ }
+ }
+
+ @Override
+ public boolean isWrite() {
+ return true;
+ }
+ }
+
+ /** Removes a node from the page replacement policy. */
+ final class RemovalTask extends AbstractTask {
+ final Node node;
+
+ RemovalTask(Node node) {
+ this.node = node;
+ }
+
+ @Override
+ @GuardedBy("evictionLock")
+ public void run() {
+ // add may not have been processed yet
+ evictionDeque.remove(node);
+ node.makeDead();
+ }
+
+ @Override
+ public boolean isWrite() {
+ return true;
+ }
+ }
+
+ /** Updates the weighted size and evicts an entry on overflow. */
+ final class UpdateTask extends ReadTask {
+ final int weightDifference;
+
+ public UpdateTask(Node node, int weightDifference) {
+ super(node);
+ this.weightDifference = weightDifference;
+ }
+
+ @Override
+ @GuardedBy("evictionLock")
+ public void run() {
+ super.run();
+ weightedSize += weightDifference;
+ evict();
+ }
+
+ @Override
+ public boolean isWrite() {
+ return true;
+ }
+ }
+
+ /* ---------------- Concurrent Map Support -------------- */
+
+ @Override
+ public boolean isEmpty() {
+ return data.isEmpty();
+ }
+
+ @Override
+ public int size() {
+ return data.size();
+ }
+
+ /**
+ * Returns the weighted size of this map.
+ *
+ * @return the combined weight of the values in this map
+ */
+ public long weightedSize() {
+ return Math.max(0, weightedSize);
+ }
+
+ @Override
+ public void clear() {
+ // The alternative is to iterate through the keys and call #remove(), which
+ // adds unnecessary contention on the eviction lock and buffers.
+ evictionLock.lock();
+ try {
+ Node node;
+ while ((node = evictionDeque.poll()) != null) {
+ data.remove(node.key, node);
+ node.makeDead();
+ }
+
+ // Drain the buffers and run only the write tasks
+ for (int i = 0; i < buffers.length; i++) {
+ Queue<Task> buffer = buffers[i];
+ int removed = 0;
+ Task task;
+ while ((task = buffer.poll()) != null) {
+ if (task.isWrite()) {
+ task.run();
+ }
+ removed++;
+ }
+ bufferLengths.addAndGet(i, -removed);
+ }
+ } finally {
+ evictionLock.unlock();
+ }
+ }
+
+ @Override
+ public boolean containsKey(Object key) {
+ return data.containsKey(key);
+ }
+
+ @Override
+ public boolean containsValue(Object value) {
+ checkNotNull(value);
+
+ for (Node node : data.values()) {
+ if (node.getValue().equals(value)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public V get(Object key) {
+ final Node node = data.get(key);
+ if (node == null) {
+ return null;
+ }
+ afterCompletion(new ReadTask(node));
+ return node.getValue();
+ }
+
+ @Override
+ public V put(K key, V value) {
+ return put(key, value, false);
+ }
+
+ @Override
+ public V putIfAbsent(K key, V value) {
+ return put(key, value, true);
+ }
+
+ /**
+ * Adds a node to the list and the data store. If an existing node is found,
+ * then its value is updated if allowed.
+ *
+ * @param key key with which the specified value is to be associated
+ * @param value value to be associated with the specified key
+ * @param onlyIfAbsent a write is performed only if the key is not already
+ * associated with a value
+ * @return the prior value in the data store or null if no mapping was found
+ */
+ V put(K key, V value, boolean onlyIfAbsent) {
+ checkNotNull(value);
+
+ final int weight = weigher.weightOf(key, value);
+ final WeightedValue<V> weightedValue = new WeightedValue<V>(value, weight);
+ final Node node = new Node(key, weightedValue);
+
+ for (;;) {
+ final Node prior = data.putIfAbsent(node.key, node);
+ if (prior == null) {
+ afterCompletion(new AddTask(node, weight));
+ return null;
+ } else if (onlyIfAbsent) {
+ afterCompletion(new ReadTask(prior));
+ return prior.getValue();
+ }
+ for (;;) {
+ final WeightedValue<V> oldWeightedValue = prior.get();
+ if (!oldWeightedValue.isAlive()) {
+ break;
+ }
+
+ if (prior.compareAndSet(oldWeightedValue, weightedValue)) {
+ final int weightedDifference = weight - oldWeightedValue.weight;
+ final Task task = (weightedDifference == 0)
+ ? new ReadTask(prior)
+ : new UpdateTask(prior, weightedDifference);
+ afterCompletion(task);
+ return oldWeightedValue.value;
+ }
+ }
+ }
+ }
+
+ @Override
+ public V remove(Object key) {
+ final Node node = data.remove(key);
+ if (node == null) {
+ return null;
+ }
+
+ node.makeRetired();
+ afterCompletion(new RemovalTask(node));
+ return node.getValue();
+ }
+
+ @Override
+ public boolean remove(Object key, Object value) {
+ Node node = data.get(key);
+ if ((node == null) || (value == null)) {
+ return false;
+ }
+
+ WeightedValue<V> weightedValue = node.get();
+ for (;;) {
+ if (weightedValue.hasValue(value)) {
+ if (node.tryToRetire(weightedValue)) {
+ if (data.remove(key, node)) {
+ afterCompletion(new RemovalTask(node));
+ return true;
+ }
+ } else {
+ weightedValue = node.get();
+ if (weightedValue.isAlive()) {
+ // retry as an intermediate update may have replaced the value with
+ // an equal instance that has a different reference identity
+ continue;
+ }
+ }
+ }
+ return false;
+ }
+ }
+
+ @Override
+ public V replace(K key, V value) {
+ checkNotNull(value);
+
+ final int weight = weigher.weightOf(key, value);
+ final WeightedValue<V> weightedValue = new WeightedValue<V>(value, weight);
+
+ final Node node = data.get(key);
+ if (node == null) {
+ return null;
+ }
+ for (;;) {
+ WeightedValue<V> oldWeightedValue = node.get();
+ if (!oldWeightedValue.isAlive()) {
+ return null;
+ }
+ if (node.compareAndSet(oldWeightedValue, weightedValue)) {
+ int weightedDifference = weight - oldWeightedValue.weight;
+ final Task task = (weightedDifference == 0)
+ ? new ReadTask(node)
+ : new UpdateTask(node, weightedDifference);
+ afterCompletion(task);
+ return oldWeightedValue.value;
+ }
+ }
+ }
+
+ @Override
+ public boolean replace(K key, V oldValue, V newValue) {
+ checkNotNull(oldValue);
+ checkNotNull(newValue);
+
+ final int weight = weigher.weightOf(key, newValue);
+ final WeightedValue<V> newWeightedValue = new WeightedValue<V>(newValue, weight);
+
+ final Node node = data.get(key);
+ if (node == null) {
+ return false;
+ }
+ for (;;) {
+ final WeightedValue<V> weightedValue = node.get();
+ if (!weightedValue.isAlive() || !weightedValue.hasValue(oldValue)) {
+ return false;
+ }
+ if (node.compareAndSet(weightedValue, newWeightedValue)) {
+ int weightedDifference = weight - weightedValue.weight;
+ final Task task = (weightedDifference == 0)
+ ? new ReadTask(node)
+ : new UpdateTask(node, weightedDifference);
+ afterCompletion(task);
+ return true;
+ }
+ }
+ }
+
+ @Override
+ public Set<K> keySet() {
+ Set<K> ks = keySet;
+ return (ks == null) ? (keySet = new KeySet()) : ks;
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Set} view of the keys contained in
+ * this map. The set's iterator returns the keys whose order of iteration is
+ * the ascending order in which its entries are considered eligible for
+ * retention, from the least-likely to be retained to the most-likely.
+ * <p>
+ * Beware that, unlike in {@link #keySet()}, obtaining the set is <em>NOT</em>
+ * a constant-time operation. Because of the asynchronous nature of the page
+ * replacement policy, determining the retention ordering requires a traversal
+ * of the keys.
+ *
+ * @return an ascending snapshot view of the keys in this map
+ */
+ public Set<K> ascendingKeySet() {
+ return orderedKeySet(true, Integer.MAX_VALUE);
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Set} view of the keys contained in
+ * this map. The set's iterator returns the keys whose order of iteration is
+ * the ascending order in which its entries are considered eligible for
+ * retention, from the least-likely to be retained to the most-likely.
+ * <p>
+ * Beware that, unlike in {@link #keySet()}, obtaining the set is <em>NOT</em>
+ * a constant-time operation. Because of the asynchronous nature of the page
+ * replacement policy, determining the retention ordering requires a traversal
+ * of the keys.
+ *
+ * @param limit the maximum size of the returned set
+ * @return an ascending snapshot view of the keys in this map
+ * @throws IllegalArgumentException if the limit is negative
+ */
+ public Set<K> ascendingKeySetWithLimit(int limit) {
+ return orderedKeySet(true, limit);
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Set} view of the keys contained in
+ * this map. The set's iterator returns the keys whose order of iteration is
+ * the descending order in which its entries are considered eligible for
+ * retention, from the most-likely to be retained to the least-likely.
+ * <p>
+ * Beware that, unlike in {@link #keySet()}, obtaining the set is <em>NOT</em>
+ * a constant-time operation. Because of the asynchronous nature of the page
+ * replacement policy, determining the retention ordering requires a traversal
+ * of the keys.
+ *
+ * @return a descending snapshot view of the keys in this map
+ */
+ public Set<K> descendingKeySet() {
+ return orderedKeySet(false, Integer.MAX_VALUE);
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Set} view of the keys contained in
+ * this map. The set's iterator returns the keys whose order of iteration is
+ * the descending order in which its entries are considered eligible for
+ * retention, from the most-likely to be retained to the least-likely.
+ * <p>
+ * Beware that, unlike in {@link #keySet()}, obtaining the set is <em>NOT</em>
+ * a constant-time operation. Because of the asynchronous nature of the page
+ * replacement policy, determining the retention ordering requires a traversal
+ * of the keys.
+ *
+ * @param limit the maximum size of the returned set
+ * @return a descending snapshot view of the keys in this map
+ * @throws IllegalArgumentException if the limit is negative
+ */
+ public Set<K> descendingKeySetWithLimit(int limit) {
+ return orderedKeySet(false, limit);
+ }
+
+ Set<K> orderedKeySet(boolean ascending, int limit) {
+ checkArgument(limit >= 0);
+ evictionLock.lock();
+ try {
+ drainBuffers(AMORTIZED_DRAIN_THRESHOLD);
+
+ int initialCapacity = (weigher == Weighers.singleton())
+ ? Math.min(limit, (int) weightedSize())
+ : 16;
+ Set<K> keys = new LinkedHashSet<K>(initialCapacity);
+ Iterator<Node> iterator = ascending
+ ? evictionDeque.iterator()
+ : evictionDeque.descendingIterator();
+ while (iterator.hasNext() && (limit > keys.size())) {
+ keys.add(iterator.next().key);
+ }
+ return unmodifiableSet(keys);
+ } finally {
+ evictionLock.unlock();
+ }
+ }
+
+ @Override
+ public Collection<V> values() {
+ Collection<V> vs = values;
+ return (vs == null) ? (values = new Values()) : vs;
+ }
+
+ @Override
+ public Set<Entry<K, V>> entrySet() {
+ Set<Entry<K, V>> es = entrySet;
+ return (es == null) ? (entrySet = new EntrySet()) : es;
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
+ * in this map. The map's collections return the mappings whose order of
+ * iteration is the ascending order in which its entries are considered
+ * eligible for retention, from the least-likely to be retained to the
+ * most-likely.
+ * <p>
+ * Beware that obtaining the mappings is <em>NOT</em> a constant-time
+ * operation. Because of the asynchronous nature of the page replacement
+ * policy, determining the retention ordering requires a traversal of the
+ * entries.
+ *
+ * @return an ascending snapshot view of this map
+ */
+ public Map<K, V> ascendingMap() {
+ return orderedMap(true, Integer.MAX_VALUE);
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
+ * in this map. The map's collections return the mappings whose order of
+ * iteration is the ascending order in which its entries are considered
+ * eligible for retention, from the least-likely to be retained to the
+ * most-likely.
+ * <p>
+ * Beware that obtaining the mappings is <em>NOT</em> a constant-time
+ * operation. Because of the asynchronous nature of the page replacement
+ * policy, determining the retention ordering requires a traversal of the
+ * entries.
+ *
+ * @param limit the maximum size of the returned map
+ * @return an ascending snapshot view of this map
+ * @throws IllegalArgumentException if the limit is negative
+ */
+ public Map<K, V> ascendingMapWithLimit(int limit) {
+ return orderedMap(true, limit);
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
+ * in this map. The map's collections return the mappings whose order of
+ * iteration is the descending order in which its entries are considered
+ * eligible for retention, from the most-likely to be retained to the
+ * least-likely.
+ * <p>
+ * Beware that obtaining the mappings is <em>NOT</em> a constant-time
+ * operation. Because of the asynchronous nature of the page replacement
+ * policy, determining the retention ordering requires a traversal of the
+ * entries.
+ *
+ * @return a descending snapshot view of this map
+ */
+ public Map<K, V> descendingMap() {
+ return orderedMap(false, Integer.MAX_VALUE);
+ }
+
+ /**
+ * Returns an unmodifiable snapshot {@link Map} view of the mappings contained
+ * in this map. The map's collections return the mappings whose order of
+ * iteration is the descending order in which its entries are considered
+ * eligible for retention, from the most-likely to be retained to the
+ * least-likely.
+ * <p>
+ * Beware that obtaining the mappings is <em>NOT</em> a constant-time
+ * operation. Because of the asynchronous nature of the page replacement
+ * policy, determining the retention ordering requires a traversal of the
+ * entries.
+ *
+ * @param limit the maximum size of the returned map
+ * @return a descending snapshot view of this map
+ * @throws IllegalArgumentException if the limit is negative
+ */
+ public Map<K, V> descendingMapWithLimit(int limit) {
+ return orderedMap(false, limit);
+ }
+
+ Map<K, V> orderedMap(boolean ascending, int limit) {
+ checkArgument(limit >= 0);
+ evictionLock.lock();
+ try {
+ drainBuffers(AMORTIZED_DRAIN_THRESHOLD);
+
+ int initialCapacity = (weigher == Weighers.singleton())
+ ? Math.min(limit, (int) weightedSize())
+ : 16;
+ Map<K, V> map = new LinkedHashMap<K, V>(initialCapacity);
+ Iterator<Node> iterator = ascending
+ ? evictionDeque.iterator()
+ : evictionDeque.descendingIterator();
+ while (iterator.hasNext() && (limit > map.size())) {
+ Node node = iterator.next();
+ map.put(node.key, node.getValue());
+ }
+ return unmodifiableMap(map);
+ } finally {
+ evictionLock.unlock();
+ }
+ }
+
+ /** A value, its weight, and the entry's status. */
+ @Immutable
+ static final class WeightedValue<V> {
+ final int weight;
+ final V value;
+
+ WeightedValue(V value, int weight) {
+ this.weight = weight;
+ this.value = value;
+ }
+
+ boolean hasValue(Object o) {
+ return (o == value) || value.equals(o);
+ }
+
+ /**
+ * If the entry is available in the hash-table and page replacement policy.
+ */
+ boolean isAlive() {
+ return weight > 0;
+ }
+
+ /**
+ * If the entry was removed from the hash-table and is awaiting removal from
+ * the page replacement policy.
+ */
+ boolean isRetired() {
+ return weight < 0;
+ }
+
+ /**
+ * If the entry was removed from the hash-table and the page replacement
+ * policy.
+ */
+ boolean isDead() {
+ return weight == 0;
+ }
+ }
+
+ /**
+ * A node contains the key, the weighted value, and the linkage pointers on
+ * the page-replacement algorithm's data structures.
+ */
+ @SuppressWarnings("serial")
+ final class Node extends AtomicReference<WeightedValue<V>> implements Linked<Node> {
+ final K key;
+ @GuardedBy("evictionLock")
+ Node prev;
+ @GuardedBy("evictionLock")
+ Node next;
+
+ /** Creates a new, unlinked node. */
+ Node(K key, WeightedValue<V> weightedValue) {
+ super(weightedValue);
+ this.key = key;
+ }
+
+ @Override
+ @GuardedBy("evictionLock")
+ public Node getPrevious() {
+ return prev;
+ }
+
+ @Override
+ @GuardedBy("evictionLock")
+ public void setPrevious(Node prev) {
+ this.prev = prev;
+ }
+
+ @Override
+ @GuardedBy("evictionLock")
+ public Node getNext() {
+ return next;
+ }
+
+ @Override
+ @GuardedBy("evictionLock")
+ public void setNext(Node next) {
+ this.next = next;
+ }
+
+ /** Retrieves the value held by the current <tt>WeightedValue</tt>. */
+ V getValue() {
+ return get().value;
+ }
+
+ /**
+ * Attempts to transition the node from the <tt>alive</tt> state to the
+ * <tt>retired</tt> state.
+ *
+ * @param expect the expected weighted value
+ * @return if successful
+ */
+ boolean tryToRetire(WeightedValue<V> expect) {
+ if (expect.isAlive()) {
+ WeightedValue<V> retired = new WeightedValue<V>(expect.value, -expect.weight);
+ return compareAndSet(expect, retired);
+ }
+ return false;
+ }
+
+ /**
+ * Atomically transitions the node from the <tt>alive</tt> state to the
+ * <tt>retired</tt> state, if a valid transition.
+ */
+ void makeRetired() {
+ for (;;) {
+ WeightedValue<V> current = get();
+ if (!current.isAlive()) {
+ return;
+ }
+ WeightedValue<V> retired = new WeightedValue<V>(current.value, -current.weight);
+ if (compareAndSet(current, retired)) {
+ return;
+ }
+ }
+ }
+
+ /**
+ * Atomically transitions the node to the <tt>dead</tt> state and decrements
+ * the <tt>weightedSize</tt>.
+ */
+ @GuardedBy("evictionLock")
+ void makeDead() {
+ for (;;) {
+ WeightedValue<V> current = get();
+ WeightedValue<V> dead = new WeightedValue<V>(current.value, 0);
+ if (compareAndSet(current, dead)) {
+ weightedSize -= Math.abs(current.weight);
+ return;
+ }
+ }
+ }
+ }
+
+ /** An adapter to safely externalize the keys. */
+ final class KeySet extends AbstractSet<K> {
+ final ConcurrentLinkedHashMap<K, V> map = ConcurrentLinkedHashMap.this;
+
+ @Override
+ public int size() {
+ return map.size();
+ }
+
+ @Override
+ public void clear() {
+ map.clear();
+ }
+
+ @Override
+ public Iterator<K> iterator() {
+ return new KeyIterator();
+ }
+
+ @Override
+ public boolean contains(Object obj) {
+ return containsKey(obj);
+ }
+
+ @Override
+ public boolean remove(Object obj) {
+ return (map.remove(obj) != null);
+ }
+
+ @Override
+ public Object[] toArray() {
+ return map.data.keySet().toArray();
+ }
+
+ @Override
+ public <T> T[] toArray(T[] array) {
+ return map.data.keySet().toArray(array);
+ }
+ }
+
+ /** An adapter to safely externalize the key iterator. */
+ final class KeyIterator implements Iterator<K> {
+ final Iterator<K> iterator = data.keySet().iterator();
+ K current;
+
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public K next() {
+ current = iterator.next();
+ return current;
+ }
+
+ @Override
+ public void remove() {
+ checkState(current != null);
+ ConcurrentLinkedHashMap.this.remove(current);
+ current = null;
+ }
+ }
+
+ /** An adapter to safely externalize the values. */
+ final class Values extends AbstractCollection<V> {
+
+ @Override
+ public int size() {
+ return ConcurrentLinkedHashMap.this.size();
+ }
+
+ @Override
+ public void clear() {
+ ConcurrentLinkedHashMap.this.clear();
+ }
+
+ @Override
+ public Iterator<V> iterator() {
+ return new ValueIterator();
+ }
+
+ @Override
+ public boolean contains(Object o) {
+ return containsValue(o);
+ }
+ }
+
+ /** An adapter to safely externalize the value iterator. */
+ final class ValueIterator implements Iterator<V> {
+ final Iterator<Node> iterator = data.values().iterator();
+ Node current;
+
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public V next() {
+ current = iterator.next();
+ return current.getValue();
+ }
+
+ @Override
+ public void remove() {
+ checkState(current != null);
+ ConcurrentLinkedHashMap.this.remove(current.key);
+ current = null;
+ }
+ }
+
+ /** An adapter to safely externalize the entries. */
+ final class EntrySet extends AbstractSet<Entry<K, V>> {
+ final ConcurrentLinkedHashMap<K, V> map = ConcurrentLinkedHashMap.this;
+
+ @Override
+ public int size() {
+ return map.size();
+ }
+
+ @Override
+ public void clear() {
+ map.clear();
+ }
+
+ @Override
+ public Iterator<Entry<K, V>> iterator() {
+ return new EntryIterator();
+ }
+
+ @Override
+ public boolean contains(Object obj) {
+ if (!(obj instanceof Entry<?, ?>)) {
+ return false;
+ }
+ Entry<?, ?> entry = (Entry<?, ?>) obj;
+ Node node = map.data.get(entry.getKey());
+ return (node != null) && (node.getValue().equals(entry.getValue()));
+ }
+
+ @Override
+ public boolean add(Entry<K, V> entry) {
+ return (map.putIfAbsent(entry.getKey(), entry.getValue()) == null);
+ }
+
+ @Override
+ public boolean remove(Object obj) {
+ if (!(obj instanceof Entry<?, ?>)) {
+ return false;
+ }
+ Entry<?, ?> entry = (Entry<?, ?>) obj;
+ return map.remove(entry.getKey(), entry.getValue());
+ }
+ }
+
+ /** An adapter to safely externalize the entry iterator. */
+ final class EntryIterator implements Iterator<Entry<K, V>> {
+ final Iterator<Node> iterator = data.values().iterator();
+ Node current;
+
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
+ }
+
+ @Override
+ public Entry<K, V> next() {
+ current = iterator.next();
+ return new WriteThroughEntry(current);
+ }
+
+ @Override
+ public void remove() {
+ checkState(current != null);
+ ConcurrentLinkedHashMap.this.remove(current.key);
+ current = null;
+ }
+ }
+
+ /** An entry that allows updates to write through to the map. */
+ final class WriteThroughEntry extends SimpleEntry<K, V> {
+ static final long serialVersionUID = 1;
+
+ WriteThroughEntry(Node node) {
+ super(node.key, node.getValue());
+ }
+
+ @Override
+ public V setValue(V value) {
+ put(getKey(), value);
+ return super.setValue(value);
+ }
+
+ Object writeReplace() {
+ return new SimpleEntry<K, V>(this);
+ }
+ }
+
+ /** A weigher that enforces that the weight falls within a valid range. */
+ static final class BoundedEntryWeigher<K, V> implements EntryWeigher<K, V>, Serializable {
+ static final long serialVersionUID = 1;
+ final EntryWeigher<? super K, ? super V> weigher;
+
+ BoundedEntryWeigher(EntryWeigher<? super K, ? super V> weigher) {
+ checkNotNull(weigher);
+ this.weigher = weigher;
+ }
+
+ @Override
+ public int weightOf(K key, V value) {
+ int weight = weigher.weightOf(key, value);
+ checkArgument(weight >= 1);
+ return weight;
+ }
+
+ Object writeReplace() {
+ return weigher;
+ }
+ }
+
+ /** A queue that discards all additions and is always empty. */
+ static final class DiscardingQueue extends AbstractQueue<Object> {
+ @Override public boolean add(Object e) { return true; }
+ @Override public boolean offer(Object e) { return true; }
+ @Override public Object poll() { return null; }
+ @Override public Object peek() { return null; }
+ @Override public int size() { return 0; }
+ @Override public Iterator<Object> iterator() { return emptyList().iterator(); }
+ }
+
+ /** A listener that ignores all notifications. */
+ enum DiscardingListener implements EvictionListener<Object, Object> {
+ INSTANCE;
+
+ @Override public void onEviction(Object key, Object value) {}
+ }
+
+ /** An operation that can be lazily applied to the page replacement policy. */
+ interface Task extends Runnable {
+
+ /** The priority order. */
+ int getOrder();
+
+ /** If the task represents an add, modify, or remove operation. */
+ boolean isWrite();
+
+ /** Returns the next task on the link chain. */
+ Task getNext();
+
+ /** Sets the next task on the link chain. */
+ void setNext(Task task);
+ }
+
+ /** A skeletal implementation of the <tt>Task</tt> interface. */
+ abstract class AbstractTask implements Task {
+ final int order;
+ Task task;
+
+ AbstractTask() {
+ order = nextOrdering();
+ }
+
+ @Override
+ public int getOrder() {
+ return order;
+ }
+
+ @Override
+ public Task getNext() {
+ return task;
+ }
+
+ @Override
+ public void setNext(Task task) {
+ this.task = task;
+ }
+ }
+
+ /* ---------------- Serialization Support -------------- */
+
+ static final long serialVersionUID = 1;
+
+ Object writeReplace() {
+ return new SerializationProxy<K, V>(this);
+ }
+
+ private void readObject(ObjectInputStream stream) throws InvalidObjectException {
+ throw new InvalidObjectException("Proxy required");
+ }
+
+ /**
+ * A proxy that is serialized instead of the map. The page-replacement
+ * algorithm's data structures are not serialized so the deserialized
+ * instance contains only the entries. This is acceptable as caches hold
+ * transient data that is recomputable and serialization would tend to be
+ * used as a fast warm-up process.
+ */
+ static final class SerializationProxy<K, V> implements Serializable {
+ final EntryWeigher<? super K, ? super V> weigher;
+ final EvictionListener<K, V> listener;
+ final int concurrencyLevel;
+ final Map<K, V> data;
+ final long capacity;
+
+ SerializationProxy(ConcurrentLinkedHashMap<K, V> map) {
+ concurrencyLevel = map.concurrencyLevel;
+ data = new HashMap<K, V>(map);
+ capacity = map.capacity;
+ listener = map.listener;
+ weigher = map.weigher;
+ }
+
+ Object readResolve() {
+ ConcurrentLinkedHashMap<K, V> map = new Builder<K, V>()
+ .concurrencyLevel(concurrencyLevel)
+ .maximumWeightedCapacity(capacity)
+ .listener(listener)
+ .weigher(weigher)
+ .build();
+ map.putAll(data);
+ return map;
+ }
+
+ static final long serialVersionUID = 1;
+ }
+
+ /* ---------------- Builder -------------- */
+
+ /**
+ * A builder that creates {@link ConcurrentLinkedHashMap} instances. It
+ * provides a flexible approach for constructing customized instances with
+ * a named parameter syntax. It can be used in the following manner:
+ * <pre>{@code
+ * ConcurrentMap<Vertex, Set<Edge>> graph = new Builder<Vertex, Set<Edge>>()
+ * .maximumWeightedCapacity(5000)
+ * .weigher(Weighers.<Edge>set())
+ * .build();
+ * }</pre>
+ */
+ public static final class Builder<K, V> {
+ static final int DEFAULT_CONCURRENCY_LEVEL = 16;
+ static final int DEFAULT_INITIAL_CAPACITY = 16;
+
+ EvictionListener<K, V> listener;
+ EntryWeigher<? super K, ? super V> weigher;
+
+ int concurrencyLevel;
+ int initialCapacity;
+ long capacity;
+
+ @SuppressWarnings("unchecked")
+ public Builder() {
+ capacity = -1;
+ weigher = Weighers.entrySingleton();
+ initialCapacity = DEFAULT_INITIAL_CAPACITY;
+ concurrencyLevel = DEFAULT_CONCURRENCY_LEVEL;
+ listener = (EvictionListener<K, V>) DiscardingListener.INSTANCE;
+ }
+
+ /**
+ * Specifies the initial capacity of the hash table (default <tt>16</tt>).
+ * This is the number of key-value pairs that the hash table can hold
+ * before a resize operation is required.
+ *
+ * @param initialCapacity the initial capacity used to size the hash table
+ * to accommodate this many entries.
+ * @throws IllegalArgumentException if the initialCapacity is negative
+ */
+ public Builder<K, V> initialCapacity(int initialCapacity) {
+ checkArgument(initialCapacity >= 0);
+ this.initialCapacity = initialCapacity;
+ return this;
+ }
+
+ /**
+ * Specifies the maximum weighted capacity to coerce the map to. The map
+ * may temporarily exceed this threshold while evictions are pending.
+ *
+ * @param capacity the weighted threshold to bound the map by
+ * @throws IllegalArgumentException if the maximumWeightedCapacity is
+ * negative
+ */
+ public Builder<K, V> maximumWeightedCapacity(long capacity) {
+ checkArgument(capacity >= 0);
+ this.capacity = capacity;
+ return this;
+ }
+
+ /**
+ * Specifies the estimated number of concurrently updating threads. The
+ * implementation performs internal sizing to try to accommodate this many
+ * threads (default <tt>16</tt>).
+ *
+ * @param concurrencyLevel the estimated number of concurrently updating
+ * threads
+ * @throws IllegalArgumentException if the concurrencyLevel is less than or
+ * equal to zero
+ */
+ public Builder<K, V> concurrencyLevel(int concurrencyLevel) {
+ checkArgument(concurrencyLevel > 0);
+ this.concurrencyLevel = concurrencyLevel;
+ return this;
+ }
+
+ /**
+ * Specifies an optional listener that is registered for notification when
+ * an entry is evicted.
+ *
+ * @param listener the object to forward evicted entries to
+ * @throws NullPointerException if the listener is null
+ */
+ public Builder<K, V> listener(EvictionListener<K, V> listener) {
+ checkNotNull(listener);
+ this.listener = listener;
+ return this;
+ }
+
+ /**
+ * Specifies an algorithm to determine how many units of capacity an
+ * entry consumes. The default algorithm bounds the map by the number of
+ * key-value pairs by giving each entry a weight of <tt>1</tt>.
+ *
+ * @param weigher the algorithm to determine an entry's weight
+ * @throws NullPointerException if the weigher is null
+ */
+ public Builder<K, V> weigher(Weigher<? super V> weigher) {
+ this.weigher = (weigher == Weighers.singleton())
+ ? Weighers.<K, V>entrySingleton()
+ : new BoundedEntryWeigher<K, V>(Weighers.asEntryWeigher(weigher));
+ return this;
+ }
+
+ /**
+ * Specifies an algorithm to determine how many units of capacity a
+ * value consumes. The default algorithm bounds the map by the number of
+ * key-value pairs by giving each entry a weight of <tt>1</tt>.
+ *
+ * @param weigher the algorithm to determine a value's weight
+ * @throws NullPointerException if the weigher is null
+ */
+ public Builder<K, V> weigher(EntryWeigher<? super K, ? super V> weigher) {
+ this.weigher = (weigher == Weighers.entrySingleton())
+ ? Weighers.<K, V>entrySingleton()
+ : new BoundedEntryWeigher<K, V>(weigher);
+ return this;
+ }
+
+ /**
+ * Creates a new {@link ConcurrentLinkedHashMap} instance.
+ *
+ * @throws IllegalStateException if the maximum weighted capacity was
+ * not set
+ */
+ public ConcurrentLinkedHashMap<K, V> build() {
+ checkState(capacity >= 0);
+ return new ConcurrentLinkedHashMap<K, V>(this);
+ }
+ }
+}
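
To make the new dependency concrete, here is a minimal usage sketch based on the Builder javadoc above; the capacity, types, and listener body are illustrative assumptions rather than code from this commit.

import java.util.concurrent.ConcurrentMap;

import org.mashupbots.socko.concurrentlinkedhashmap.ConcurrentLinkedHashMap;
import org.mashupbots.socko.concurrentlinkedhashmap.EvictionListener;

public class CacheExample {
  public static void main(String[] args) {
    // Bound the cache to 1024 entries; the default weigher gives each
    // key-value pair a weight of 1, so weighted capacity == entry count.
    ConcurrentMap<String, byte[]> cache =
        new ConcurrentLinkedHashMap.Builder<String, byte[]>()
            .maximumWeightedCapacity(1024)
            .listener(new EvictionListener<String, byte[]>() {
              @Override public void onEviction(String key, byte[] value) {
                // Runs on a caller's thread; keep it fast (see the javadoc).
                System.out.println("evicted: " + key);
              }
            })
            .build();

    cache.put("page:/index.html", new byte[64]); // schedules an AddTask
    cache.get("page:/index.html");               // schedules a ReadTask (LRU reorder)
  }
}
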
38 socko-webserver/src/main/java/org/mashupbots/socko/concurrentlinkedhashmap/EntryWeigher.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mashupbots.socko.concurrentlinkedhashmap;
+
+/**
+ * An object that can determine the weight of an entry. The total weight threshold
+ * is used to determine when an eviction is required.
+ *
+ * @author ben.manes@gmail.com (Ben Manes)
+ * @see <a href="http://code.google.com/p/concurrentlinkedhashmap/">
+ * http://code.google.com/p/concurrentlinkedhashmap/</a>
+ */
+@ThreadSafe
+public interface EntryWeigher<K, V> {
+
+ /**
+ * Measures an entry's weight to determine how many units of capacity the
+ * key and value consume. An entry must consume a minimum of one unit.
+ *
+ * @param key the key to weigh
+ * @param value the value to weigh
+ * @return the entry's weight
+ */
+ int weightOf(K key, V value);
+}
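
As a hedged sketch of implementing this contract, a map whose values are collections could weigh each entry by its value's element count, clamped to the one-unit minimum the interface requires; the class name and generic types are assumptions for illustration, and the class would need access to the EntryWeigher interface above.

import java.util.Set;

// Sketch: weighs an entry by the number of elements in its value.
final class SetSizeWeigher<K> implements EntryWeigher<K, Set<?>> {
  @Override
  public int weightOf(K key, Set<?> value) {
    // An entry must consume at least one unit of capacity.
    return Math.max(1, value.size());
  }
}
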
46 ...ebserver/src/main/java/org/mashupbots/socko/concurrentlinkedhashmap/EvictionListener.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2010 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mashupbots.socko.concurrentlinkedhashmap;
+
+/**
+ * A listener registered for notification when an entry is evicted. An instance
+ * may be called concurrently by multiple threads to process entries. An
+ * implementation should avoid performing blocking calls or synchronizing on
+ * shared resources.
+ * <p>
+ * The listener is invoked by {@link ConcurrentLinkedHashMap} on a caller's
+ * thread and will not block other threads from operating on the map. An
+ * implementation should be aware that the caller's thread will not expect
+ * long execution times or failures as a side effect of the listener being
+ * notified. Execution safety and a fast turnaround time can be achieved by
+ * performing the operation asynchronously, such as by submitting a task to an
+ * {@link java.util.concurrent.ExecutorService}.
+ *
+ * @author ben.manes@gmail.com (Ben Manes)
+ * @see <a href="http://code.google.com/p/concurrentlinkedhashmap/">
+ * http://code.google.com/p/concurrentlinkedhashmap/</a>
+ */
+@ThreadSafe
+public interface EvictionListener<K, V> {
+
+ /**
+ * A call-back notification that the entry was evicted.
+ *
+ * @param key the entry's key
+ * @param value the entry's value
+ */
+ void onEviction(K key, V value);
+}
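
The javadoc above suggests performing expensive work asynchronously; a minimal sketch of that pattern, assuming a caller-supplied ExecutorService:

import java.util.concurrent.ExecutorService;

// Sketch: hands evicted entries to an executor so the map caller's
// thread is never blocked by slow or failure-prone clean-up work.
final class AsyncEvictionListener<K, V> implements EvictionListener<K, V> {
  private final ExecutorService executor;

  AsyncEvictionListener(ExecutorService executor) {
    this.executor = executor;
  }

  @Override
  public void onEviction(final K key, final V value) {
    executor.submit(new Runnable() {
      @Override public void run() {
        System.out.println("evicted " + key); // stand-in for real clean-up
      }
    });
  }
}
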
37 socko-webserver/src/main/java/org/mashupbots/socko/concurrentlinkedhashmap/GuardedBy.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2005 Brian Goetz
+ * Released under the Creative Commons Attribution License
+ * (http://creativecommons.org/licenses/by/2.5)
+ * Official home: http://www.jcip.net
+ */
+package org.mashupbots.socko.concurrentlinkedhashmap;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * GuardedBy
+ *
+ * The field or method to which this annotation is applied can only be accessed
+ * when holding a particular lock, which may be a built-in (synchronization)
+ * lock, or may be an explicit java.util.concurrent.Lock.
+ *
+ * The argument determines which lock guards the annotated field or method:
+ * <ul>
+ * <li> <tt>this</tt>: the string literal "this" means the field is guarded by
+ * the intrinsic lock of the object in whose class it is defined. </li>
+ * <li> <tt>class-name.this</tt>: for inner classes, it may be necessary to
+ * disambiguate 'this'; this designation lets you specify which 'this'
+ * reference is intended. </li>
+ * <li> <tt>itself</tt>: for reference fields only; the object to which the
+ * field refers. </li>
+ * <li> <tt>field-name</tt>: the lock object is referenced by the (instance or
+ * static) field specified by field-name. </li>
+ * <li> <tt>class-name.field-name</tt>: the lock object is referenced by the
+ * static field specified by class-name.field-name. </li>
+ * <li> <tt>method-name()</tt>: the lock object is returned by calling the
+ * named nil-ary method. </li>
+ * <li> <tt>class-name.class</tt>: the Class object for the specified class
+ * should be used as the lock object. </li>
+ * </ul>
+ */
+@Target( { ElementType.FIELD, ElementType.METHOD })
+@Retention(RetentionPolicy.CLASS)
+@interface GuardedBy {
+ String value();
+}
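For illustration, a sketch of how the annotation might be applied using the field-name form; the `Counter` class and its `lock` field are hypothetical, and since the annotation is package-private the class would have to live in the same package.

```java
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Hypothetical example: "lock" names the field whose lock must be held
// whenever the annotated counter is read or written.
final class Counter {
  private final Lock lock = new ReentrantLock();

  @GuardedBy("lock")
  private long count;

  long increment() {
    lock.lock();
    try {
      return ++count;
    } finally {
      lock.unlock();
    }
  }
}
```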
View
38 socko-webserver/src/main/java/org/mashupbots/socko/concurrentlinkedhashmap/Immutable.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2005 Brian Goetz and Tim Peierls
+ * Released under the Creative Commons Attribution License
+ * (http://creativecommons.org/licenses/by/2.5)
+ * Official home: http://www.jcip.net
+ *
+ * Any republication or derived work distributed in source code form
+ * must include this copyright and license notice.
+ */
+package org.mashupbots.socko.concurrentlinkedhashmap;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * The class to which this annotation is applied is immutable. This means that
+ * its state cannot be seen to change by callers, which implies that
+ * <ul>
+ * <li> all public fields are final, </li>
+ * <li> all public final reference fields refer to other immutable objects, and </li>
+ * <li> constructors and methods do not publish references to any internal state
+ * which is potentially mutable by the implementation. </li>
+ * </ul>
+ * Immutable objects may still have internal mutable state for purposes of performance
+ * optimization; some state variables may be lazily computed, so long as they are computed
+ * from immutable state and callers cannot tell the difference.
+ * <p>
+ * Immutable objects are inherently thread-safe; they may be passed between threads or
+ * published without synchronization.
+ */
+@Documented
+@Target(ElementType.TYPE)
+@Retention(RetentionPolicy.RUNTIME)
+@interface Immutable {
+}
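A minimal hypothetical class satisfying the contract above: all fields are final and primitive, and no internal state escapes the constructor.

```java
// Hypothetical example of an @Immutable class: state is fixed at
// construction, so instances can be shared freely across threads.
@Immutable
final class Point {
  private final int x;
  private final int y;

  Point(int x, int y) {
    this.x = x;
    this.y = y;
  }

  int x() { return x; }
  int y() { return y; }
}
```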
View
462 socko-webserver/src/main/java/org/mashupbots/socko/concurrentlinkedhashmap/LinkedDeque.java
@@ -0,0 +1,462 @@
+/*
+ * Copyright 2011 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mashupbots.socko.concurrentlinkedhashmap;
+
+import java.util.AbstractCollection;
+import java.util.Collection;
+import java.util.Deque;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/**
+ * Linked list implementation of the {@link Deque} interface where the link
+ * pointers are tightly integrated with the element. Linked deques have no
+ * capacity restrictions; they grow as necessary to support usage. They are not
+ * thread-safe; in the absence of external synchronization, they do not support
+ * concurrent access by multiple threads. Null elements are prohibited.
+ * <p>
+ * Most <tt>LinkedDeque</tt> operations run in constant time by assuming that
+ * the {@link Linked} parameter is associated with the deque instance. Any usage
+ * that violates this assumption will result in non-deterministic behavior.
+ * <p>
+ * The iterators returned by this class are <em>not</em> <i>fail-fast</i>: If
+ * the deque is modified at any time after the iterator is created, the iterator
+ * will be in an unknown state. Thus, in the face of concurrent modification,
+ * the iterator risks arbitrary, non-deterministic behavior at an undetermined
+ * time in the future.
+ *
+ * @author ben.manes@gmail.com (Ben Manes)
+ * @param <E> the type of elements held in this collection
+ * @see <a href="http://code.google.com/p/concurrentlinkedhashmap/">
+ * http://code.google.com/p/concurrentlinkedhashmap/</a>
+ */
+@NotThreadSafe
+final class LinkedDeque<E extends Linked<E>> extends AbstractCollection<E> implements Deque<E> {
+
+ // This class provides a doubly-linked list that is optimized for the virtual
+ // machine. The first and last elements are manipulated instead of a slightly
+ // more convenient sentinel element to avoid the insertion of null checks with
+ // NullPointerException throws in the byte code. The links to a removed
+ // element are cleared to help a generational garbage collector if the
+ // discarded elements inhabit more than one generation.
+
+ /**
+ * Pointer to first node.
+ * Invariant: (first == null && last == null) ||
+ * (first.prev == null)
+ */
+ E first;
+
+ /**
+ * Pointer to last node.
+ * Invariant: (first == null && last == null) ||
+ * (last.next == null)
+ */
+ E last;
+
+ /**
+ * Links the element to the front of the deque so that it becomes the first
+ * element.
+ *
+ * @param e the unlinked element
+ */
+ void linkFirst(final E e) {
+ final E f = first;
+ first = e;
+
+ if (f == null) {
+ last = e;
+ } else {
+ f.setPrevious(e);
+ e.setNext(f);
+ }
+ }
+
+ /**
+ * Links the element to the back of the deque so that it becomes the last
+ * element.
+ *
+ * @param e the unlinked element
+ */
+ void linkLast(final E e) {
+ final E l = last;
+ last = e;
+
+ if (l == null) {
+ first = e;
+ } else {
+ l.setNext(e);
+ e.setPrevious(l);
+ }
+ }
+
+ /** Unlinks the non-null first element. */
+ E unlinkFirst() {
+ final E f = first;
+ final E next = f.getNext();
+ f.setNext(null);
+
+ first = next;
+ if (next == null) {
+ last = null;
+ } else {
+ next.setPrevious(null);
+ }
+ return f;
+ }
+
+ /** Unlinks the non-null last element. */
+ E unlinkLast() {
+ final E l = last;
+ final E prev = l.getPrevious();
+ l.setPrevious(null);
+ last = prev;
+ if (prev == null) {
+ first = null;
+ } else {
+ prev.setNext(null);
+ }
+ return l;
+ }
+
+ /** Unlinks the non-null element. */
+ void unlink(E e) {
+ final E prev = e.getPrevious();
+ final E next = e.getNext();
+
+ if (prev == null) {
+ first = next;
+ } else {
+ prev.setNext(next);
+ e.setPrevious(null);
+ }
+
+ if (next == null) {
+ last = prev;
+ } else {
+ next.setPrevious(prev);
+ e.setNext(null);
+ }
+ }
+
+ @Override
+ public boolean isEmpty() {
+ return (first == null);
+ }
+
+ void checkNotEmpty() {
+ if (isEmpty()) {
+ throw new NoSuchElementException();
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ * <p>
+ * Beware that, unlike in most collections, this method is <em>NOT</em> a
+ * constant-time operation.
+ */
+ @Override
+ public int size() {
+ int size = 0;
+ for (E e = first; e != null; e = e.getNext()) {
+ size++;
+ }
+ return size;
+ }
+
+ @Override
+ public void clear() {
+ for (E e = first; e != null;) {
+ E next = e.getNext();
+ e.setPrevious(null);
+ e.setNext(null);
+ e = next;
+ }
+ first = last = null;
+ }
+
+ @Override
+ public boolean contains(Object o) {
+ return (o instanceof Linked<?>) && contains((Linked<?>) o);
+ }
+
+ // A fast-path containment check
+ boolean contains(Linked<?> e) {
+ return (e.getPrevious() != null)
+ || (e.getNext() != null)
+ || (e == first);
+ }
+
+ /**
+ * Moves the element to the front of the deque so that it becomes the first
+ * element.
+ *
+ * @param e the linked element
+ */
+ public void moveToFront(E e) {
+ if (e != first) {
+ unlink(e);
+ linkFirst(e);
+ }
+ }
+
+ /**
+ * Moves the element to the back of the deque so that it becomes the last
+ * element.
+ *
+ * @param e the linked element
+ */
+ public void moveToBack(E e) {
+ if (e != last) {
+ unlink(e);
+ linkLast(e);
+ }
+ }
+
+ @Override
+ public E peek() {
+ return peekFirst();
+ }
+
+ @Override
+ public E peekFirst() {
+ return first;
+ }
+
+ @Override
+ public E peekLast() {
+ return last;
+ }
+
+ @Override
+ public E getFirst() {
+ checkNotEmpty();
+ return peekFirst();
+ }
+
+ @Override
+ public E getLast() {
+ checkNotEmpty();
+ return peekLast();
+ }
+
+ @Override
+ public E element() {
+ return getFirst();
+ }
+
+ @Override
+ public boolean offer(E e) {
+ return offerLast(e);
+ }
+
+ @Override
+ public boolean offerFirst(E e) {
+ if (contains(e)) {
+ return false;
+ }
+ linkFirst(e);
+ return true;
+ }
+
+ @Override
+ public boolean offerLast(E e) {
+ if (contains(e)) {
+ return false;
+ }
+ linkLast(e);
+ return true;
+ }
+
+ @Override
+ public boolean add(E e) {
+ return offerLast(e);
+ }
+
+ @Override
+ public void addFirst(E e) {
+ if (!offerFirst(e)) {
+ throw new IllegalArgumentException();
+ }
+ }
+
+ @Override
+ public void addLast(E e) {
+ if (!offerLast(e)) {
+ throw new IllegalArgumentException();
+ }
+ }
+
+ @Override
+ public E poll() {
+ return pollFirst();
+ }
+
+ @Override
+ public E pollFirst() {
+ if (isEmpty()) {
+ return null;
+ }
+ return unlinkFirst();
+ }
+
+ @Override
+ public E pollLast() {
+ if (isEmpty()) {
+ return null;
+ }
+ return unlinkLast();
+ }
+
+ @Override
+ public E remove() {
+ return removeFirst();
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public boolean remove(Object o) {
+ if (contains(o)) {
+ unlink((E) o);
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public E removeFirst() {
+ checkNotEmpty();
+ return pollFirst();
+ }
+
+ @Override
+ public boolean removeFirstOccurrence(Object o) {
+ return remove(o);
+ }
+
+ @Override
+ public E removeLast() {
+ checkNotEmpty();
+ return pollLast();
+ }
+
+ @Override
+ public boolean removeLastOccurrence(Object o) {
+ return remove(o);
+ }
+
+ @Override
+ public boolean removeAll(Collection<?> c) {
+ boolean modified = false;
+ for (Object o : c) {
+ modified |= remove(o);
+ }
+ return modified;
+ }
+
+ @Override
+ public void push(E e) {
+ addFirst(e);
+ }
+
+ @Override
+ public E pop() {
+ return removeFirst();
+ }
+
+ @Override
+ public Iterator<E> iterator() {
+ return new AbstractLinkedIterator(first) {
+ @Override E computeNext() {
+ return cursor.getNext();
+ }
+ };
+ }
+
+ @Override
+ public Iterator<E> descendingIterator() {
+ return new AbstractLinkedIterator(last) {
+ @Override E computeNext() {
+ return cursor.getPrevious();
+ }
+ };
+ }
+
+ abstract class AbstractLinkedIterator implements Iterator<E> {
+ E cursor;
+
+ /**
+ * Creates an iterator that can traverse the deque.
+ *
+ * @param start the initial element to begin traversal from
+ */
+ AbstractLinkedIterator(E start) {
+ cursor = start;
+ }
+
+ @Override
+ public boolean hasNext() {
+ return (cursor != null);
+ }
+
+ @Override
+ public E next() {
+ if (!hasNext()) {
+ throw new NoSuchElementException();
+ }
+ E e = cursor;
+ cursor = computeNext();
+ return e;
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ /**
+ * Retrieves the next element to traverse to or <tt>null</tt> if there are
+ * no more elements.
+ */
+ abstract E computeNext();
+ }
+}
+
+/**
+ * An element that is linked on the {@link Deque}.
+ */
+interface Linked<T extends Linked<T>> {
+
+ /**
+ * Retrieves the previous element, or <tt>null</tt> if the element is
+ * unlinked or is the first element on the deque.
+ */
+ T getPrevious();
+
+ /** Sets the previous element or <tt>null</tt> if there is no link. */
+ void setPrevious(T prev);
+
+ /**
+ * Retrieves the next element, or <tt>null</tt> if the element is
+ * unlinked or is the last element on the deque.
+ */
+ T getNext();
+
+ /** Sets the next element or <tt>null</tt> if there is no link. */
+ void setNext(T next);
+}
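To make the intrusive-link contract concrete, here is a hypothetical element type: the node itself carries the deque's prev/next pointers, which is what lets `LinkedDeque` link, unlink, and move elements in constant time. `Task` and its fields are illustrative only, and the sketch assumes the same package since both types are package-private.

```java
// Hypothetical element: the prev/next links live on the element itself,
// so membership checks and unlinking need no search through the deque.
final class Task implements Linked<Task> {
  private final String name;
  private Task prev;
  private Task next;

  Task(String name) { this.name = name; }

  @Override public Task getPrevious() { return prev; }
  @Override public void setPrevious(Task prev) { this.prev = prev; }
  @Override public Task getNext() { return next; }
  @Override public void setNext(Task next) { this.next = next; }
}
```

Because the links are stored on the element, a given `Task` instance can belong to at most one `LinkedDeque` at a time; `offerLast` quietly refuses an element that is already linked.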
View
29 ...o-webserver/src/main/java/org/mashupbots/socko/concurrentlinkedhashmap/NotThreadSafe.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2005 Brian Goetz and Tim Peierls
+ * Released under the Creative Commons Attribution License
+ * (http://creativecommons.org/licenses/by/2.5)
+ * Official home: http://www.jcip.net
+ *
+ * Any republication or derived work distributed in source code form
+ * must include this copyright and license notice.
+ */
+package org.mashupbots.socko.concurrentlinkedhashmap;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * The class to which this annotation is applied is not thread-safe.
+ * This annotation primarily exists to clarify the non-thread-safety of a
+ * class that might otherwise be assumed to be thread-safe, despite the fact
+ * that it is a bad idea to assume a class is thread-safe without good reason.
+ * @see ThreadSafe
+ */
+@Documented
+@Target(ElementType.TYPE)
+@Retention(RetentionPolicy.RUNTIME)
+@interface NotThreadSafe {
+}
View
28 socko-webserver/src/main/java/org/mashupbots/socko/concurrentlinkedhashmap/ThreadSafe.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2005 Brian Goetz
+ * Released under the Creative Commons Attribution License
+ * (http://creativecommons.org/licenses/by/2.5)
+ * Official home: http://www.jcip.net
+ */
+package org.mashupbots.socko.concurrentlinkedhashmap;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * ThreadSafe
+ *
+ * The class to which this annotation is applied is thread-safe. This means that
+ * no sequences of accesses (reads and writes to public fields, calls to public
+ * methods) may put the object into an invalid state, regardless of the
+ * interleaving of those actions by the runtime, and without requiring any
+ * additional synchronization or coordination on the part of the caller.
+ */
+@Documented
+@Target(ElementType.TYPE)
+@Retention(RetentionPolicy.CLASS)
+@interface ThreadSafe {
+}
View
2 socko-webserver/src/main/java/org/mashupbots/socko/concurrentlinkedhashmap/VERSION.txt
@@ -0,0 +1,2 @@
+
+Revision r773.
View
37 socko-webserver/src/main/java/org/mashupbots/socko/concurrentlinkedhashmap/Weigher.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2010 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mashupbots.socko.concurrentlinkedhashmap;
+
+/**
+ * A class that can determine the weight of a value. The total weight threshold
+ * is used to determine when an eviction is required.
+ *
+ * @author ben.manes@gmail.com (Ben Manes)
+ * @see <a href="http://code.google.com/p/concurrentlinkedhashmap/">
+ * http://code.google.com/p/concurrentlinkedhashmap/</a>
+ */
+@ThreadSafe
+public interface Weigher<V> {
+
+ /**
+ * Measures an object's weight to determine how many units of capacity the
+ * value consumes. A value must consume a minimum of one unit.
+ *
+ * @param value the object to weigh
+ * @return the object's weight
+ */
+ int weightOf(V value);
+}
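A small sketch of a conforming implementation, weighing byte arrays by their length; the clamp to one unit reflects the minimum-weight requirement stated above. The class name is an assumption.

```java
// Hypothetical weigher: one unit of capacity per byte, never less than
// the required minimum of one unit (e.g. for an empty array).
final class ByteArrayWeigher implements Weigher<byte[]> {
  @Override
  public int weightOf(byte[] value) {
    return Math.max(1, value.length);
  }
}
```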
View
282 socko-webserver/src/main/java/org/mashupbots/socko/concurrentlinkedhashmap/Weighers.java
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2010 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mashupbots.socko.concurrentlinkedhashmap;
+
+import static org.mashupbots.socko.concurrentlinkedhashmap.ConcurrentLinkedHashMap.checkNotNull;
+
+import java.io.Serializable;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * A common set of {@link Weigher} and {@link EntryWeigher} implementations.