diff --git a/src/main/java/jsr166e/CompletableFuture.java b/src/main/java/jsr166e/CompletableFuture.java index d8a944f004020..2a9e0bb04920f 100644 --- a/src/main/java/jsr166e/CompletableFuture.java +++ b/src/main/java/jsr166e/CompletableFuture.java @@ -1392,6 +1392,7 @@ public CompletableFuture() { * * @param supplier a function returning the value to be used * to complete the returned CompletableFuture + * @param the function's return type * @return the new CompletableFuture */ public static CompletableFuture supplyAsync(Generator supplier) { @@ -1410,6 +1411,7 @@ public static CompletableFuture supplyAsync(Generator supplier) { * @param supplier a function returning the value to be used * to complete the returned CompletableFuture * @param executor the executor to use for asynchronous execution + * @param the function's return type * @return the new CompletableFuture */ public static CompletableFuture supplyAsync(Generator supplier, @@ -1462,6 +1464,7 @@ public static CompletableFuture runAsync(Runnable runnable, * the given value. * * @param value the value + * @param the type of the value * @return the completed CompletableFuture */ public static CompletableFuture completedFuture(U value) { @@ -2829,7 +2832,7 @@ else if (UNSAFE.compareAndSwapObject } if (dst == null) dst = new CompletableFuture(); - if (e == null || ex != null) + if (ex != null) dst.internalComplete(null, ex); } helpPostComplete(); diff --git a/src/main/java/jsr166e/ConcurrentHashMapV8.java b/src/main/java/jsr166e/ConcurrentHashMapV8.java index 5706cd04d769f..b4d041086d2f1 100644 --- a/src/main/java/jsr166e/ConcurrentHashMapV8.java +++ b/src/main/java/jsr166e/ConcurrentHashMapV8.java @@ -12,6 +12,7 @@ import java.io.Serializable; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; +import java.util.AbstractMap; import java.util.Arrays; import java.util.Collection; import java.util.Comparator; @@ -218,7 +219,7 @@ * @param the type of keys maintained by this map * @param the type of mapped values */ -public class ConcurrentHashMapV8 +public class ConcurrentHashMapV8 extends AbstractMap implements ConcurrentMap, Serializable { private static final long serialVersionUID = 7249069246763182397L; @@ -275,6 +276,7 @@ public interface LongByLongToLong { long apply(long a, long b); } /** Interface describing a function mapping two ints to an int */ public interface IntByIntToInt { int apply(int a, int b); } + /* * Overview: * @@ -380,14 +382,15 @@ public interface IntByIntToInt { int apply(int a, int b); } * The table is resized when occupancy exceeds a percentage * threshold (nominally, 0.75, but see below). Any thread * noticing an overfull bin may assist in resizing after the - * initiating thread allocates and sets up the replacement - * array. However, rather than stalling, these other threads may - * proceed with insertions etc. The use of TreeBins shields us - * from the worst case effects of overfilling while resizes are in + * initiating thread allocates and sets up the replacement array. + * However, rather than stalling, these other threads may proceed + * with insertions etc. The use of TreeBins shields us from the + * worst case effects of overfilling while resizes are in * progress. Resizing proceeds by transferring bins, one by one, - * from the table to the next table. To enable concurrency, the - * next table must be (incrementally) prefilled with place-holders - * serving as reverse forwarders to the old table. Because we are + * from the table to the next table. 
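A minimal standalone sketch of the claim-a-block scheme described here, in which helper threads repeatedly claim small ranges of bin indices from a shared transferIndex and process them. The class and field names are illustrative only (not the actual ConcurrentHashMapV8 code), and the generation stamp kept in sizeCtl, described just below, is not modelled:

import java.util.concurrent.atomic.AtomicInteger;

// Toy model of cooperative, block-wise table transfer: each helper claims a
// block of indices [lo, hi) by CASing a shared transferIndex downward, then
// processes those bins. In the real transfer() the bins are split between
// index i and i + n and a forwarding node is installed; here they are simply
// copied to keep the sketch short.
class StrideTransferSketch {
    static final int STRIDE = 16;                     // bins claimed per CAS
    final Object[] oldTab, newTab;
    final AtomicInteger transferIndex;                // next unclaimed index (exclusive)

    StrideTransferSketch(Object[] oldTab) {
        this.oldTab = oldTab;
        this.newTab = new Object[oldTab.length << 1];
        this.transferIndex = new AtomicInteger(oldTab.length);
    }

    // Called by any thread willing to help; returns when no blocks remain.
    void helpTransfer() {
        for (;;) {
            int hi = transferIndex.get();
            if (hi <= 0)
                return;                               // nothing left to claim
            int lo = Math.max(hi - STRIDE, 0);
            if (transferIndex.compareAndSet(hi, lo)) {
                for (int i = hi - 1; i >= lo; --i)
                    newTab[i] = oldTab[i];            // stand-in for real bin splitting
            }
        }
    }
}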
However, threads claim small + * blocks of indices to transfer (via field transferIndex) before + * doing so, reducing contention. A generation stamp in field + * sizeCtl ensures that resizings do not overlap. Because we are * using power-of-two expansion, the elements from each bin must * either stay at same index, or move with a power of two * offset. We eliminate unnecessary node creation by catching @@ -408,13 +411,19 @@ public interface IntByIntToInt { int apply(int a, int b); } * locks, average aggregate waits become shorter as resizing * progresses. The transfer operation must also ensure that all * accessible bins in both the old and new table are usable by any - * traversal. This is arranged by proceeding from the last bin - * (table.length - 1) up towards the first. Upon seeing a - * forwarding node, traversals (see class Traverser) arrange to - * move to the new table without revisiting nodes. However, to - * ensure that no intervening nodes are skipped, bin splitting can - * only begin after the associated reverse-forwarders are in - * place. + * traversal. This is arranged in part by proceeding from the + * last bin (table.length - 1) up towards the first. Upon seeing + * a forwarding node, traversals (see class Traverser) arrange to + * move to the new table without revisiting nodes. To ensure that + * no intervening nodes are skipped even when moved out of order, + * a stack (see class TableStack) is created on first encounter of + * a forwarding node during a traversal, to maintain its place if + * later processing the current table. The need for these + * save/restore mechanics is relatively rare, but when one + * forwarding node is encountered, typically many more will be. + * So Traversers use a simple caching scheme to avoid creating so + * many new TableStack nodes. (Thanks to Peter Levart for + * suggesting use of a stack here.) * * The traversal scheme also applies to partial traversals of * ranges of bins (via an alternate Traverser constructor) @@ -446,16 +455,18 @@ public interface IntByIntToInt { int apply(int a, int b); } * related operations (which is the main reason we cannot use * existing collections such as TreeMaps). TreeBins contain * Comparable elements, but may contain others, as well as - * elements that are Comparable but not necessarily Comparable - * for the same T, so we cannot invoke compareTo among them. To - * handle this, the tree is ordered primarily by hash value, then - * by Comparable.compareTo order if applicable. On lookup at a - * node, if elements are not comparable or compare as 0 then both - * left and right children may need to be searched in the case of - * tied hash values. (This corresponds to the full list search - * that would be necessary if all elements were non-Comparable and - * had tied hashes.) The red-black balancing code is updated from - * pre-jdk-collections + * elements that are Comparable but not necessarily Comparable for + * the same T, so we cannot invoke compareTo among them. To handle + * this, the tree is ordered primarily by hash value, then by + * Comparable.compareTo order if applicable. On lookup at a node, + * if elements are not comparable or compare as 0 then both left + * and right children may need to be searched in the case of tied + * hash values. (This corresponds to the full list search that + * would be necessary if all elements were non-Comparable and had + * tied hashes.) 
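A rough standalone model of this ordering rule, including the class-name and identityHashCode tie-breakers introduced in the next sentence. The real code establishes mutual comparability via comparableClassFor/compareComparables and interleaves these checks with the tree search, so this is an illustration only:

// Simplified model of the TreeBin ordering: primarily by hash, then by
// Comparable.compareTo when the keys are mutually comparable, then by class
// name and identityHashCode as the last-resort insertion tie-breaker.
final class TreeBinOrderSketch {
    @SuppressWarnings({"rawtypes", "unchecked"})
    static int order(int aHash, Object a, int bHash, Object b) {
        if (aHash != bHash)
            return aHash < bHash ? -1 : 1;            // primary order: hash value
        if (a instanceof Comparable && a.getClass() == b.getClass()) {
            int d = ((Comparable) a).compareTo(b);    // crude stand-in for comparableClassFor
            if (d != 0)
                return d;
        }
        int d = a.getClass().getName().compareTo(b.getClass().getName());
        return (d != 0) ? d
            : (System.identityHashCode(a) <= System.identityHashCode(b) ? -1 : 1);
    }
}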
On insertion, to keep a total ordering (or as + * close as is required here) across rebalancings, we compare + * classes and identityHashCodes as tie-breakers. The red-black + * balancing code is updated from pre-jdk-collections * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) * based in turn on Cormen, Leiserson, and Rivest "Introduction to * Algorithms" (CLR). @@ -485,6 +496,10 @@ public interface IntByIntToInt { int apply(int a, int b); } * unused "Segment" class that is instantiated in minimal form * only when serializing. * + * Also, solely for compatibility with previous versions of this + * class, it extends AbstractMap, even though all of its methods + * are overridden, so it is just useless baggage. + * * This file is organized to make things a little easier to follow * while reading than they might otherwise: First the main static * declarations and utilities, then fields, then main public @@ -565,6 +580,23 @@ public interface IntByIntToInt { int apply(int a, int b); } */ private static final int MIN_TRANSFER_STRIDE = 16; + /** + * The number of bits used for generation stamp in sizeCtl. + * Must be at least 6 for 32bit arrays. + */ + private static int RESIZE_STAMP_BITS = 16; + + /** + * The maximum number of threads that can help resize. + * Must fit in 32 - RESIZE_STAMP_BITS bits. + */ + private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1; + + /** + * The bit shift for recording size stamp in sizeCtl. + */ + private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS; + /* * Encodings for Node hash fields. See above for explanation. */ @@ -722,7 +754,7 @@ static int compareComparables(Class kc, Object k, Object x) { * errors by users, these checks must operate on local variables, * which accounts for some odd-looking inline assignments below. * Note that calls to setTabAt always occur within locked regions, - * and so in principle require only release ordering, not need + * and so in principle require only release ordering, not * full volatile semantics, but are currently coded as volatile * writes to be conservative. */ @@ -776,11 +808,6 @@ static final void setTabAt(Node[] tab, int i, Node v) { */ private transient volatile int transferIndex; - /** - * The least available table index to split while resizing. - */ - private transient volatile int transferOrigin; - /** * Spinlock (locked via CAS) used when resizing and/or creating CounterCells. */ @@ -1359,6 +1386,7 @@ static class Segment extends ReentrantLock implements Serializable { * Saves the state of the {@code ConcurrentHashMapV8} instance to a * stream (i.e., serializes it). * @param s the stream + * @throws java.io.IOException if an I/O error occurs * @serialData * the key (Object) and value (Object) * for each key-value mapping, followed by a null pair. @@ -1401,6 +1429,9 @@ private void writeObject(java.io.ObjectOutputStream s) /** * Reconstitutes the instance from a stream (that is, deserializes it). 
* @param s the stream + * @throws ClassNotFoundException if the class of a serialized object + * could not be found + * @throws java.io.IOException if an I/O error occurs */ private void readObject(java.io.ObjectInputStream s) throws java.io.IOException, ClassNotFoundException { @@ -1435,8 +1466,8 @@ private void readObject(java.io.ObjectInputStream s) int sz = (int)size; n = tableSizeFor(sz + (sz >>> 1) + 1); } - @SuppressWarnings({"rawtypes","unchecked"}) - Node[] tab = (Node[])new Node[n]; + @SuppressWarnings("unchecked") + Node[] tab = (Node[])new Node[n]; int mask = n - 1; long added = 0L; while (p != null) { @@ -2101,9 +2132,9 @@ public static KeySetView newKeySet() { * * @param initialCapacity The implementation performs internal * sizing to accommodate this many elements. + * @return the new set * @throws IllegalArgumentException if the initial capacity of * elements is negative - * @return the new set * @since 1.8 */ public static KeySetView newKeySet(int initialCapacity) { @@ -2182,6 +2213,14 @@ Node find(int h, Object k) { /* ---------------- Table Initialization and Resizing -------------- */ + /** + * Returns the stamp bits for resizing a table of size n. + * Must be negative when shifted left by RESIZE_STAMP_SHIFT. + */ + static final int resizeStamp(int n) { + return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1)); + } + /** * Initializes table, using the size recorded in sizeCtl. */ @@ -2194,8 +2233,8 @@ else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { try { if ((tab = table) == null || tab.length == 0) { int n = (sc > 0) ? sc : DEFAULT_CAPACITY; - @SuppressWarnings({"rawtypes","unchecked"}) - Node[] nt = (Node[])new Node[n]; + @SuppressWarnings("unchecked") + Node[] nt = (Node[])new Node[n]; table = tab = nt; sc = n - (n >>> 2); } @@ -2237,17 +2276,20 @@ private final void addCount(long x, int check) { s = sumCount(); } if (check >= 0) { - Node[] tab, nt; int sc; + Node[] tab, nt; int n, sc; while (s >= (long)(sc = sizeCtl) && (tab = table) != null && - tab.length < MAXIMUM_CAPACITY) { + (n = tab.length) < MAXIMUM_CAPACITY) { + int rs = resizeStamp(n); if (sc < 0) { - if (sc == -1 || transferIndex <= transferOrigin || - (nt = nextTable) == null) + if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || + sc == rs + MAX_RESIZERS || (nt = nextTable) == null || + transferIndex <= 0) break; - if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1)) + if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) transfer(tab, nt); } - else if (U.compareAndSwapInt(this, SIZECTL, sc, -2)) + else if (U.compareAndSwapInt(this, SIZECTL, sc, + (rs << RESIZE_STAMP_SHIFT) + 2)) transfer(tab, null); s = sumCount(); } @@ -2259,12 +2301,19 @@ else if (U.compareAndSwapInt(this, SIZECTL, sc, -2)) */ final Node[] helpTransfer(Node[] tab, Node f) { Node[] nextTab; int sc; - if ((f instanceof ForwardingNode) && + if (tab != null && (f instanceof ForwardingNode) && (nextTab = ((ForwardingNode)f).nextTable) != null) { - if (nextTab == nextTable && tab == table && - transferIndex > transferOrigin && (sc = sizeCtl) < -1 && - U.compareAndSwapInt(this, SIZECTL, sc, sc - 1)) - transfer(tab, nextTab); + int rs = resizeStamp(tab.length); + while (nextTab == nextTable && table == tab && + (sc = sizeCtl) < 0) { + if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || + sc == rs + MAX_RESIZERS || transferIndex <= 0) + break; + if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) { + transfer(tab, nextTab); + break; + } + } return nextTab; } return table; @@ -2286,8 +2335,8 @@ private final void 
tryPresize(int size) { if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { try { if (table == tab) { - @SuppressWarnings({"rawtypes","unchecked"}) - Node[] nt = (Node[])new Node[n]; + @SuppressWarnings("unchecked") + Node[] nt = (Node[])new Node[n]; table = nt; sc = n - (n >>> 2); } @@ -2298,9 +2347,21 @@ private final void tryPresize(int size) { } else if (c <= sc || n >= MAXIMUM_CAPACITY) break; - else if (tab == table && - U.compareAndSwapInt(this, SIZECTL, sc, -2)) - transfer(tab, null); + else if (tab == table) { + int rs = resizeStamp(n); + if (sc < 0) { + Node[] nt; + if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || + sc == rs + MAX_RESIZERS || (nt = nextTable) == null || + transferIndex <= 0) + break; + if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) + transfer(tab, nt); + } + else if (U.compareAndSwapInt(this, SIZECTL, sc, + (rs << RESIZE_STAMP_SHIFT) + 2)) + transfer(tab, null); + } } } @@ -2314,36 +2375,27 @@ private final void transfer(Node[] tab, Node[] nextTab) { stride = MIN_TRANSFER_STRIDE; // subdivide range if (nextTab == null) { // initiating try { - @SuppressWarnings({"rawtypes","unchecked"}) - Node[] nt = (Node[])new Node[n << 1]; + @SuppressWarnings("unchecked") + Node[] nt = (Node[])new Node[n << 1]; nextTab = nt; } catch (Throwable ex) { // try to cope with OOME sizeCtl = Integer.MAX_VALUE; return; } nextTable = nextTab; - transferOrigin = n; transferIndex = n; - ForwardingNode rev = new ForwardingNode(tab); - for (int k = n; k > 0;) { // progressively reveal ready slots - int nextk = (k > stride) ? k - stride : 0; - for (int m = nextk; m < k; ++m) - nextTab[m] = rev; - for (int m = n + nextk; m < n + k; ++m) - nextTab[m] = rev; - U.putOrderedInt(this, TRANSFERORIGIN, k = nextk); - } } int nextn = nextTab.length; ForwardingNode fwd = new ForwardingNode(nextTab); boolean advance = true; boolean finishing = false; // to ensure sweep before committing nextTab for (int i = 0, bound = 0;;) { - int nextIndex, nextBound, fh; Node f; + Node f; int fh; while (advance) { + int nextIndex, nextBound; if (--i >= bound || finishing) advance = false; - else if ((nextIndex = transferIndex) <= transferOrigin) { + else if ((nextIndex = transferIndex) <= 0) { i = -1; advance = false; } @@ -2357,29 +2409,22 @@ else if (U.compareAndSwapInt } } if (i < 0 || i >= n || i + n >= nextn) { + int sc; if (finishing) { nextTable = null; table = nextTab; sizeCtl = (n << 1) - (n >>> 1); return; } - for (int sc;;) { - if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) { - if (sc != -1) - return; - finishing = advance = true; - i = n; // recheck before commit - break; - } - } - } - else if ((f = tabAt(tab, i)) == null) { - if (casTabAt(tab, i, null, fwd)) { - setTabAt(nextTab, i, null); - setTabAt(nextTab, i + n, null); - advance = true; + if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) { + if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT) + return; + finishing = advance = true; + i = n; // recheck before commit } } + else if ((f = tabAt(tab, i)) == null) + advance = casTabAt(tab, i, null, fwd); else if ((fh = f.hash) == MOVED) advance = true; // already processed else { @@ -2466,11 +2511,8 @@ else if (f instanceof TreeBin) { private final void treeifyBin(Node[] tab, int index) { Node b; int n, sc; if (tab != null) { - if ((n = tab.length) < MIN_TREEIFY_CAPACITY) { - if (tab == table && (sc = sizeCtl) >= 0 && - U.compareAndSwapInt(this, SIZECTL, sc, -2)) - transfer(tab, null); - } + if ((n = tab.length) < MIN_TREEIFY_CAPACITY) + tryPresize(n << 1); else if ((b = 
tabAt(tab, index)) != null && b.hash >= 0) { synchronized (b) { if (tabAt(tab, index) == b) { @@ -2546,19 +2588,18 @@ else if (ph < h) p = pr; else if ((pk = p.key) == k || (pk != null && k.equals(pk))) return p; - else if (pl == null && pr == null) - break; + else if (pl == null) + p = pr; + else if (pr == null) + p = pl; else if ((kc != null || (kc = comparableClassFor(k)) != null) && (dir = compareComparables(kc, k, pk)) != 0) p = (dir < 0) ? pl : pr; - else if (pl == null) - p = pr; - else if (pr == null || - (q = pr.findTreeNode(h, k, kc)) == null) - p = pl; - else + else if ((q = pr.findTreeNode(h, k, kc)) != null) return q; + else + p = pl; } while (p != null); } return null; @@ -2584,6 +2625,23 @@ static final class TreeBin extends Node { static final int WAITER = 2; // set when waiting for write lock static final int READER = 4; // increment value for setting read lock + /** + * Tie-breaking utility for ordering insertions when equal + * hashCodes and non-comparable. We don't require a total + * order, just a consistent insertion rule to maintain + * equivalence across rebalancings. Tie-breaking further than + * necessary simplifies testing a bit. + */ + static int tieBreakOrder(Object a, Object b) { + int d; + if (a == null || b == null || + (d = a.getClass().getName(). + compareTo(b.getClass().getName())) == 0) + d = (System.identityHashCode(a) <= System.identityHashCode(b) ? + -1 : 1); + return d; + } + /** * Creates bin with initial set of nodes headed by b. */ @@ -2600,21 +2658,21 @@ static final class TreeBin extends Node { r = x; } else { - Object key = x.key; - int hash = x.hash; + K k = x.key; + int h = x.hash; Class kc = null; for (TreeNode p = r;;) { int dir, ph; - if ((ph = p.hash) > hash) + K pk = p.key; + if ((ph = p.hash) > h) dir = -1; - else if (ph < hash) + else if (ph < h) dir = 1; - else if ((kc != null || - (kc = comparableClassFor(key)) != null)) - dir = compareComparables(kc, key, p.key); - else - dir = 0; - TreeNode xp = p; + else if ((kc == null && + (kc = comparableClassFor(k)) == null) || + (dir = compareComparables(kc, k, pk)) == 0) + dir = tieBreakOrder(k, pk); + TreeNode xp = p; if ((p = (dir <= 0) ? 
p.left : p.right) == null) { x.parent = xp; if (dir <= 0) @@ -2628,6 +2686,7 @@ else if ((kc != null || } } this.root = r; + assert checkInvariants(root); } /** @@ -2651,14 +2710,14 @@ private final void unlockRoot() { private final void contendedLock() { boolean waiting = false; for (int s;;) { - if (((s = lockState) & WRITER) == 0) { + if (((s = lockState) & ~WAITER) == 0) { if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) { if (waiting) waiter = null; return; } } - else if ((s | WAITER) == 0) { + else if ((s & WAITER) == 0) { if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) { waiting = true; waiter = Thread.currentThread(); @@ -2676,12 +2735,13 @@ else if (waiting) */ final Node find(int h, Object k) { if (k != null) { - for (Node e = first; e != null; e = e.next) { + for (Node e = first; e != null; ) { int s; K ek; if (((s = lockState) & (WAITER|WRITER)) != 0) { if (e.hash == h && ((ek = e.key) == k || (ek != null && k.equals(ek)))) return e; + e = e.next; } else if (U.compareAndSwapInt(this, LOCKSTATE, s, s + READER)) { @@ -2711,8 +2771,9 @@ else if (U.compareAndSwapInt(this, LOCKSTATE, s, */ final TreeNode putTreeVal(int h, K k, V v) { Class kc = null; + boolean searched = false; for (TreeNode p = root;;) { - int dir, ph; K pk; TreeNode q, pr; + int dir, ph; K pk; if (p == null) { first = root = new TreeNode(h, k, v, null, null); break; @@ -2726,21 +2787,25 @@ else if ((pk = p.key) == k || (pk != null && k.equals(pk))) else if ((kc == null && (kc = comparableClassFor(k)) == null) || (dir = compareComparables(kc, k, pk)) == 0) { - if (p.left == null) - dir = 1; - else if ((pr = p.right) == null || - (q = pr.findTreeNode(h, k, kc)) == null) - dir = -1; - else - return q; + if (!searched) { + TreeNode q, ch; + searched = true; + if (((ch = p.left) != null && + (q = ch.findTreeNode(h, k, kc)) != null) || + ((ch = p.right) != null && + (q = ch.findTreeNode(h, k, kc)) != null)) + return q; + } + dir = tieBreakOrder(k, pk); } + TreeNode xp = p; - if ((p = (dir < 0) ? p.left : p.right) == null) { + if ((p = (dir <= 0) ? p.left : p.right) == null) { TreeNode x, f = first; first = x = new TreeNode(h, k, v, f, xp); if (f != null) f.prev = x; - if (dir < 0) + if (dir <= 0) xp.left = x; else xp.right = x; @@ -3094,6 +3159,18 @@ static boolean checkInvariants(TreeNode t) { /* ----------------Table Traversal -------------- */ + /** + * Records the table, its length, and current traversal index for a + * traverser that must process a region of a forwarded table before + * proceeding with current table. + */ + static final class TableStack { + int length; + int index; + Node[] tab; + TableStack next; + } + /** * Encapsulates traversal for methods such as containsValue; also * serves as a base class for other iterators and spliterators. 
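A short worked example of the lockState bit encoding manipulated by the TreeBin locking methods above (a standalone arithmetic sketch, not the TreeBin code itself). In the corrected contendedLock, (s & ~WAITER) == 0 means "no writer and no readers", i.e. a waiting writer may now CAS the state to WRITER, while (s & WAITER) == 0 means the waiter bit still needs to be set; the replaced test (s | WAITER) == 0 could never be true for any state:

class TreeBinLockStateDemo {
    static final int WRITER = 1; // set while holding write lock
    static final int WAITER = 2; // set when waiting for write lock
    static final int READER = 4; // increment value for setting read lock

    public static void main(String[] args) {
        int s = 0;
        s += READER;                              // first reader takes the lock
        s += READER;                              // second reader joins
        s |= WAITER;                              // a writer records that it is waiting
        System.out.println((s & ~WAITER) == 0);   // false: readers still hold the lock
        s -= 2 * READER;                          // both readers release
        System.out.println((s & ~WAITER) == 0);   // true: the waiter may acquire WRITER
        System.out.println((s & WAITER) != 0);    // true: the waiter bit is still set
    }
}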
@@ -3118,6 +3195,7 @@ static boolean checkInvariants(TreeNode t) { static class Traverser { Node[] tab; // current table; updated if resized Node next; // the next entry to use + TableStack stack, spare; // to save/restore on ForwardingNodes int index; // index of bin to use next int baseIndex; // current index of initial table int baseLimit; // index bound for initial table @@ -3139,16 +3217,17 @@ final Node advance() { if ((e = next) != null) e = e.next; for (;;) { - Node[] t; int i, n; K ek; // must use locals in checks + Node[] t; int i, n; // must use locals in checks if (e != null) return next = e; if (baseIndex >= baseLimit || (t = tab) == null || (n = t.length) <= (i = index) || i < 0) return next = null; - if ((e = tabAt(t, index)) != null && e.hash < 0) { + if ((e = tabAt(t, i)) != null && e.hash < 0) { if (e instanceof ForwardingNode) { tab = ((ForwardingNode)e).nextTable; e = null; + pushState(t, i, n); continue; } else if (e instanceof TreeBin) @@ -3156,9 +3235,48 @@ else if (e instanceof TreeBin) else e = null; } - if ((index += baseSize) >= n) - index = ++baseIndex; // visit upper slots if present + if (stack != null) + recoverState(n); + else if ((index = i + baseSize) >= n) + index = ++baseIndex; // visit upper slots if present + } + } + + /** + * Saves traversal state upon encountering a forwarding node. + */ + private void pushState(Node[] t, int i, int n) { + TableStack s = spare; // reuse if possible + if (s != null) + spare = s.next; + else + s = new TableStack(); + s.tab = t; + s.length = n; + s.index = i; + s.next = stack; + stack = s; + } + + /** + * Possibly pops traversal state. + * + * @param n length of current table + */ + private void recoverState(int n) { + TableStack s; int len; + while ((s = stack) != null && (index += (len = s.length)) >= n) { + n = len; + index = s.index; + tab = s.tab; + s.tab = null; + TableStack next = s.next; + s.next = spare; // save for reuse + stack = next; + spare = s; } + if (s == null && (index += baseSize) >= n) + index = ++baseIndex; } } @@ -6126,7 +6244,6 @@ else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x)) private static final sun.misc.Unsafe U; private static final long SIZECTL; private static final long TRANSFERINDEX; - private static final long TRANSFERORIGIN; private static final long BASECOUNT; private static final long CELLSBUSY; private static final long CELLVALUE; @@ -6141,8 +6258,6 @@ else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x)) (k.getDeclaredField("sizeCtl")); TRANSFERINDEX = U.objectFieldOffset (k.getDeclaredField("transferIndex")); - TRANSFERORIGIN = U.objectFieldOffset - (k.getDeclaredField("transferOrigin")); BASECOUNT = U.objectFieldOffset (k.getDeclaredField("baseCount")); CELLSBUSY = U.objectFieldOffset diff --git a/src/main/java/jsr166e/CountedCompleter.java b/src/main/java/jsr166e/CountedCompleter.java index d9d1d48132e96..c7717ee9032e8 100644 --- a/src/main/java/jsr166e/CountedCompleter.java +++ b/src/main/java/jsr166e/CountedCompleter.java @@ -686,7 +686,7 @@ protected final boolean exec() { } /** - * Returns the result of the computation. By default + * Returns the result of the computation. 
By default, * returns {@code null}, which is appropriate for {@code Void} * actions, but in other cases should be overridden, almost * always to return a field or function of a field that diff --git a/src/main/java/jsr166e/ForkJoinPool.java b/src/main/java/jsr166e/ForkJoinPool.java index e814f23d52961..8a0497371c2ff 100644 --- a/src/main/java/jsr166e/ForkJoinPool.java +++ b/src/main/java/jsr166e/ForkJoinPool.java @@ -532,8 +532,8 @@ public static interface ForkJoinWorkerThreadFactory { * Returns a new worker thread operating in the given pool. * * @param pool the pool this thread works in - * @throws NullPointerException if the pool is null * @return the new worker thread + * @throws NullPointerException if the pool is null */ public ForkJoinWorkerThread newThread(ForkJoinPool pool); } @@ -2098,7 +2098,7 @@ final void helpQuiescePool(WorkQueue w) { w.currentSteal = ps; } } - else if (active) { // decrement active count without queuing + else if (active) { // decrement active count without queuing long nc = ((c = ctl) & ~AC_MASK) | ((c & AC_MASK) - AC_UNIT); if ((int)(nc >> AC_SHIFT) + parallelism == 0) break; // bypass decrement-then-increment @@ -2497,6 +2497,7 @@ public static ForkJoinPool commonPool() { * minimally only the latter. * * @param task the task + * @param the type of the task's result * @return the task's result * @throws NullPointerException if the task is null * @throws RejectedExecutionException if the task cannot be @@ -2545,6 +2546,7 @@ public void execute(Runnable task) { * Submits a ForkJoinTask for execution. * * @param task the task to submit + * @param the type of the task's result * @return the task * @throws NullPointerException if the task is null * @throws RejectedExecutionException if the task cannot be diff --git a/src/main/java/jsr166e/ForkJoinTask.java b/src/main/java/jsr166e/ForkJoinTask.java index e23dfbe3f796a..99ac374ec79b5 100644 --- a/src/main/java/jsr166e/ForkJoinTask.java +++ b/src/main/java/jsr166e/ForkJoinTask.java @@ -134,7 +134,7 @@ * (DAG). Otherwise, executions may encounter a form of deadlock as * tasks cyclically wait for each other. However, this framework * supports other methods and techniques (for example the use of - * {@link Phaser}, {@link #helpQuiesce}, and {@link #complete}) that + * {@link java.util.concurrent.Phaser Phaser}, {@link #helpQuiesce}, and {@link #complete}) that * may be of use in constructing custom subclasses for problems that * are not statically structured as DAGs. To support such usages, a * ForkJoinTask may be atomically tagged with a {@code short} @@ -411,11 +411,13 @@ static final class ExceptionNode extends WeakReference> { final Throwable ex; ExceptionNode next; final long thrower; // use id not ref to avoid weak cycles + final int hashCode; // store task hashCode before weak ref disappears ExceptionNode(ForkJoinTask task, Throwable ex, ExceptionNode next) { super(task, exceptionTableRefQueue); this.ex = ex; this.next = next; this.thrower = Thread.currentThread().getId(); + this.hashCode = System.identityHashCode(task); } } @@ -571,15 +573,18 @@ else if (ps.length == 1 && ps[0] == Throwable.class) return ex; } + /** + * Poll stale refs and remove them. Call only while holding lock. + */ /** * Poll stale refs and remove them. Call only while holding lock. 
*/ private static void expungeStaleExceptions() { for (Object x; (x = exceptionTableRefQueue.poll()) != null;) { if (x instanceof ExceptionNode) { - ForkJoinTask key = ((ExceptionNode)x).get(); + int hashCode = ((ExceptionNode)x).hashCode; ExceptionNode[] t = exceptionTable; - int i = System.identityHashCode(key) & (t.length - 1); + int i = hashCode & (t.length - 1); ExceptionNode e = t[i]; ExceptionNode pred = null; while (e != null) { @@ -782,6 +787,7 @@ else if (t.doJoin() < NORMAL) * unprocessed. * * @param tasks the collection of tasks + * @param the type of the values returned from the tasks * @return the tasks argument, to simplify usage * @throws NullPointerException if tasks or any element are null */ @@ -1444,6 +1450,7 @@ public static ForkJoinTask adapt(Runnable runnable) { * * @param runnable the runnable action * @param result the result upon completion + * @param the type of the result * @return the task */ public static ForkJoinTask adapt(Runnable runnable, T result) { @@ -1457,6 +1464,7 @@ public static ForkJoinTask adapt(Runnable runnable, T result) { * encountered into {@code RuntimeException}. * * @param callable the callable action + * @param the type of the callable's result * @return the task */ public static ForkJoinTask adapt(Callable callable) { @@ -1470,6 +1478,8 @@ public static ForkJoinTask adapt(Callable callable) { /** * Saves this task to a stream (that is, serializes it). * + * @param s the stream + * @throws java.io.IOException if an I/O error occurs * @serialData the current run status and the exception thrown * during execution, or {@code null} if none */ @@ -1481,6 +1491,10 @@ private void writeObject(java.io.ObjectOutputStream s) /** * Reconstitutes this task from a stream (that is, deserializes it). + * @param s the stream + * @throws ClassNotFoundException if the class of a serialized object + * could not be found + * @throws java.io.IOException if an I/O error occurs */ private void readObject(java.io.ObjectInputStream s) throws java.io.IOException, ClassNotFoundException { diff --git a/src/main/java/jsr166e/RecursiveTask.java b/src/main/java/jsr166e/RecursiveTask.java index a31bd0cfff81c..263364af1a255 100644 --- a/src/main/java/jsr166e/RecursiveTask.java +++ b/src/main/java/jsr166e/RecursiveTask.java @@ -15,7 +15,7 @@ * class Fibonacci extends RecursiveTask { * final int n; * Fibonacci(int n) { this.n = n; } - * Integer compute() { + * protected Integer compute() { * if (n <= 1) * return n; * Fibonacci f1 = new Fibonacci(n - 1); @@ -46,6 +46,7 @@ public abstract class RecursiveTask extends ForkJoinTask { /** * The main computation performed by this task. + * @return the result of the computation */ protected abstract V compute(); diff --git a/src/main/java/jsr166e/StampedLock.java b/src/main/java/jsr166e/StampedLock.java index d8fe6f8af795e..45d873a1c0a01 100644 --- a/src/main/java/jsr166e/StampedLock.java +++ b/src/main/java/jsr166e/StampedLock.java @@ -198,7 +198,11 @@ public class StampedLock implements java.io.Serializable { * incoming reader arrives while read lock is held but there is a * queued writer, this incoming reader is queued. (This rule is * responsible for some of the complexity of method acquireRead, - * but without it, the lock becomes highly unfair.) + * but without it, the lock becomes highly unfair.) Method release + * does not (and sometimes cannot) itself wake up cowaiters. This + * is done by the primary thread, but helped by any other threads + * with nothing better to do in methods acquireRead and + * acquireWrite. 
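A usage sketch of the three modes this overview coordinates, in the style of the Point example in the class javadoc. It relies only on the public API (writeLock/unlockWrite, readLock/unlockRead, tryOptimisticRead/validate) and assumes the jsr166e backport is on the classpath:

import jsr166e.StampedLock;

// Exclusive writes, shared reads, and optimistic reads validated after the fact.
class StampedCounter {
    private final StampedLock lock = new StampedLock();
    private long value;

    void increment() {
        long stamp = lock.writeLock();         // exclusive mode
        try {
            value++;
        } finally {
            lock.unlockWrite(stamp);
        }
    }

    long read() {
        long stamp = lock.tryOptimisticRead(); // optimistic mode, never blocks
        long v = value;
        if (!lock.validate(stamp)) {           // a writer intervened: fall back
            stamp = lock.readLock();           // shared (pessimistic) mode
            try {
                v = value;
            } finally {
                lock.unlockRead(stamp);
            }
        }
        return v;
    }
}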
* * These rules apply to threads actually queued. All tryLock forms * opportunistically try to acquire locks regardless of preference @@ -242,11 +246,14 @@ public class StampedLock implements java.io.Serializable { /** Number of processors, for spin control */ private static final int NCPU = Runtime.getRuntime().availableProcessors(); - /** Maximum number of retries before blocking on acquisition */ + /** Maximum number of retries before enqueuing on acquisition */ private static final int SPINS = (NCPU > 1) ? 1 << 6 : 0; + /** Maximum number of retries before blocking at head on acquisition */ + private static final int HEAD_SPINS = (NCPU > 1) ? 1 << 10 : 0; + /** Maximum number of retries before re-blocking */ - private static final int MAX_HEAD_SPINS = (NCPU > 1) ? 1 << 12 : 0; + private static final int MAX_HEAD_SPINS = (NCPU > 1) ? 1 << 16 : 0; /** The period for yielding when waiting for overflow spinlock */ private static final int OVERFLOW_YIELD_RATE = 7; // must be power 2 - 1 @@ -390,8 +397,8 @@ public long writeLockInterruptibly() throws InterruptedException { * @return a stamp that can be used to unlock or convert mode */ public long readLock() { - long s, next; // bypass acquireRead on fully unlocked case only - return ((((s = state) & ABITS) == 0L && + long s = state, next; // bypass acquireRead on common uncontended case + return ((whead == wtail && (s & ABITS) < RFULL && U.compareAndSwapLong(this, STATE, s, next = s + RUNIT)) ? next : acquireRead(false, 0L)); } @@ -668,7 +675,7 @@ public long tryConvertToOptimisticRead(long stamp) { long a = stamp & ABITS, m, s, next; WNode h; for (;;) { s = U.getLongVolatile(this, STATE); // see above - if ((s & SBITS) != (stamp & SBITS)) + if (((s = state) & SBITS) != (stamp & SBITS)) break; if ((m = s & ABITS) == 0L) { if (a != 0L) @@ -987,17 +994,8 @@ private void release(WNode h) { if (t.status <= 0) q = t; } - if (q != null) { - for (WNode r = q;;) { // release co-waiters too - if ((w = r.thread) != null) { - r.thread = null; - U.unpark(w); - } - if ((r = q.cowait) == null) - break; - U.compareAndSwapObject(q, WCOWAIT, r, r.cowait); - } - } + if (q != null && (w = q.thread) != null) + U.unpark(w); } } @@ -1013,22 +1011,22 @@ private void release(WNode h) { private long acquireWrite(boolean interruptible, long deadline) { WNode node = null, p; for (int spins = -1;;) { // spin while enqueuing - long s, ns; - if (((s = state) & ABITS) == 0L) { + long m, s, ns; + if ((m = (s = state) & ABITS) == 0L) { if (U.compareAndSwapLong(this, STATE, s, ns = s + WBIT)) return ns; } + else if (spins < 0) + spins = (m == WBIT && wtail == whead) ? SPINS : 0; else if (spins > 0) { if (ThreadLocalRandom.current().nextInt() >= 0) --spins; } else if ((p = wtail) == null) { // initialize queue - WNode h = new WNode(WMODE, null); - if (U.compareAndSwapObject(this, WHEAD, null, h)) - wtail = h; + WNode hd = new WNode(WMODE, null); + if (U.compareAndSwapObject(this, WHEAD, null, hd)) + wtail = hd; } - else if (spins < 0) - spins = (p == whead) ? 
SPINS : 0; else if (node == null) node = new WNode(WMODE, p); else if (node.prev != p) @@ -1039,14 +1037,18 @@ else if (U.compareAndSwapObject(this, WTAIL, p, node)) { } } - for (int spins = SPINS;;) { - WNode np, pp; int ps; long s, ns; Thread w; - while ((np = node.prev) != p && np != null) - (p = np).next = node; // stale - if (whead == p) { + for (int spins = -1;;) { + WNode h, np, pp; int ps; + if ((h = whead) == p) { + if (spins < 0) + spins = HEAD_SPINS; + else if (spins < MAX_HEAD_SPINS) + spins <<= 1; for (int k = spins;;) { // spin at head + long s, ns; if (((s = state) & ABITS) == 0L) { - if (U.compareAndSwapLong(this, STATE, s, ns = s+WBIT)) { + if (U.compareAndSwapLong(this, STATE, s, + ns = s + WBIT)) { whead = node; node.prev = null; return ns; @@ -1056,33 +1058,45 @@ else if (ThreadLocalRandom.current().nextInt() >= 0 && --k <= 0) break; } - if (spins < MAX_HEAD_SPINS) - spins <<= 1; } - if ((ps = p.status) == 0) - U.compareAndSwapInt(p, WSTATUS, 0, WAITING); - else if (ps == CANCELLED) { - if ((pp = p.prev) != null) { - node.prev = pp; - pp.next = node; + else if (h != null) { // help release stale waiters + WNode c; Thread w; + while ((c = h.cowait) != null) { + if (U.compareAndSwapObject(h, WCOWAIT, c, c.cowait) && + (w = c.thread) != null) + U.unpark(w); } } - else { - long time; // 0 argument to park means no timeout - if (deadline == 0L) - time = 0L; - else if ((time = deadline - System.nanoTime()) <= 0L) - return cancelWaiter(node, node, false); - Thread wt = Thread.currentThread(); - U.putObject(wt, PARKBLOCKER, this); // emulate LockSupport.park - node.thread = wt; - if (node.prev == p && p.status == WAITING && // recheck - (p != whead || (state & ABITS) != 0L)) - U.park(false, time); - node.thread = null; - U.putObject(wt, PARKBLOCKER, null); - if (interruptible && Thread.interrupted()) - return cancelWaiter(node, node, true); + if (whead == h) { + if ((np = node.prev) != p) { + if (np != null) + (p = np).next = node; // stale + } + else if ((ps = p.status) == 0) + U.compareAndSwapInt(p, WSTATUS, 0, WAITING); + else if (ps == CANCELLED) { + if ((pp = p.prev) != null) { + node.prev = pp; + pp.next = node; + } + } + else { + long time; // 0 argument to park means no timeout + if (deadline == 0L) + time = 0L; + else if ((time = deadline - System.nanoTime()) <= 0L) + return cancelWaiter(node, node, false); + Thread wt = Thread.currentThread(); + U.putObject(wt, PARKBLOCKER, this); + node.thread = wt; + if (p.status < 0 && (p != h || (state & ABITS) != 0L) && + whead == h && node.prev == p) + U.park(false, time); // emulate LockSupport.park + node.thread = null; + U.putObject(wt, PARKBLOCKER, null); + if (interruptible && Thread.interrupted()) + return cancelWaiter(node, node, true); + } } } } @@ -1097,138 +1111,159 @@ else if ((time = deadline - System.nanoTime()) <= 0L) * @return next state, or INTERRUPTED */ private long acquireRead(boolean interruptible, long deadline) { - WNode node = null, group = null, p; + WNode node = null, p; for (int spins = -1;;) { - for (;;) { - long s, m, ns; WNode h, q; Thread w; // anti-barging guard - if (group == null && (h = whead) != null && - (q = h.next) != null && q.mode != RMODE) - break; - if ((m = (s = state) & ABITS) < RFULL ? 
- U.compareAndSwapLong(this, STATE, s, ns = s + RUNIT) : - (m < WBIT && (ns = tryIncReaderOverflow(s)) != 0L)) { - if (group != null) { // help release others - for (WNode r = group;;) { - if ((w = r.thread) != null) { - r.thread = null; - U.unpark(w); + WNode h; + if ((h = whead) == (p = wtail)) { + for (long m, s, ns;;) { + if ((m = (s = state) & ABITS) < RFULL ? + U.compareAndSwapLong(this, STATE, s, ns = s + RUNIT) : + (m < WBIT && (ns = tryIncReaderOverflow(s)) != 0L)) + return ns; + else if (m >= WBIT) { + if (spins > 0) { + if (ThreadLocalRandom.current().nextInt() >= 0) + --spins; + } + else { + if (spins == 0) { + WNode nh = whead, np = wtail; + if ((nh == h && np == p) || (h = nh) != (p = np)) + break; } - if ((r = group.cowait) == null) - break; - U.compareAndSwapObject(group, WCOWAIT, r, r.cowait); + spins = SPINS; } } - return ns; } - if (m >= WBIT) - break; - } - if (spins > 0) { - if (ThreadLocalRandom.current().nextInt() >= 0) - --spins; } - else if ((p = wtail) == null) { - WNode h = new WNode(WMODE, null); - if (U.compareAndSwapObject(this, WHEAD, null, h)) - wtail = h; + if (p == null) { // initialize queue + WNode hd = new WNode(WMODE, null); + if (U.compareAndSwapObject(this, WHEAD, null, hd)) + wtail = hd; } - else if (spins < 0) - spins = (p == whead) ? SPINS : 0; else if (node == null) - node = new WNode(WMODE, p); - else if (node.prev != p) - node.prev = p; - else if (p.mode == RMODE && p != whead) { - WNode pp = p.prev; // become co-waiter with group p - if (pp != null && p == wtail && - U.compareAndSwapObject(p, WCOWAIT, - node.cowait = p.cowait, node)) { - node.thread = Thread.currentThread(); - for (long time;;) { + node = new WNode(RMODE, p); + else if (h == p || p.mode != RMODE) { + if (node.prev != p) + node.prev = p; + else if (U.compareAndSwapObject(this, WTAIL, p, node)) { + p.next = node; + break; + } + } + else if (!U.compareAndSwapObject(p, WCOWAIT, + node.cowait = p.cowait, node)) + node.cowait = null; + else { + for (;;) { + WNode pp, c; Thread w; + if ((h = whead) != null && (c = h.cowait) != null && + U.compareAndSwapObject(h, WCOWAIT, c, c.cowait) && + (w = c.thread) != null) // help release + U.unpark(w); + if (h == (pp = p.prev) || h == p || pp == null) { + long m, s, ns; + do { + if ((m = (s = state) & ABITS) < RFULL ? 
+ U.compareAndSwapLong(this, STATE, s, + ns = s + RUNIT) : + (m < WBIT && + (ns = tryIncReaderOverflow(s)) != 0L)) + return ns; + } while (m < WBIT); + } + if (whead == h && p.prev == pp) { + long time; + if (pp == null || h == p || p.status > 0) { + node = null; // throw away + break; + } if (deadline == 0L) time = 0L; else if ((time = deadline - System.nanoTime()) <= 0L) return cancelWaiter(node, p, false); - if (node.thread == null) - break; - if (p.prev != pp || p.status == CANCELLED || - p == whead || p.prev != pp) { - node.thread = null; - break; - } Thread wt = Thread.currentThread(); U.putObject(wt, PARKBLOCKER, this); - if (node.thread == null) // must recheck - break; - U.park(false, time); + node.thread = wt; + if ((h != pp || (state & ABITS) == WBIT) && + whead == h && p.prev == pp) + U.park(false, time); + node.thread = null; U.putObject(wt, PARKBLOCKER, null); if (interruptible && Thread.interrupted()) return cancelWaiter(node, p, true); } - group = p; } - node = null; // throw away - } - else if (U.compareAndSwapObject(this, WTAIL, p, node)) { - p.next = node; - break; } } - for (int spins = SPINS;;) { - WNode np, pp, r; int ps; long m, s, ns; Thread w; - while ((np = node.prev) != p && np != null) - (p = np).next = node; - if (whead == p) { - for (int k = spins;;) { - if ((m = (s = state) & ABITS) != WBIT) { - if (m < RFULL ? - U.compareAndSwapLong(this, STATE, s, ns = s + RUNIT): - (ns = tryIncReaderOverflow(s)) != 0L) { - whead = node; - node.prev = null; - while ((r = node.cowait) != null) { - if (U.compareAndSwapObject(node, WCOWAIT, - r, r.cowait) && - (w = r.thread) != null) { - r.thread = null; - U.unpark(w); // release co-waiter - } - } - return ns; + for (int spins = -1;;) { + WNode h, np, pp; int ps; + if ((h = whead) == p) { + if (spins < 0) + spins = HEAD_SPINS; + else if (spins < MAX_HEAD_SPINS) + spins <<= 1; + for (int k = spins;;) { // spin at head + long m, s, ns; + if ((m = (s = state) & ABITS) < RFULL ? 
+ U.compareAndSwapLong(this, STATE, s, ns = s + RUNIT) : + (m < WBIT && (ns = tryIncReaderOverflow(s)) != 0L)) { + WNode c; Thread w; + whead = node; + node.prev = null; + while ((c = node.cowait) != null) { + if (U.compareAndSwapObject(node, WCOWAIT, + c, c.cowait) && + (w = c.thread) != null) + U.unpark(w); } + return ns; } - else if (ThreadLocalRandom.current().nextInt() >= 0 && - --k <= 0) + else if (m >= WBIT && + ThreadLocalRandom.current().nextInt() >= 0 && --k <= 0) break; } - if (spins < MAX_HEAD_SPINS) - spins <<= 1; } - if ((ps = p.status) == 0) - U.compareAndSwapInt(p, WSTATUS, 0, WAITING); - else if (ps == CANCELLED) { - if ((pp = p.prev) != null) { - node.prev = pp; - pp.next = node; + else if (h != null) { + WNode c; Thread w; + while ((c = h.cowait) != null) { + if (U.compareAndSwapObject(h, WCOWAIT, c, c.cowait) && + (w = c.thread) != null) + U.unpark(w); } } - else { - long time; - if (deadline == 0L) - time = 0L; - else if ((time = deadline - System.nanoTime()) <= 0L) - return cancelWaiter(node, node, false); - Thread wt = Thread.currentThread(); - U.putObject(wt, PARKBLOCKER, this); - node.thread = wt; - if (node.prev == p && p.status == WAITING && - (p != whead || (state & ABITS) != WBIT)) - U.park(false, time); - node.thread = null; - U.putObject(wt, PARKBLOCKER, null); - if (interruptible && Thread.interrupted()) - return cancelWaiter(node, node, true); + if (whead == h) { + if ((np = node.prev) != p) { + if (np != null) + (p = np).next = node; // stale + } + else if ((ps = p.status) == 0) + U.compareAndSwapInt(p, WSTATUS, 0, WAITING); + else if (ps == CANCELLED) { + if ((pp = p.prev) != null) { + node.prev = pp; + pp.next = node; + } + } + else { + long time; + if (deadline == 0L) + time = 0L; + else if ((time = deadline - System.nanoTime()) <= 0L) + return cancelWaiter(node, node, false); + Thread wt = Thread.currentThread(); + U.putObject(wt, PARKBLOCKER, this); + node.thread = wt; + if (p.status < 0 && + (p != h || (state & ABITS) == WBIT) && + whead == h && node.prev == p) + U.park(false, time); + node.thread = null; + U.putObject(wt, PARKBLOCKER, null); + if (interruptible && Thread.interrupted()) + return cancelWaiter(node, node, true); + } } } } @@ -1253,22 +1288,19 @@ private long cancelWaiter(WNode node, WNode group, boolean interrupted) { if (node != null && group != null) { Thread w; node.status = CANCELLED; - node.thread = null; // unsplice cancelled nodes from group for (WNode p = group, q; (q = p.cowait) != null;) { - if (q.status == CANCELLED) - U.compareAndSwapObject(p, WNEXT, q, q.next); + if (q.status == CANCELLED) { + U.compareAndSwapObject(p, WCOWAIT, q, q.cowait); + p = group; // restart + } else p = q; } if (group == node) { - WNode r; // detach and wake up uncancelled co-waiters - while ((r = node.cowait) != null) { - if (U.compareAndSwapObject(node, WCOWAIT, r, r.cowait) && - (w = r.thread) != null) { - r.thread = null; - U.unpark(w); - } + for (WNode r = group.cowait; r != null; r = r.cowait) { + if ((w = r.thread) != null) + U.unpark(w); // wake up uncancelled co-waiters } for (WNode pred = node.prev; pred != null; ) { // unsplice WNode succ, pp; // find valid successor diff --git a/src/main/java/jsr166e/extra/AtomicDouble.java b/src/main/java/jsr166e/extra/AtomicDouble.java index 4a17613567191..b3976589ff958 100644 --- a/src/main/java/jsr166e/extra/AtomicDouble.java +++ b/src/main/java/jsr166e/extra/AtomicDouble.java @@ -212,6 +212,8 @@ public double doubleValue() { /** * Saves the state to a stream (that is, serializes it). 
* + * @param s the stream + * @throws java.io.IOException if an I/O error occurs * @serialData The current value is emitted (a {@code double}). */ private void writeObject(java.io.ObjectOutputStream s) @@ -223,6 +225,10 @@ private void writeObject(java.io.ObjectOutputStream s) /** * Reconstitutes the instance from a stream (that is, deserializes it). + * @param s the stream + * @throws ClassNotFoundException if the class of a serialized object + * could not be found + * @throws java.io.IOException if an I/O error occurs */ private void readObject(java.io.ObjectInputStream s) throws java.io.IOException, ClassNotFoundException { diff --git a/src/main/java/jsr166e/extra/AtomicDoubleArray.java b/src/main/java/jsr166e/extra/AtomicDoubleArray.java index 8d56c51f031a5..33b10f825650f 100644 --- a/src/main/java/jsr166e/extra/AtomicDoubleArray.java +++ b/src/main/java/jsr166e/extra/AtomicDoubleArray.java @@ -237,6 +237,8 @@ public String toString() { /** * Saves the state to a stream (that is, serializes it). * + * @param s the stream + * @throws java.io.IOException if an I/O error occurs * @serialData The length of the array is emitted (int), followed by all * of its elements (each a {@code double}) in the proper order. */ @@ -255,6 +257,10 @@ private void writeObject(java.io.ObjectOutputStream s) /** * Reconstitutes the instance from a stream (that is, deserializes it). + * @param s the stream + * @throws ClassNotFoundException if the class of a serialized object + * could not be found + * @throws java.io.IOException if an I/O error occurs */ private void readObject(java.io.ObjectInputStream s) throws java.io.IOException, ClassNotFoundException { diff --git a/src/main/java/jsr166e/extra/SequenceLock.java b/src/main/java/jsr166e/extra/SequenceLock.java index cc997b6d9d7bb..ed2a8c80a4aa6 100644 --- a/src/main/java/jsr166e/extra/SequenceLock.java +++ b/src/main/java/jsr166e/extra/SequenceLock.java @@ -257,8 +257,10 @@ private void readObject(ObjectInputStream s) /** * Creates an instance of {@code SequenceLock} that will retry - * attempts to acquire the lock at least the given number times + * attempts to acquire the lock at least the given number of times * before blocking. + * + * @param spins the number of times before blocking */ public SequenceLock(int spins) { sync = new Sync(spins); } @@ -508,7 +510,7 @@ public boolean tryLock(long timeout, TimeUnit unit) * Throws UnsupportedOperationException. SequenceLocks * do not support Condition objects. * - * @throws UnsupportedOperationException + * @throws UnsupportedOperationException always */ public Condition newCondition() { throw new UnsupportedOperationException(); diff --git a/src/main/java/jsr166y/CountedCompleter.java b/src/main/java/jsr166y/CountedCompleter.java index b23db5311ddca..7e742409a36d2 100644 --- a/src/main/java/jsr166y/CountedCompleter.java +++ b/src/main/java/jsr166y/CountedCompleter.java @@ -680,7 +680,7 @@ protected final boolean exec() { } /** - * Returns the result of the computation. By default + * Returns the result of the computation. 
By default, * returns {@code null}, which is appropriate for {@code Void} * actions, but in other cases should be overridden, almost * always to return a field or function of a field that diff --git a/src/main/java/jsr166y/ForkJoinPool.java b/src/main/java/jsr166y/ForkJoinPool.java index 5ebd02761857f..211722d45487b 100644 --- a/src/main/java/jsr166y/ForkJoinPool.java +++ b/src/main/java/jsr166y/ForkJoinPool.java @@ -2127,10 +2127,10 @@ final void helpQuiescePool(WorkQueue w) { w.runSubtask(t); } } - else if (active) { // decrement active count without queuing + else if (active) { // decrement active count without queuing long nc = (c = ctl) - AC_UNIT; if ((int)(nc >> AC_SHIFT) + (config & SMASK) == 0) - return; // bypass decrement-then-increment + return; // bypass decrement-then-increment if (U.compareAndSwapLong(this, CTL, c, nc)) active = false; } diff --git a/src/main/java/jsr166y/ForkJoinTask.java b/src/main/java/jsr166y/ForkJoinTask.java index df5739ee12a6b..ab56eca152ed0 100644 --- a/src/main/java/jsr166y/ForkJoinTask.java +++ b/src/main/java/jsr166y/ForkJoinTask.java @@ -395,11 +395,13 @@ static final class ExceptionNode extends WeakReference> { final Throwable ex; ExceptionNode next; final long thrower; // use id not ref to avoid weak cycles + final int hashCode; // store task hashCode before weak ref disappears ExceptionNode(ForkJoinTask task, Throwable ex, ExceptionNode next) { super(task, exceptionTableRefQueue); this.ex = ex; this.next = next; this.thrower = Thread.currentThread().getId(); + this.hashCode = System.identityHashCode(task); } } @@ -561,9 +563,9 @@ else if (ps.length == 1 && ps[0] == Throwable.class) private static void expungeStaleExceptions() { for (Object x; (x = exceptionTableRefQueue.poll()) != null;) { if (x instanceof ExceptionNode) { - ForkJoinTask key = ((ExceptionNode)x).get(); + int hashCode = ((ExceptionNode)x).hashCode; ExceptionNode[] t = exceptionTable; - int i = System.identityHashCode(key) & (t.length - 1); + int i = hashCode & (t.length - 1); ExceptionNode e = t[i]; ExceptionNode pred = null; while (e != null) { diff --git a/src/main/java/jsr166y/RecursiveTask.java b/src/main/java/jsr166y/RecursiveTask.java index eba46631dc527..0192966418907 100644 --- a/src/main/java/jsr166y/RecursiveTask.java +++ b/src/main/java/jsr166y/RecursiveTask.java @@ -15,7 +15,7 @@ * class Fibonacci extends RecursiveTask { * final int n; * Fibonacci(int n) { this.n = n; } - * Integer compute() { + * protected Integer compute() { * if (n <= 1) * return n; * Fibonacci f1 = new Fibonacci(n - 1); diff --git a/src/main/java/jsr166y/ThreadLocalRandom.java b/src/main/java/jsr166y/ThreadLocalRandom.java index 032f77a04bc59..fe6fc8ed65413 100644 --- a/src/main/java/jsr166y/ThreadLocalRandom.java +++ b/src/main/java/jsr166y/ThreadLocalRandom.java @@ -107,9 +107,9 @@ protected int next(int bits) { * * @param least the least value returned * @param bound the upper bound (exclusive) + * @return the next value * @throws IllegalArgumentException if least greater than or equal * to bound - * @return the next value */ public int nextInt(int least, int bound) { if (least >= bound) diff --git a/src/main/java/jsr166y/package-info.java b/src/main/java/jsr166y/package-info.java index b6e19b97965f1..980280390a3f4 100644 --- a/src/main/java/jsr166y/package-info.java +++ b/src/main/java/jsr166y/package-info.java @@ -25,6 +25,4 @@ * complete the tasks. In general, the most efficient ForkJoinTasks * are those that directly implement this algorithmic design pattern. 
*/ - -// Built on 2013-07-10 package jsr166y;
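A small divide-and-conquer task in the style this package description outlines, with compute() declared protected as in the corrected RecursiveTask example above. It assumes the jsr166y backport classes are on the classpath and is illustrative only:

import jsr166y.ForkJoinPool;
import jsr166y.RecursiveTask;

// Sums an array by splitting the range until it is small enough to solve directly.
class SumTask extends RecursiveTask<Long> {
    private static final int THRESHOLD = 1000;
    private final long[] a;
    private final int lo, hi;

    SumTask(long[] a, int lo, int hi) { this.a = a; this.lo = lo; this.hi = hi; }

    protected Long compute() {
        if (hi - lo <= THRESHOLD) {            // small enough: solve sequentially
            long s = 0;
            for (int i = lo; i < hi; ++i) s += a[i];
            return s;
        }
        int mid = (lo + hi) >>> 1;             // otherwise split into two subtasks
        SumTask left = new SumTask(a, lo, mid);
        SumTask right = new SumTask(a, mid, hi);
        left.fork();                           // run the left half asynchronously
        return right.compute() + left.join();  // compute right here, then join left
    }

    public static void main(String[] args) {
        long[] data = new long[1 << 20];
        java.util.Arrays.fill(data, 1L);
        long sum = new ForkJoinPool().invoke(new SumTask(data, 0, data.length));
        System.out.println(sum);               // prints 1048576
    }
}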