DefaultHttp2Connection modifying child map while iterating

Motivation:
When DefaultHttp2Connection removes a stream it iterates over that stream's children and adds them as children of the removed stream's parent. This process may remove elements from the child map while it is being iterated, without going through the iterator's remove() method. This is generally unsafe and may result in undefined iteration behavior.
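
As a minimal sketch of the hazard (illustrative only, not the DefaultHttp2Connection code; the keys and values are made up), removing entries from an io.netty.util.collection.IntObjectHashMap while iterating over it directly looks like this:

    import io.netty.util.collection.IntObjectHashMap;
    import io.netty.util.collection.IntObjectMap;
    import io.netty.util.collection.IntObjectMap.PrimitiveEntry;

    public final class UnsafeChildRemovalSketch {
        public static void main(String[] args) {
            // Illustrative stand-in for the child map; stream ids are arbitrary.
            IntObjectMap<String> children = new IntObjectHashMap<String>(4);
            children.put(3, "stream-3");
            children.put(5, "stream-5");
            children.put(7, "stream-7");

            for (PrimitiveEntry<String> entry : children.entries()) {
                // Removing directly from the map that backs the live iterator.
                // The open-addressing table compacts entries on removal, so the
                // iterator may skip entries, visit one twice, or hit a null slot.
                children.remove(entry.key());
            }
        }
    }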

Modifications:
- We should use the Iterator's remove() method while iterating over the child map
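
A sketch of the safe counterpart, where removal goes through the iterator so the map can keep its internal compaction consistent (again illustrative; the real code re-parents each child before removing it):

    import java.util.Iterator;

    import io.netty.util.collection.IntObjectHashMap;
    import io.netty.util.collection.IntObjectMap;
    import io.netty.util.collection.IntObjectMap.PrimitiveEntry;

    public final class IteratorRemovalSketch {
        public static void main(String[] args) {
            IntObjectMap<String> children = new IntObjectHashMap<String>(4);
            children.put(3, "stream-3");
            children.put(5, "stream-5");
            children.put(7, "stream-7");

            Iterator<PrimitiveEntry<String>> itr = children.entries().iterator();
            while (itr.hasNext()) {
                PrimitiveEntry<String> entry = itr.next();
                // Hand the child off (the real code makes it a child of the removed
                // stream's parent), then remove through the iterator.
                System.out.println("promoting " + entry.value());
                itr.remove();
            }
        }
    }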

Result:
Fixes #6163
1 parent 3c5e677 commit ec3d077e0d7058eec332b46d8b87d73d821be734 @Scottmitch committed Dec 31, 2016
@@ -79,8 +79,10 @@
* the assumption that most streams will have a small number of children. This choice may be
* sub-optimal if, when children are present, there are many children (i.e. a web page which has many
* dependencies to load).
+ *
+ * Visible only for testing!
*/
- private static final int INITIAL_CHILDREN_MAP_SIZE =
+ static final int INITIAL_CHILDREN_MAP_SIZE =
max(1, SystemPropertyUtil.getInt("io.netty.http2.childrenMapSize", 4));
/**
@@ -609,7 +611,7 @@ final void weight(short weight) {
* This method is intended to be used to support an exclusive priority dependency operation.
* @return The map of children prior to this operation, excluding {@code streamToRetain} if present.
*/
- private IntObjectMap<DefaultStream> retain(DefaultStream streamToRetain) {
+ private IntObjectMap<DefaultStream> removeAllChildrenExcept(DefaultStream streamToRetain) {
streamToRetain = children.remove(streamToRetain.id());
IntObjectMap<DefaultStream> prevChildren = children;
// This map should be re-initialized in anticipation for the 1 exclusive child which will be added.
@@ -625,17 +627,20 @@ final void weight(short weight) {
* Adds a child to this priority. If exclusive is set, any children of this node are moved to being dependent on
* the child.
*/
- final void takeChild(DefaultStream child, boolean exclusive, List<ParentChangedEvent> events) {
+ final void takeChild(Iterator<PrimitiveEntry<DefaultStream>> childItr, DefaultStream child, boolean exclusive,
+ List<ParentChangedEvent> events) {
DefaultStream oldParent = child.parent();
if (oldParent != this) {
events.add(new ParentChangedEvent(child, oldParent));
notifyParentChanging(child, this);
child.parent = this;
- // Note that the removal operation may not be successful and may return null. This is because when an
- // exclusive dependency is processed the children are removed in a previous recursive call but the
- // child's parent link is updated here.
- if (oldParent != null) {
+ // If the childItr is not null we are iterating over the oldParent.children collection and should
+ // use the iterator to remove from the collection to avoid concurrent modification. Otherwise it is
+ // assumed we are not iterating over this collection and it is safe to call remove directly.
+ if (childItr != null) {
+ childItr.remove();
+ } else if (oldParent != null) {
oldParent.children.remove(child.id());
}
@@ -649,12 +654,17 @@ final void takeChild(DefaultStream child, boolean exclusive, List<ParentChangedE
if (exclusive && !children.isEmpty()) {
// If it was requested that this child be the exclusive dependency of this node,
// move any previous children to the child node, becoming grand children of this node.
- for (DefaultStream grandchild : retain(child).values()) {
- child.takeChild(grandchild, false, events);
+ Iterator<PrimitiveEntry<DefaultStream>> itr = removeAllChildrenExcept(child).entries().iterator();
+ while (itr.hasNext()) {
+ child.takeChild(itr, itr.next().value(), false, events);
}
}
}
+ final void takeChild(DefaultStream child, boolean exclusive, List<ParentChangedEvent> events) {
+ takeChild(null, child, exclusive, events);
+ }
+
/**
* Removes the child priority and moves any of its dependencies to being direct dependencies on this node.
*/
@@ -666,8 +676,9 @@ final boolean removeChild(DefaultStream child) {
child.parent = null;
// Move up any grand children to be directly dependent on this node.
- for (DefaultStream grandchild : child.children.values()) {
- takeChild(grandchild, false, events);
+ Iterator<PrimitiveEntry<DefaultStream>> itr = child.children.entries().iterator();
+ while (itr.hasNext()) {
+ takeChild(itr, itr.next().value(), false, events);
}
notifyParentChanged(events);
@@ -42,6 +42,7 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
+import static io.netty.handler.codec.http2.DefaultHttp2Connection.INITIAL_CHILDREN_MAP_SIZE;
import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_PRIORITY_WEIGHT;
import static io.netty.handler.codec.http2.Http2CodecUtil.MIN_WEIGHT;
import static java.lang.Integer.MAX_VALUE;
@@ -183,6 +184,22 @@ public Void answer(InvocationOnMock invocation) throws Throwable {
}
@Test
+ public void closingStreamWithChildrenDoesNotCauseConcurrentModification() throws Http2Exception {
+ // We create enough streams to wrap around the child array. We carefully craft the stream ids so that their hash
+ // codes overlap with respect to the child collection. If the implementation is not careful this may lead to a
+ // concurrent modification exception while promoting all children to the connection stream.
+ final Http2Stream streamA = client.local().createStream(1, false);
+ final int numStreams = INITIAL_CHILDREN_MAP_SIZE - 1;
+ for (int i = 0, streamId = 3; i < numStreams; ++i, streamId += INITIAL_CHILDREN_MAP_SIZE) {
+ final Http2Stream stream = client.local().createStream(streamId, false);
+ stream.setPriority(streamA.id(), Http2CodecUtil.DEFAULT_PRIORITY_WEIGHT, false);
+ }
+ assertEquals(INITIAL_CHILDREN_MAP_SIZE, client.numActiveStreams());
+ streamA.close();
+ assertEquals(numStreams, client.numActiveStreams());
+ }
+
+ @Test
public void removeAllStreamsWhileIteratingActiveStreams() throws InterruptedException, Http2Exception {
final Endpoint<Http2RemoteFlowController> remote = client.remote();
final Endpoint<Http2LocalFlowController> local = client.local();
@@ -100,6 +100,7 @@ public class @K@ObjectHashMap<V> implements @K@ObjectMap<V> {
}
private static <T> T toExternal(T value) {
+ assert value != null : "null is not a legitimate internal value. Concurrent Modification?";
return value == NULL_VALUE ? null : value;
}
@@ -414,23 +415,23 @@ public class @K@ObjectHashMap<V> implements @K@ObjectMap<V> {
// entries and move them back if possible, optimizing future lookups.
// Knuth Section 6.4 Algorithm R, also used by the JDK's IdentityHashMap.
- boolean movedBack = false;
int nextFree = index;
- for (int i = probeNext(index); values[i] != null; i = probeNext(i)) {
- int bucket = hashIndex(keys[i]);
+ int i = probeNext(index);
+ for (V value = values[i]; value != null; value = values[i = probeNext(i)]) {
+ @k@ key = keys[i];
+ int bucket = hashIndex(key);
if (i < bucket && (bucket <= nextFree || nextFree <= i) ||
bucket <= nextFree && nextFree <= i) {
// Move the displaced entry "back" to the first available position.
- keys[nextFree] = keys[i];
- values[nextFree] = values[i];
- movedBack = true;
+ keys[nextFree] = key;
+ values[nextFree] = value;
// Put the first entry after the displaced entry
keys[i] = 0;
values[i] = null;
nextFree = i;
}
}
- return movedBack;
+ return nextFree != index;
}
/**
@@ -596,10 +597,7 @@ public class @K@ObjectHashMap<V> implements @K@ObjectMap<V> {
private int entryIndex = -1;
private void scanNext() {
- for (;;) {
- if (++nextIndex == values.length || values[nextIndex] != null) {
- break;
- }
+ while (++nextIndex != values.length && values[nextIndex] == null) {
}
}
@@ -608,7 +606,7 @@ public class @K@ObjectHashMap<V> implements @K@ObjectMap<V> {
if (nextIndex == -1) {
scanNext();
}
- return nextIndex < keys.length;
+ return nextIndex != values.length;
}
@Override
@@ -627,7 +625,7 @@ public class @K@ObjectHashMap<V> implements @K@ObjectMap<V> {
@Override
public void remove() {
- if (prevIndex < 0) {
+ if (prevIndex == -1) {
throw new IllegalStateException("next must be called before each remove.");
}
if (removeAt(prevIndex)) {
