Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
[SPARK-21175] Reject OpenBlocks when memory shortage on shuffle service.
## What changes were proposed in this pull request? A shuffle service can serve blocks from multiple apps/tasks. Thus the shuffle service can suffer high memory usage when lots of shuffle-reads happen at the same time. In my cluster, OOM always happens on the shuffle service. Analyzing a heap dump, the memory cost by Netty (ChannelOutboundBufferEntry) can be up to 2~3G. It might make sense to reject "open blocks" requests when memory usage is high on the shuffle service. 93dd0c5 and 85c6ce6 tried to alleviate the memory pressure on the shuffle service but could not solve the root cause. This PR proposes to control the concurrency of shuffle reads. ## How was this patch tested? Added unit test. Author: jinxing <jinxing6042@126.com> Closes #18388 from jinxing64/SPARK-21175.
- Loading branch information
Showing
7 changed files
with
265 additions
and
13 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
134 changes: 134 additions & 0 deletions
134
...n/network-common/src/test/java/org/apache/spark/network/TransportRequestHandlerSuite.java
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,134 @@ | ||
/* | ||
* Licensed to the Apache Software Foundation (ASF) under one or more | ||
* contributor license agreements. See the NOTICE file distributed with | ||
* this work for additional information regarding copyright ownership. | ||
* The ASF licenses this file to You under the Apache License, Version 2.0 | ||
* (the "License"); you may not use this file except in compliance with | ||
* the License. You may obtain a copy of the License at | ||
* | ||
* http://www.apache.org/licenses/LICENSE-2.0 | ||
* | ||
* Unless required by applicable law or agreed to in writing, software | ||
* distributed under the License is distributed on an "AS IS" BASIS, | ||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
* See the License for the specific language governing permissions and | ||
* limitations under the License. | ||
*/ | ||
|
||
package org.apache.spark.network; | ||
|
||
import java.util.ArrayList; | ||
import java.util.List; | ||
|
||
import io.netty.channel.Channel; | ||
import io.netty.channel.ChannelPromise; | ||
import io.netty.channel.DefaultChannelPromise; | ||
import io.netty.util.concurrent.Future; | ||
import io.netty.util.concurrent.GenericFutureListener; | ||
import org.junit.Test; | ||
|
||
import static org.mockito.Mockito.*; | ||
|
||
import org.apache.commons.lang3.tuple.ImmutablePair; | ||
import org.apache.commons.lang3.tuple.Pair; | ||
import org.apache.spark.network.buffer.ManagedBuffer; | ||
import org.apache.spark.network.client.TransportClient; | ||
import org.apache.spark.network.protocol.*; | ||
import org.apache.spark.network.server.NoOpRpcHandler; | ||
import org.apache.spark.network.server.OneForOneStreamManager; | ||
import org.apache.spark.network.server.RpcHandler; | ||
import org.apache.spark.network.server.TransportRequestHandler; | ||
|
||
public class TransportRequestHandlerSuite { | ||
|
||
@Test | ||
public void handleFetchRequestAndStreamRequest() throws Exception { | ||
RpcHandler rpcHandler = new NoOpRpcHandler(); | ||
OneForOneStreamManager streamManager = (OneForOneStreamManager) (rpcHandler.getStreamManager()); | ||
Channel channel = mock(Channel.class); | ||
List<Pair<Object, ExtendedChannelPromise>> responseAndPromisePairs = | ||
new ArrayList<>(); | ||
when(channel.writeAndFlush(any())) | ||
.thenAnswer(invocationOnMock0 -> { | ||
Object response = invocationOnMock0.getArguments()[0]; | ||
ExtendedChannelPromise channelFuture = new ExtendedChannelPromise(channel); | ||
responseAndPromisePairs.add(ImmutablePair.of(response, channelFuture)); | ||
return channelFuture; | ||
}); | ||
|
||
// Prepare the stream. | ||
List<ManagedBuffer> managedBuffers = new ArrayList<>(); | ||
managedBuffers.add(new TestManagedBuffer(10)); | ||
managedBuffers.add(new TestManagedBuffer(20)); | ||
managedBuffers.add(new TestManagedBuffer(30)); | ||
managedBuffers.add(new TestManagedBuffer(40)); | ||
long streamId = streamManager.registerStream("test-app", managedBuffers.iterator()); | ||
streamManager.registerChannel(channel, streamId); | ||
TransportClient reverseClient = mock(TransportClient.class); | ||
TransportRequestHandler requestHandler = new TransportRequestHandler(channel, reverseClient, | ||
rpcHandler, 2L); | ||
|
||
RequestMessage request0 = new ChunkFetchRequest(new StreamChunkId(streamId, 0)); | ||
requestHandler.handle(request0); | ||
assert responseAndPromisePairs.size() == 1; | ||
assert responseAndPromisePairs.get(0).getLeft() instanceof ChunkFetchSuccess; | ||
assert ((ChunkFetchSuccess) (responseAndPromisePairs.get(0).getLeft())).body() == | ||
managedBuffers.get(0); | ||
|
||
RequestMessage request1 = new ChunkFetchRequest(new StreamChunkId(streamId, 1)); | ||
requestHandler.handle(request1); | ||
assert responseAndPromisePairs.size() == 2; | ||
assert responseAndPromisePairs.get(1).getLeft() instanceof ChunkFetchSuccess; | ||
assert ((ChunkFetchSuccess) (responseAndPromisePairs.get(1).getLeft())).body() == | ||
managedBuffers.get(1); | ||
|
||
// Finish flushing the response for request0. | ||
responseAndPromisePairs.get(0).getRight().finish(true); | ||
|
||
RequestMessage request2 = new StreamRequest(String.format("%d_%d", streamId, 2)); | ||
requestHandler.handle(request2); | ||
assert responseAndPromisePairs.size() == 3; | ||
assert responseAndPromisePairs.get(2).getLeft() instanceof StreamResponse; | ||
assert ((StreamResponse) (responseAndPromisePairs.get(2).getLeft())).body() == | ||
managedBuffers.get(2); | ||
|
||
// Request3 will trigger the close of channel, because the number of max chunks being | ||
// transferred is 2; | ||
RequestMessage request3 = new StreamRequest(String.format("%d_%d", streamId, 3)); | ||
requestHandler.handle(request3); | ||
verify(channel, times(1)).close(); | ||
assert responseAndPromisePairs.size() == 3; | ||
} | ||
|
||
private class ExtendedChannelPromise extends DefaultChannelPromise { | ||
|
||
private List<GenericFutureListener> listeners = new ArrayList<>(); | ||
private boolean success; | ||
|
||
public ExtendedChannelPromise(Channel channel) { | ||
super(channel); | ||
success = false; | ||
} | ||
|
||
@Override | ||
public ChannelPromise addListener( | ||
GenericFutureListener<? extends Future<? super Void>> listener) { | ||
listeners.add(listener); | ||
return super.addListener(listener); | ||
} | ||
|
||
@Override | ||
public boolean isSuccess() { | ||
return success; | ||
} | ||
|
||
public void finish(boolean success) { | ||
this.success = success; | ||
listeners.forEach(listener -> { | ||
try { | ||
listener.operationComplete(this); | ||
} catch (Exception e) { } | ||
}); | ||
} | ||
} | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters