[#1608][part-4] feat(server)(spark3): activate partition reassign when server is inactive #1617

Open · wants to merge 8 commits into base: master

@@ -517,7 +517,8 @@ public void registerShuffle(
List<PartitionRange> partitionRanges,
RemoteStorageInfo remoteStorage,
ShuffleDataDistributionType distributionType,
int maxConcurrencyPerPartitionToWrite) {}
int maxConcurrencyPerPartitionToWrite,
boolean blockFailureReassignEnabled) {}

@Override
public boolean sendCommit(

@@ -504,7 +504,8 @@ public void registerShuffle(
List<PartitionRange> partitionRanges,
RemoteStorageInfo storageType,
ShuffleDataDistributionType distributionType,
int maxConcurrencyPerPartitionToWrite) {}
int maxConcurrencyPerPartitionToWrite,
boolean blockFailureReassignEnabled) {}

@Override
public boolean sendCommit(

@@ -939,16 +939,16 @@ protected void registerShuffleServers(
serverToPartitionRanges.entrySet();
entries.stream()
.forEach(
entry -> {
shuffleWriteClient.registerShuffle(
entry.getKey(),
appId,
shuffleId,
entry.getValue(),
remoteStorage,
dataDistributionType,
maxConcurrencyPerPartitionToWrite);
});
entry ->
shuffleWriteClient.registerShuffle(
entry.getKey(),
appId,
shuffleId,
entry.getValue(),
remoteStorage,
dataDistributionType,
maxConcurrencyPerPartitionToWrite,
taskBlockSendFailureRetryEnabled));
LOG.info(
"Finish register shuffleId["
+ shuffleId

@@ -588,7 +588,8 @@ public void registerShuffle(
List<PartitionRange> partitionRanges,
RemoteStorageInfo remoteStorage,
ShuffleDataDistributionType dataDistributionType,
int maxConcurrencyPerPartitionToWrite) {}
int maxConcurrencyPerPartitionToWrite,
boolean blockFailureReassignEnabled) {}

@Override
public boolean sendCommit(

@@ -44,14 +44,34 @@ SendShuffleDataResult sendShuffleData(

void registerApplicationInfo(String appId, long timeoutMs, String user);

default void registerShuffle(
ShuffleServerInfo shuffleServerInfo,
String appId,
int shuffleId,
List<PartitionRange> partitionRanges,
RemoteStorageInfo remoteStorage,
ShuffleDataDistributionType dataDistributionType,
int maxConcurrencyPerPartitionToWrite) {
this.registerShuffle(
shuffleServerInfo,
appId,
shuffleId,
partitionRanges,
remoteStorage,
dataDistributionType,
maxConcurrencyPerPartitionToWrite,
false);
}

void registerShuffle(
ShuffleServerInfo shuffleServerInfo,
String appId,
int shuffleId,
List<PartitionRange> partitionRanges,
RemoteStorageInfo remoteStorage,
ShuffleDataDistributionType dataDistributionType,
int maxConcurrencyPerPartitionToWrite);
int maxConcurrencyPerPartitionToWrite,
boolean blockFailureReassignEnabled);

boolean sendCommit(
Set<ShuffleServerInfo> shuffleServerInfoSet, String appId, int shuffleId, int numMaps);
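
Note on compatibility: the new default method above lets existing seven-argument callers of the write-client interface keep compiling while implicitly passing `false` for the new flag. A minimal caller-side sketch, assuming this is the `ShuffleWriteClient` interface; the package paths in the imports, the `NORMAL` distribution value, and all placeholder arguments are illustrative assumptions, not taken from this diff:

```java
import java.util.List;

import org.apache.uniffle.client.api.ShuffleWriteClient;
import org.apache.uniffle.common.PartitionRange;
import org.apache.uniffle.common.RemoteStorageInfo;
import org.apache.uniffle.common.ShuffleDataDistributionType;
import org.apache.uniffle.common.ShuffleServerInfo;

class RegisterShuffleCompatSketch {
  // Legacy call: resolves to the new default method, which forwards
  // blockFailureReassignEnabled = false to the eight-argument variant.
  static void registerLegacy(
      ShuffleWriteClient client,
      ShuffleServerInfo server,
      List<PartitionRange> ranges,
      RemoteStorageInfo remoteStorage) {
    client.registerShuffle(
        server, "app_1", 0, ranges, remoteStorage, ShuffleDataDistributionType.NORMAL, 1);
  }

  // New call: the writer opts in to block-failure reassignment explicitly.
  static void registerWithReassign(
      ShuffleWriteClient client,
      ShuffleServerInfo server,
      List<PartitionRange> ranges,
      RemoteStorageInfo remoteStorage) {
    client.registerShuffle(
        server, "app_1", 0, ranges, remoteStorage, ShuffleDataDistributionType.NORMAL, 1, true);
  }
}
```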

@@ -531,7 +531,8 @@ public void registerShuffle(
List<PartitionRange> partitionRanges,
RemoteStorageInfo remoteStorage,
ShuffleDataDistributionType dataDistributionType,
int maxConcurrencyPerPartitionToWrite) {
int maxConcurrencyPerPartitionToWrite,
boolean blockFailureReassignEnabled) {
String user = null;
try {
user = UserGroupInformation.getCurrentUser().getShortUserName();
@@ -547,7 +548,8 @@ public void registerShuffle(
remoteStorage,
user,
dataDistributionType,
maxConcurrencyPerPartitionToWrite);
maxConcurrencyPerPartitionToWrite,
blockFailureReassignEnabled);
RssRegisterShuffleResponse response =
getShuffleServerClient(shuffleServerInfo).registerShuffle(request);


@@ -232,4 +232,8 @@ public void blockUntilShutdown() throws InterruptedException {
public int getPort() {
return listenPort;
}

public List<Pair<BindableService, List<ServerInterceptor>>> getServicesWithInterceptors() {
return servicesWithInterceptors;
}
}
@@ -0,0 +1,99 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.uniffle.test;

import java.io.File;
import java.util.Map;

import com.google.common.collect.Maps;
import org.apache.spark.SparkConf;
import org.apache.spark.shuffle.RssSparkConfig;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.io.TempDir;

import org.apache.uniffle.common.rpc.GrpcServer;
import org.apache.uniffle.common.rpc.ServerType;
import org.apache.uniffle.coordinator.CoordinatorConf;
import org.apache.uniffle.server.ShuffleServer;
import org.apache.uniffle.server.ShuffleServerConf;
import org.apache.uniffle.server.ShuffleServerGrpcService;
import org.apache.uniffle.storage.util.StorageType;

import static org.apache.uniffle.client.util.RssClientConfig.RSS_CLIENT_ASSIGNMENT_SHUFFLE_SERVER_NUMBER;
import static org.apache.uniffle.client.util.RssClientConfig.RSS_CLIENT_RETRY_MAX;
import static org.apache.uniffle.common.config.RssClientConf.RSS_CLIENT_BLOCK_SEND_FAILURE_RETRY_ENABLED;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.when;

public class ServerInActivateReassignTest extends PartitionBlockDataReassignBasicTest {

@BeforeAll
public static void setupServers(@TempDir File tmpDir) throws Exception {
// for coordinator
CoordinatorConf coordinatorConf = getCoordinatorConf();
coordinatorConf.setLong("rss.coordinator.app.expired", 5000);
Map<String, String> dynamicConf = Maps.newHashMap();
dynamicConf.put(RssSparkConfig.RSS_STORAGE_TYPE.key(), StorageType.MEMORY_LOCALFILE.name());
addDynamicConf(coordinatorConf, dynamicConf);
createCoordinatorServer(coordinatorConf);

// for shuffle-server
File dataDir1 = new File(tmpDir, "data1");
File dataDir2 = new File(tmpDir, "data2");
basePath = dataDir1.getAbsolutePath() + "," + dataDir2.getAbsolutePath();

ShuffleServerConf grpcShuffleServerConf1 = buildShuffleServerConf(ServerType.GRPC);
createShuffleServer(grpcShuffleServerConf1);

ShuffleServerConf grpcShuffleServerConf2 = buildShuffleServerConf(ServerType.GRPC);
createShuffleServer(grpcShuffleServerConf2);

ShuffleServerConf grpcShuffleServerConf3 = buildShuffleServerConf(ServerType.GRPC_NETTY);
createShuffleServer(grpcShuffleServerConf3);

ShuffleServerConf grpcShuffleServerConf4 = buildShuffleServerConf(ServerType.GRPC_NETTY);
createShuffleServer(grpcShuffleServerConf4);

startServers();
}

@Override
public void updateRssStorage(SparkConf sparkConf) {
sparkConf.set("spark." + RSS_CLIENT_ASSIGNMENT_SHUFFLE_SERVER_NUMBER, "2");
sparkConf.set("spark." + RSS_CLIENT_BLOCK_SEND_FAILURE_RETRY_ENABLED.key(), "true");
}

@Override
public void updateSparkConfCustomer(SparkConf sparkConf) {
sparkConf.set("spark.sql.shuffle.partitions", "4");
sparkConf.set("spark." + RSS_CLIENT_RETRY_MAX, "2");
sparkConf.set(
"spark." + RSS_CLIENT_ASSIGNMENT_SHUFFLE_SERVER_NUMBER,
String.valueOf(grpcShuffleServers.size()));
sparkConf.set("spark." + RSS_CLIENT_BLOCK_SEND_FAILURE_RETRY_ENABLED.key(), "true");

// simulate one server that is inactive.
ShuffleServer shuffleServer = grpcShuffleServers.get(0);
ShuffleServerGrpcService grpcServer =
(ShuffleServerGrpcService)
((GrpcServer) shuffleServer.getServer()).getServicesWithInterceptors().get(0).getKey();
ShuffleServer spy = spy(shuffleServer);
when(spy.isActivateClientReassign()).thenReturn(true);
grpcServer.setShuffleServer(spy);
}
}

@@ -28,6 +28,7 @@

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.google.protobuf.BoolValue;
import com.google.protobuf.ByteString;
import com.google.protobuf.UnsafeByteOperations;
import io.netty.buffer.Unpooled;
@@ -174,14 +175,16 @@ private ShuffleRegisterResponse doRegisterShuffle(
RemoteStorageInfo remoteStorageInfo,
String user,
ShuffleDataDistributionType dataDistributionType,
int maxConcurrencyPerPartitionToWrite) {
int maxConcurrencyPerPartitionToWrite,
boolean blockFailureReassignEnabled) {
ShuffleRegisterRequest.Builder reqBuilder = ShuffleRegisterRequest.newBuilder();
reqBuilder
.setAppId(appId)
.setShuffleId(shuffleId)
.setUser(user)
.setShuffleDataDistribution(RssProtos.DataDistribution.valueOf(dataDistributionType.name()))
.setMaxConcurrencyPerPartitionToWrite(maxConcurrencyPerPartitionToWrite)
.setBlockFailureReassignEnabled(BoolValue.of(blockFailureReassignEnabled))
.addAllPartitionRanges(toShufflePartitionRanges(partitionRanges));
RemoteStorage.Builder rsBuilder = RemoteStorage.newBuilder();
rsBuilder.setPath(remoteStorageInfo.getPath());
@@ -433,7 +436,8 @@ public RssRegisterShuffleResponse registerShuffle(RssRegisterShuffleRequest requ
request.getRemoteStorageInfo(),
request.getUser(),
request.getDataDistributionType(),
request.getMaxConcurrencyPerPartitionToWrite());
request.getMaxConcurrencyPerPartitionToWrite(),
request.isBlockFailureReassignEnabled());

RssRegisterShuffleResponse response;
RssProtos.StatusCode statusCode = rpcResponse.getStatus();

@@ -35,6 +35,27 @@ public class RssRegisterShuffleRequest {
private String user;
private ShuffleDataDistributionType dataDistributionType;
private int maxConcurrencyPerPartitionToWrite;
private boolean blockFailureReassignEnabled = false;

public RssRegisterShuffleRequest(
String appId,
int shuffleId,
List<PartitionRange> partitionRanges,
RemoteStorageInfo remoteStorageInfo,
String user,
ShuffleDataDistributionType dataDistributionType,
int maxConcurrencyPerPartitionToWrite,
boolean blockFailureReassignEnabled) {
this(
appId,
shuffleId,
partitionRanges,
remoteStorageInfo,
user,
dataDistributionType,
maxConcurrencyPerPartitionToWrite);
this.blockFailureReassignEnabled = blockFailureReassignEnabled;
}

public RssRegisterShuffleRequest(
String appId,
@@ -109,4 +130,8 @@ public ShuffleDataDistributionType getDataDistributionType() {
public int getMaxConcurrencyPerPartitionToWrite() {
return maxConcurrencyPerPartitionToWrite;
}

public boolean isBlockFailureReassignEnabled() {
return blockFailureReassignEnabled;
}
}
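
A small, self-contained sketch of the two constructors above, showing that the legacy seven-argument form leaves the new flag at its field default of `false`. The `PartitionRange` and `RemoteStorageInfo` constructors, the package paths, the `NORMAL` distribution value, and all placeholder values are assumptions for illustration:

```java
import java.util.Collections;
import java.util.List;

import org.apache.uniffle.client.request.RssRegisterShuffleRequest;
import org.apache.uniffle.common.PartitionRange;
import org.apache.uniffle.common.RemoteStorageInfo;
import org.apache.uniffle.common.ShuffleDataDistributionType;

class RegisterShuffleRequestSketch {
  public static void main(String[] args) {
    List<PartitionRange> ranges = Collections.singletonList(new PartitionRange(0, 0));
    RemoteStorageInfo remoteStorage = new RemoteStorageInfo("");

    // Legacy constructor: blockFailureReassignEnabled keeps its default (false).
    RssRegisterShuffleRequest legacy =
        new RssRegisterShuffleRequest(
            "app_1", 0, ranges, remoteStorage, "alice", ShuffleDataDistributionType.NORMAL, 1);

    // New constructor: explicitly opts in to block-failure reassignment.
    RssRegisterShuffleRequest withReassign =
        new RssRegisterShuffleRequest(
            "app_1", 0, ranges, remoteStorage, "alice",
            ShuffleDataDistributionType.NORMAL, 1, true);

    System.out.println(legacy.isBlockFailureReassignEnabled());       // false
    System.out.println(withReassign.isBlockFailureReassignEnabled()); // true
  }
}
```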
1 change: 1 addition & 0 deletions proto/src/main/proto/Rss.proto
@@ -184,6 +184,7 @@ message ShuffleRegisterRequest {
string user = 5;
DataDistribution shuffleDataDistribution = 6;
int32 maxConcurrencyPerPartitionToWrite = 7;
google.protobuf.BoolValue blockFailureReassignEnabled = 8;
}
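
Field 8 uses the `google.protobuf.BoolValue` wrapper rather than a plain `bool`, matching the client-side `BoolValue.of(...)` above and letting the server distinguish "not sent by an older client" from an explicit `false`. A sketch of reading it on the receiving side, assuming standard protobuf-java generated accessors and the generated `RssProtos` package; the actual server-side handling is not part of this excerpt:

```java
import org.apache.uniffle.proto.RssProtos.ShuffleRegisterRequest;

class BlockFailureReassignFlagSketch {
  // Older clients simply omit field 8; treat a missing wrapper as disabled.
  static boolean isEnabled(ShuffleRegisterRequest request) {
    return request.hasBlockFailureReassignEnabled()
        && request.getBlockFailureReassignEnabled().getValue();
  }
}
```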

enum DataDistribution {

13 changes: 13 additions & 0 deletions server/src/main/java/org/apache/uniffle/server/ShuffleServer.java
@@ -103,6 +103,8 @@ public class ShuffleServer {
private StreamServer streamServer;
private JvmPauseMonitor jvmPauseMonitor;

private boolean decommissionActivateClientReassignEnabled;

public ShuffleServer(ShuffleServerConf shuffleServerConf) throws Exception {
this.shuffleServerConf = shuffleServerConf;
try {
@@ -229,6 +231,10 @@ private void initialization() throws Exception {
grpcPort = shuffleServerConf.getInteger(ShuffleServerConf.RPC_SERVER_PORT);
nettyPort = shuffleServerConf.getInteger(ShuffleServerConf.NETTY_SERVER_PORT);

decommissionActivateClientReassignEnabled =
shuffleServerConf.get(
ShuffleServerConf.SERVER_DECOMMISSION_ACTIVATE_CLIENT_REASSIGN_ENABLE);

initServerTags();

jettyServer = new JettyServer(shuffleServerConf);
@@ -544,4 +550,11 @@ public void sendHeartbeat() {
shuffleServer.getStorageManager().getStorageInfo(),
shuffleServer.getNettyPort());
}

public boolean isActivateClientReassign() {
if (decommissionActivateClientReassignEnabled && serverStatus.get() != ServerStatus.ACTIVE) {
return true;
}
return false;
}
}
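
`isActivateClientReassign()` combines the new config flag with the server status: with the config at its default of `false` behavior is unchanged, and even when enabled, clients are only redirected once the server leaves `ACTIVE`. A distilled sketch of that predicate, assuming the `ServerStatus` import path and helper name below; where the gRPC service consults this flag is not shown in this excerpt:

```java
import org.apache.uniffle.common.ServerStatus;

class ClientReassignDecisionSketch {
  // Only tell clients to reassign when the operator opted in via
  // rss.server.decommission.activeClientReassignEnabled AND the server
  // has left the ACTIVE state (e.g. while it is decommissioning).
  static boolean shouldActivateClientReassign(boolean configEnabled, ServerStatus status) {
    return configEnabled && status != ServerStatus.ACTIVE;
  }
}
```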

@@ -463,6 +463,13 @@ public class ShuffleServerConf extends RssBaseConf {
.defaultValue(true)
.withDescription("Whether shutdown the server after server is decommissioned");

public static final ConfigOption<Boolean> SERVER_DECOMMISSION_ACTIVATE_CLIENT_REASSIGN_ENABLE =
ConfigOptions.key("rss.server.decommission.activeClientReassignEnabled")
.booleanType()
.defaultValue(false)
.withDescription(
"Whether to activate client partition reassign mechanism for server quick decommission or inactive.");
Contributor: Modify the document.

Member Author: For this config option, I'm not sure whether this should be grouped as decommission.

Member Author (replying to "Modify the document."): For the client partition reassign mechanism, I will propose another PR to finish this, and this config will also be involved in it.


public static final ConfigOption<Integer> NETTY_SERVER_PORT =
ConfigOptions.key("rss.server.netty.port")
.intType()
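
Because `SERVER_DECOMMISSION_ACTIVATE_CLIENT_REASSIGN_ENABLE` defaults to `false`, the new behavior has to be turned on explicitly. A sketch of enabling it programmatically, assuming a no-arg `ShuffleServerConf` constructor and the RssConf-style `set`/`get` used elsewhere in the codebase; an equivalent entry in the server's conf file would use the key `rss.server.decommission.activeClientReassignEnabled`:

```java
import org.apache.uniffle.server.ShuffleServerConf;

class EnableClientReassignSketch {
  public static void main(String[] args) {
    ShuffleServerConf conf = new ShuffleServerConf();
    // Off by default; opt in so that a decommissioning or otherwise inactive
    // server activates the client-side partition reassign path.
    conf.set(ShuffleServerConf.SERVER_DECOMMISSION_ACTIVATE_CLIENT_REASSIGN_ENABLE, true);
    System.out.println(
        conf.get(ShuffleServerConf.SERVER_DECOMMISSION_ACTIVATE_CLIENT_REASSIGN_ENABLE)); // true
  }
}
```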