HDDS-3053. Decrease the number of the chunk writer threads
Closes #587
elek committed Apr 30, 2020
1 parent d849904 commit f2b5b10
Showing 4 changed files with 31 additions and 16 deletions.
2 changes: 1 addition & 1 deletion ScmConfigKeys.java

@@ -47,7 +47,7 @@ public final class ScmConfigKeys
   public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY
       = "dfs.container.ratis.num.write.chunk.threads";
   public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT
-      = 60;
+      = 10;
   public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY
       = "dfs.container.ratis.replication.level";
   public static final ReplicationLevel
11 changes: 7 additions & 4 deletions hadoop-hdds/common/src/main/resources/ozone-default.xml

@@ -197,10 +197,12 @@
   </property>
   <property>
     <name>dfs.container.ratis.num.write.chunk.threads</name>
-    <value>60</value>
+    <value>10</value>
     <tag>OZONE, RATIS, PERFORMANCE</tag>
-    <description>Maximum number of threads in the thread pool that Ratis
-      will use for writing chunks (60 by default).
+    <description>Maximum number of threads in the thread pool that the
+      datanode will use for writing replicated chunks.
+      This is per configured storage location
+      (10 threads per disk by default).
     </description>
   </property>
   <property>
@@ -399,7 +401,8 @@
     <name>ozone.client.stream.buffer.size</name>
     <value>4MB</value>
     <tag>OZONE, CLIENT</tag>
-    <description>The size of chunks the client will send to the server.</description>
+    <description>The size of chunks the client will send to the server.
+    </description>
   </property>
   <property>
     <name>ozone.client.stream.buffer.flush.size</name>
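Because the value is now multiplied by the number of configured storage locations, the total executor count scales with the hardware. An operator who wants a different per-disk value can override the property in ozone-site.xml; a hypothetical override (the property name comes from this commit, the value 20 is illustrative only):

    <property>
      <name>dfs.container.ratis.num.write.chunk.threads</name>
      <value>20</value>
    </property>

With three configured data directories this yields 3 × 20 = 60 executors, the same total as the old flat default.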
10 changes: 8 additions & 2 deletions XceiverServerRatis.java

@@ -62,6 +62,7 @@
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;

 import com.google.common.annotations.VisibleForTesting;
@@ -778,10 +779,15 @@ private void sendPipelineReport() {
   private static List<ThreadPoolExecutor> createChunkExecutors(
       ConfigurationSource conf) {
     // TODO create single pool with N threads if using non-incremental chunks
-    final int threadCount = conf.getInt(
+    final int threadCountPerDisk = conf.getInt(
         OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY,
         OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT);
-    ThreadPoolExecutor[] executors = new ThreadPoolExecutor[threadCount];
+
+    final int numberOfDisks =
+        MutableVolumeSet.getDatanodeStorageDirs(conf).size();
+
+    ThreadPoolExecutor[] executors =
+        new ThreadPoolExecutor[threadCountPerDisk * numberOfDisks];
     for (int i = 0; i < executors.length; i++) {
       ThreadFactory threadFactory = new ThreadFactoryBuilder()
           .setDaemon(true)
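The sizing change above is easiest to see in isolation. Below is a minimal, runnable sketch of the new arithmetic; the class name is invented, and the one-thread-per-pool construction is an assumption, since the hunk is truncated before the executors are actually built:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ChunkExecutorSizingSketch {

      // Mirrors the new sizing: one pool per (storage location, thread slot).
      static List<ThreadPoolExecutor> createChunkExecutors(
          int threadCountPerDisk, int numberOfDisks) {
        int poolCount = threadCountPerDisk * numberOfDisks;
        List<ThreadPoolExecutor> executors = new ArrayList<>(poolCount);
        for (int i = 0; i < poolCount; i++) {
          // Assumption: one worker thread per pool. The real code also
          // installs a named daemon ThreadFactory (see the hunk above).
          executors.add(new ThreadPoolExecutor(1, 1, 60, TimeUnit.SECONDS,
              new LinkedBlockingQueue<>()));
        }
        return executors;
      }

      public static void main(String[] args) {
        // Old behavior: a flat 60 pools regardless of hardware.
        // New behavior: 10 pools per configured location, so a small
        // single-disk datanode no longer allocates 60 chunk writers.
        System.out.println(createChunkExecutors(10, 1).size()); // 10
        System.out.println(createChunkExecutors(10, 3).size()); // 30
      }
    }

The TODO kept in the method suggests the array-of-pools shape is deliberate: writes are presumably dispatched to a fixed pool (per container or volume) to preserve ordering, and a single shared pool is left as future work for non-incremental chunks.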
24 changes: 15 additions & 9 deletions MutableVolumeSet.java

@@ -163,15 +163,7 @@ private void initializeVolumeSet() throws IOException {
     failedVolumeMap = new ConcurrentHashMap<>();
     volumeStateMap = new EnumMap<>(StorageType.class);

-    Collection<String> rawLocations = conf.getTrimmedStringCollection(
-        HDDS_DATANODE_DIR_KEY);
-    if (rawLocations.isEmpty()) {
-      rawLocations = conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
-    }
-    if (rawLocations.isEmpty()) {
-      throw new IllegalArgumentException("No location configured in either "
-          + HDDS_DATANODE_DIR_KEY + " or " + DFS_DATANODE_DATA_DIR_KEY);
-    }
+    Collection<String> rawLocations = getDatanodeStorageDirs(conf);

     for (StorageType storageType : StorageType.values()) {
       volumeStateMap.put(storageType, new ArrayList<>());
@@ -220,6 +212,20 @@
         SHUTDOWN_HOOK_PRIORITY);
   }

+  public static Collection<String> getDatanodeStorageDirs(
+      ConfigurationSource conf) {
+    Collection<String> rawLocations = conf.getTrimmedStringCollection(
+        HDDS_DATANODE_DIR_KEY);
+    if (rawLocations.isEmpty()) {
+      rawLocations = conf.getTrimmedStringCollection(DFS_DATANODE_DATA_DIR_KEY);
+    }
+    if (rawLocations.isEmpty()) {
+      throw new IllegalArgumentException("No location configured in either "
+          + HDDS_DATANODE_DIR_KEY + " or " + DFS_DATANODE_DATA_DIR_KEY);
+    }
+    return rawLocations;
+  }
+
   /**
    * Run a synchronous parallel check of all HDDS volumes, removing
    * failed volumes.
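For completeness, a hedged usage sketch of the newly extracted helper; the directory paths are invented, and OzoneConfiguration is assumed to be usable as the ConfigurationSource parameter:

    import java.util.Collection;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;

    public class StorageDirsExample {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // hdds.datanode.dir is unset, so the helper falls back to the
        // legacy HDFS key; getTrimmedStringCollection splits on commas.
        conf.set("dfs.datanode.data.dir", "/data/disk1,/data/disk2");

        Collection<String> dirs =
            MutableVolumeSet.getDatanodeStorageDirs(conf);
        System.out.println(dirs); // [/data/disk1, /data/disk2]
      }
    }

If neither key is set the helper throws IllegalArgumentException, which also means createChunkExecutors now fails fast instead of silently sizing its pools from zero disks.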
