diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 962b0dce833c5..d1bb311413e09 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1075,6 +1075,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT =
       "dfs.block.misreplication.processing.limit";
   public static final int DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT = 10000;
+  public static final String DFS_NAMENODE_REMOVE_BLOCKS_PER_INTERVAL_KEY = "dfs.namenode.remove.blocks.per.interval";
+  public static final int DFS_NAMENODE_REMOVE_BLOCKS_PER_INTERVAL_DEFAULT = 500000;
+
   public static final String DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY =
       "dfs.datanode.outliers.report.interval";
   public static final String DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index c522e2604e70f..11c7e460d5fc4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -440,6 +440,12 @@ public int getPendingSPSPaths() {
    */
   private int numBlocksPerIteration;
 
+  /**
+   * The maximum number of blocks removed for a dead datanode per
+   * write-lock hold before the lock is briefly released and re-acquired.
+   */
+  private int numBlocksPerRemove;
+
   /**
    * The blocks of deleted files are put into the queue,
    * and the cleanup thread processes these blocks periodically.
@@ -564,6 +570,10 @@ public BlockManager(final Namesystem namesystem, boolean haEnabled,
         DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT,
         DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT);
 
+    this.numBlocksPerRemove = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_REMOVE_BLOCKS_PER_INTERVAL_KEY,
+        DFSConfigKeys.DFS_NAMENODE_REMOVE_BLOCKS_PER_INTERVAL_DEFAULT);
+
     this.minReplicationToBeInMaintenance =
         (short) initMinReplicationToBeInMaintenance(conf);
     this.replQueueResetToHeadThreshold =
@@ -1763,7 +1773,26 @@ public BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
   void removeBlocksAssociatedTo(final DatanodeDescriptor node) {
     providedStorageMap.removeDatanode(node);
     final Iterator<BlockInfo> it = node.getBlockIterator();
+    int numBlocksRemovedPerLock = 0;
     while(it.hasNext()) {
+      if (numBlocksRemovedPerLock >= numBlocksPerRemove) {
+        namesystem.writeUnlock();
+        try {
+          LOG.debug("Yielded lock during removing blocks associated to the dead datanode");
+          // Release the write lock for ~500 ns so threads queued on the
+          // namesystem lock can make progress while a dead datanode's
+          // blocks are being removed.
+          Thread.sleep(0, 500);
+        } catch (InterruptedException e) {
+          // Restore the interrupt status, and re-acquire the write lock so
+          // the caller's lock accounting stays balanced before bailing out.
+          Thread.currentThread().interrupt();
+          namesystem.writeLock();
+          return;
+        }
+        numBlocksRemovedPerLock = 0;
+        namesystem.writeLock();
+      }
+      numBlocksRemovedPerLock++;
       removeStoredBlock(it.next(), node);
     }
     // Remove all pending DN messages referencing this DN.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index bffde8cf7482a..5e695ebe5fc06 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4263,6 +4263,16 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.namenode.remove.blocks.per.interval</name>
+  <value>500000</value>
+  <description>
+    The maximum number of blocks of a dead datanode that the NameNode
+    removes per namesystem write-lock hold. After this many removals the
+    lock is briefly released so other waiting operations can proceed.
+  </description>
+</property>
+
 <property>
   <name>dfs.block.placement.ec.classname</name>
   <value>org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant</value>