DFSConfigKeys.java
@@ -1075,6 +1075,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT = "dfs.block.misreplication.processing.limit";
public static final int DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT = 10000;

public static final String DFS_NAMENODE_REMOVE_BLOCKS_PER_INTERVAL_KEY = "dfs.namenode.remove.blocks.per.interval";
public static final int DFS_NAMENODE_REMOVE_BLOCKS_PER_INTERVAL_DEFAULT = 500000;

public static final String DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY =
"dfs.datanode.outliers.report.interval";
public static final String DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_DEFAULT =
BlockManager.java
@@ -21,6 +21,7 @@
import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS;
import static org.apache.hadoop.hdfs.protocol.BlockType.STRIPED;
import static org.apache.hadoop.util.ExitUtil.terminate;
import static org.apache.hadoop.util.Time.monotonicNowNanos;
import static org.apache.hadoop.util.Time.now;

import java.io.IOException;
@@ -440,6 +441,11 @@ public int getPendingSPSPaths() {
*/
private int numBlocksPerIteration;

/**
* The maximum number of blocks to remove from a dead datanode in one
* batch, before the namesystem write lock is briefly yielded.
*/
private int numBlocksPerRemove;

/**
* The blocks of deleted files are put into the queue,
* and the cleanup thread processes these blocks periodically.
@@ -564,6 +570,10 @@ public BlockManager(final Namesystem namesystem, boolean haEnabled,
DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT,
DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT);

this.numBlocksPerRemove = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_REMOVE_BLOCKS_PER_INTERVAL_KEY,
DFSConfigKeys.DFS_NAMENODE_REMOVE_BLOCKS_PER_INTERVAL_DEFAULT);

this.minReplicationToBeInMaintenance =
(short) initMinReplicationToBeInMaintenance(conf);
this.replQueueResetToHeadThreshold =
@@ -1763,7 +1773,21 @@ public BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
void removeBlocksAssociatedTo(final DatanodeDescriptor node) {
providedStorageMap.removeDatanode(node);
final Iterator<BlockInfo> it = node.getBlockIterator();
int numBlocksRemovedPerLock = 0;
while(it.hasNext()) {
if (numBlocksRemovedPerLock >= numBlocksPerRemove) {
namesystem.writeUnlock();
Review comment (Contributor):
How is this code tested? It's usually not wise to release the namenode lock arbitrarily.
Looking at the code, the iterator is not thread-safe and there is no guarantee the iterator will be able to get to the next one after the lock is re-acquired.

try {
LOG.debug("Yielded lock during removing blocks associated to the dead datanode"); // Release the lock and sleep 500 ns (1 ns = 10^-9 s).
Review comment (Contributor):
Please use English for comments.

Thread.sleep(0, 500);
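// Note: on common JVMs Thread.sleep(0, 500) is rounded up to roughly a
// 1 ms sleep; the JDK does not guarantee sub-millisecond precision.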
} catch (InterruptedException ignored) {
return;
}
// Reset the counter before re-acquiring the write lock.
numBlocksRemovedPerLock = 0;
namesystem.writeLock();
}
numBlocksRemovedPerLock++;
removeStoredBlock(it.next(), node);
}
// Remove all pending DN messages referencing this DN.
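
To make the reviewer's iterator concern concrete, here is a minimal stand-in sketch (not from this PR): ArrayList's fail-fast iterator substitutes for the datanode block iterator, whose internals differ but which is likewise unguarded against mutation once the write lock is dropped.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class IteratorHazardSketch {
  public static void main(String[] args) {
    List<String> blocks =
        new ArrayList<>(Arrays.asList("blk_1", "blk_2", "blk_3"));
    Iterator<String> it = blocks.iterator();
    it.next();            // iteration begins while "holding the lock"
    blocks.add("blk_4");  // a concurrent writer mutates after the lock is released
    it.next();            // fails fast with ConcurrentModificationException
  }
}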
hdfs-default.xml
@@ -4263,6 +4263,14 @@
</description>
</property>

<property>
<name>dfs.namenode.remove.blocks.per.interval</name>
<value>500000</value>
<description>
The maximum number of blocks associated with dead datanodes that the NameNode removes per write-lock hold; once the limit is reached, the lock is briefly released and then re-acquired.
</description>
</property>
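
If the default batch of 500000 holds the write lock for too long on a given cluster, the limit can be lowered. A minimal sketch follows; the value and class name are illustrative, not part of the PR, and in production the key would normally be set in hdfs-site.xml instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class TuneRemoveBatchSketch {
  public static void main(String[] args) {
    // HdfsConfiguration loads the *-default.xml/*-site.xml resources;
    // then override the new key with a smaller batch size.
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REMOVE_BLOCKS_PER_INTERVAL_KEY, 100000);
    System.out.println("remove batch = " + conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_REMOVE_BLOCKS_PER_INTERVAL_KEY,
        DFSConfigKeys.DFS_NAMENODE_REMOVE_BLOCKS_PER_INTERVAL_DEFAULT));
  }
}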

<property>
<name>dfs.block.placement.ec.classname</name>
<value>org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant</value>