HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in
BlockManager#excessReplicateMap. (yliu)
y-liu committed Oct 12, 2015
1 parent 0ff1216 commit 73b86a5
Showing 3 changed files with 14 additions and 16 deletions.
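The change is a one-for-one swap of the set type backing BlockManager#excessReplicateMap. As a rough illustration of what that swap trades away, here is a minimal sketch using the JDK's LinkedHashSet and HashSet as stand-ins for LightWeightLinkedSet and LightWeightHashSet (illustrative stand-ins only, not the Hadoop classes): the linked variant preserves insertion order at the cost of extra per-entry links, while the plain hash set offers the same add/contains/remove behavior without any ordering.

import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Set;

// Illustration only: LinkedHashSet/HashSet stand in for the Hadoop-specific
// LightWeightLinkedSet/LightWeightHashSet classes touched by this commit.
public class OrderedVsUnorderedSet {
  public static void main(String[] args) {
    // The "linked" variant keeps insertion order by threading entries
    // through an extra linked list, which costs memory per element.
    Set<String> ordered = new LinkedHashSet<>();
    // The plain hash set gives the same add/contains/remove behavior
    // without tracking insertion order.
    Set<String> unordered = new HashSet<>();

    for (String blockId : new String[] {"blk_3", "blk_1", "blk_2"}) {
      ordered.add(blockId);
      unordered.add(blockId);
    }

    System.out.println("ordered:   " + ordered);   // always [blk_3, blk_1, blk_2]
    System.out.println("unordered: " + unordered); // iteration order unspecified
    // Membership checks behave identically, which is all the excess-replica
    // bookkeeping in this commit relies on.
    System.out.println(ordered.contains("blk_1") == unordered.contains("blk_1"));
  }
}
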
3 changes: 3 additions & 0 deletions hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1512,6 +1512,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
     values() since it creates a temporary array. (Staffan Friberg via yliu)
 
+    HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in
+    BlockManager#excessReplicateMap. (yliu)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

23 changes: 9 additions & 14 deletions hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,7 +89,6 @@
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
-import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
@@ -219,7 +218,7 @@ public int getPendingDataNodeMessageCount() {
    * Maps a StorageID to the set of blocks that are "extra" for this
    * DataNode. We'll eventually remove these extras.
    */
-  public final Map<String, LightWeightLinkedSet<BlockInfo>> excessReplicateMap =
+  public final Map<String, LightWeightHashSet<BlockInfo>> excessReplicateMap =
       new HashMap<>();
 
   /**
@@ -1421,11 +1420,6 @@ int computeBlockRecoveryWork(int blocksToProcess) {
    */
   @VisibleForTesting
   int computeRecoveryWorkForBlocks(List<List<BlockInfo>> blocksToRecover) {
-    int requiredReplication, numEffectiveReplicas;
-    List<DatanodeDescriptor> containingNodes;
-    BlockCollection bc;
-    int additionalReplRequired;
-
     int scheduledWork = 0;
     List<BlockRecoveryWork> recovWork = new LinkedList<>();
 
@@ -1786,7 +1780,7 @@ DatanodeDescriptor[] chooseSourceDatanodes(BlockInfo block,
     Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(block);
     for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();
-      LightWeightLinkedSet<BlockInfo> excessBlocks =
+      LightWeightHashSet<BlockInfo> excessBlocks =
           excessReplicateMap.get(node.getDatanodeUuid());
       int countableReplica = storage.getState() == State.NORMAL ? 1 : 0;
       if ((nodesCorrupt != null) && (nodesCorrupt.contains(node)))
@@ -3090,7 +3084,7 @@ private void processOverReplicatedBlock(final BlockInfo block,
         postponeBlock(block);
         return;
       }
-      LightWeightLinkedSet<BlockInfo> excessBlocks = excessReplicateMap.get(
+      LightWeightHashSet<BlockInfo> excessBlocks = excessReplicateMap.get(
           cur.getDatanodeUuid());
       if (excessBlocks == null || !excessBlocks.contains(block)) {
         if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
@@ -3297,10 +3291,10 @@ static boolean useDelHint(boolean isFirst, DatanodeStorageInfo delHint,
 
   private void addToExcessReplicate(DatanodeInfo dn, BlockInfo storedBlock) {
     assert namesystem.hasWriteLock();
-    LightWeightLinkedSet<BlockInfo> excessBlocks = excessReplicateMap.get(
+    LightWeightHashSet<BlockInfo> excessBlocks = excessReplicateMap.get(
         dn.getDatanodeUuid());
     if (excessBlocks == null) {
-      excessBlocks = new LightWeightLinkedSet<>();
+      excessBlocks = new LightWeightHashSet<>();
       excessReplicateMap.put(dn.getDatanodeUuid(), excessBlocks);
     }
     if (excessBlocks.add(storedBlock)) {
@@ -3364,7 +3358,7 @@ public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
     // We've removed a block from a node, so it's definitely no longer
     // in "excess" there.
     //
-    LightWeightLinkedSet<BlockInfo> excessBlocks = excessReplicateMap.get(
+    LightWeightHashSet<BlockInfo> excessBlocks = excessReplicateMap.get(
        node.getDatanodeUuid());
     if (excessBlocks != null) {
       if (excessBlocks.remove(storedBlock)) {
@@ -3581,7 +3575,7 @@ public NumberReplicas countNodes(BlockInfo b) {
       } else if (node.isDecommissioned()) {
         decommissioned++;
       } else {
-        LightWeightLinkedSet<BlockInfo> blocksExcess = excessReplicateMap.get(
+        LightWeightHashSet<BlockInfo> blocksExcess = excessReplicateMap.get(
            node.getDatanodeUuid());
         if (blocksExcess != null && blocksExcess.contains(b)) {
           excess++;
@@ -3988,7 +3982,8 @@ public void removeBlockFromMap(Block block) {
   private void removeFromExcessReplicateMap(Block block) {
     for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
       String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
-      LightWeightLinkedSet<BlockInfo> excessReplicas = excessReplicateMap.get(uuid);
+      LightWeightHashSet<BlockInfo> excessReplicas =
+          excessReplicateMap.get(uuid);
       if (excessReplicas != null) {
         if (excessReplicas.remove(block)) {
           excessBlocksCount.decrementAndGet();

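Taken together, the BlockManager hunks above only ever fetch a per-datanode set from excessReplicateMap, add or remove a block, and test membership; none of them iterate the set in insertion order, which is why an unordered set appears to suffice here. The following is a minimal sketch of that access pattern, with java.util.HashMap, HashSet, and String standing in for the UUID-keyed map, LightWeightHashSet, and BlockInfo (sketch-level stand-ins, not the real HDFS types):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Sketch of the excessReplicateMap access pattern shown in the diff:
// datanode UUID -> set of blocks considered excess on that datanode.
// HashSet and String stand in for LightWeightHashSet and BlockInfo.
public class ExcessReplicaMapSketch {
  private final Map<String, Set<String>> excessReplicateMap = new HashMap<>();
  private long excessBlocksCount = 0;

  // Mirrors the shape of addToExcessReplicate: get-or-create the per-node
  // set, then add (the diff does this with an explicit null check).
  void addToExcess(String datanodeUuid, String block) {
    Set<String> excessBlocks =
        excessReplicateMap.computeIfAbsent(datanodeUuid, k -> new HashSet<>());
    if (excessBlocks.add(block)) {
      excessBlocksCount++;
    }
  }

  // Mirrors removeStoredBlock / removeFromExcessReplicateMap: drop the block
  // and adjust the counter only if it was actually present. Discarding the
  // now-empty set is housekeeping for this sketch, not shown in the diff.
  void removeFromExcess(String datanodeUuid, String block) {
    Set<String> excessBlocks = excessReplicateMap.get(datanodeUuid);
    if (excessBlocks != null && excessBlocks.remove(block)) {
      excessBlocksCount--;
      if (excessBlocks.isEmpty()) {
        excessReplicateMap.remove(datanodeUuid);
      }
    }
  }

  // Mirrors the read-side checks in countNodes, processOverReplicatedBlock,
  // and NamenodeFsck: membership only, no ordered iteration.
  boolean isExcess(String datanodeUuid, String block) {
    Set<String> excessBlocks = excessReplicateMap.get(datanodeUuid);
    return excessBlocks != null && excessBlocks.contains(block);
  }
}
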
4 changes: 2 additions & 2 deletions hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -74,7 +74,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
-import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
+import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NodeBase;
@@ -687,7 +687,7 @@ private void collectBlocksSummary(String parent, HdfsFileStatus file, Result res
               .getStorageType()));
         }
         if (showReplicaDetails) {
-          LightWeightLinkedSet<BlockInfo> blocksExcess =
+          LightWeightHashSet<BlockInfo> blocksExcess =
             bm.excessReplicateMap.get(dnDesc.getDatanodeUuid());
           Collection<DatanodeDescriptor> corruptReplicas =
             bm.getCorruptReplicas(block.getLocalBlock());
