HDFS-4878. On Remove Block, block is not removed from neededReplications queue. Contributed by Tao Luo.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2.1-beta@1491673 13f79535-47bb-0310-9956-ffa450edef68
shvachko committed Jun 11, 2013
1 parent bb7186b · commit c17a04d
Showing 3 changed files with 70 additions and 11 deletions.
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (3 additions, 0 deletions)
@@ -327,6 +327,9 @@ Release 2.1.0-beta - UNRELEASED
HDFS-4586. TestDataDirs.testGetDataDirsFromURIs fails with all directories
in dfs.datanode.data.dir are invalid. (Ivan Mitic via atm)

HDFS-4878. On Remove Block, block is not removed from neededReplications
queue. (Tao Luo via shv)

BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS

HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2869,8 +2869,9 @@ public void removeBlock(Block block) {
addToInvalidates(block);
corruptReplicas.removeFromCorruptReplicasMap(block);
blocksMap.removeBlock(block);
// Remove the block from pendingReplications
// Remove the block from pendingReplications and neededReplications
pendingReplications.remove(block);
neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
if (postponedMisreplicatedBlocks.remove(block)) {
postponedMisreplicatedBlocksCount.decrementAndGet();
}
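Note on the fix: removeBlock() now purges the block from neededReplications in addition to pendingReplications. Without this, a block deleted from the namespace could leave a stale entry in the under-replicated queue, and the replication monitor could keep scheduling work for a block that no longer exists. Passing UnderReplicatedBlocks.LEVEL as the priority argument appears to be the "search every priority bucket" form of remove(), since the caller does not know which level the block was queued at. The following is a minimal, self-contained sketch of that pattern; ToyUnderReplicatedQueue and the block IDs are invented for illustration and are not the Hadoop classes.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/** Toy model (not the Hadoop class) of a priority-bucketed under-replication queue. */
class ToyUnderReplicatedQueue {
  static final int LEVEL = 5;                          // number of priority buckets
  private final List<Set<String>> queues = new ArrayList<Set<String>>();

  ToyUnderReplicatedQueue() {
    for (int i = 0; i < LEVEL; i++) {
      queues.add(new HashSet<String>());
    }
  }

  void add(String blockId, int priority) {
    queues.get(priority).add(blockId);
  }

  /** Remove from the given level; if the level is out of range, scan every level. */
  boolean remove(String blockId, int priLevel) {
    if (priLevel >= 0 && priLevel < LEVEL && queues.get(priLevel).remove(blockId)) {
      return true;
    }
    for (int i = 0; i < LEVEL; i++) {                  // caller did not know the level
      if (queues.get(i).remove(blockId)) {
        return true;
      }
    }
    return false;
  }

  boolean contains(String blockId) {
    for (Set<String> q : queues) {
      if (q.contains(blockId)) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    ToyUnderReplicatedQueue needed = new ToyUnderReplicatedQueue();
    needed.add("blk_1001", 2);
    // Mirrors neededReplications.remove(block, UnderReplicatedBlocks.LEVEL):
    // purge the block from whichever priority bucket happens to hold it.
    needed.remove("blk_1001", ToyUnderReplicatedQueue.LEVEL);
    System.out.println("still queued? " + needed.contains("blk_1001")); // prints false
  }
}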
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
@@ -99,16 +99,71 @@ public void testMetaSave() throws IOException, InterruptedException {
+ "metasave.out.txt";
FileInputStream fstream = new FileInputStream(logFile);
DataInputStream in = new DataInputStream(fstream);
BufferedReader reader = new BufferedReader(new InputStreamReader(in));
String line = reader.readLine();
assertTrue(line.equals("3 files and directories, 2 blocks = 5 total"));
line = reader.readLine();
assertTrue(line.equals("Live Datanodes: 1"));
line = reader.readLine();
assertTrue(line.equals("Dead Datanodes: 1"));
line = reader.readLine();
line = reader.readLine();
assertTrue(line.matches("^/filestatus[01]:.*"));
BufferedReader reader = null;
try {
reader = new BufferedReader(new InputStreamReader(in));
String line = reader.readLine();
assertTrue(line.equals(
"3 files and directories, 2 blocks = 5 total"));
line = reader.readLine();
assertTrue(line.equals("Live Datanodes: 1"));
line = reader.readLine();
assertTrue(line.equals("Dead Datanodes: 1"));
line = reader.readLine();
line = reader.readLine();
assertTrue(line.matches("^/filestatus[01]:.*"));
} finally {
if (reader != null)
reader.close();
}
}

/**
* Tests metasave after delete, to make sure there are no orphaned blocks
*/
@Test
public void testMetasaveAfterDelete()
throws IOException, InterruptedException {

final FSNamesystem namesystem = cluster.getNamesystem();

for (int i = 0; i < 2; i++) {
Path file = new Path("/filestatus" + i);
createFile(fileSys, file);
}

cluster.stopDataNode(1);
// wait for namenode to discover that a datanode is dead
Thread.sleep(15000);
namesystem.setReplication("/filestatus0", (short) 4);
namesystem.delete("/filestatus0", true);
namesystem.delete("/filestatus1", true);

namesystem.metaSave("metasaveAfterDelete.out.txt");

// Verification
String logFile = System.getProperty("hadoop.log.dir") + "/"
+ "metasaveAfterDelete.out.txt";
BufferedReader reader = null;
try {
FileInputStream fstream = new FileInputStream(logFile);
DataInputStream in = new DataInputStream(fstream);
reader = new BufferedReader(new InputStreamReader(in));
reader.readLine();
String line = reader.readLine();
assertTrue(line.equals("Live Datanodes: 1"));
line = reader.readLine();
assertTrue(line.equals("Dead Datanodes: 1"));
line = reader.readLine();
assertTrue(line.equals("Metasave: Blocks waiting for replication: 0"));
line = reader.readLine();
assertTrue(line.equals("Mis-replicated blocks that have been postponed:"));
line = reader.readLine();
assertTrue(line.equals("Metasave: Blocks being replicated: 0"));
} finally {
if (reader != null)
reader.close();
}
}

@AfterClass
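The new testMetasaveAfterDelete() reproduces the reported scenario: with one datanode stopped, the replication factor of /filestatus0 is raised to 4, both files are deleted, and the test then asserts that the metasave report shows "Metasave: Blocks waiting for replication: 0", i.e. the deleted blocks left no orphaned entries in neededReplications. A side note on the reader handling in both tests: the try/finally-with-null-check style matches the Java 6 era of this branch; on Java 7+ the same cleanup could be written with try-with-resources, as in the sketch below (the file path is a placeholder, not the value the test derives from hadoop.log.dir).

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

public class MetasaveReaderSketch {
  public static void main(String[] args) throws IOException {
    // Placeholder path; the test builds it from the hadoop.log.dir system property.
    String logFile = "/tmp/metasaveAfterDelete.out.txt";
    // try-with-resources closes the reader even if reading or an assertion fails.
    try (BufferedReader reader = new BufferedReader(new FileReader(logFile))) {
      String line;
      while ((line = reader.readLine()) != null) {
        System.out.println(line);
      }
    }
  }
}

To run just this test class, something like mvn -Dtest=TestMetaSave test from hadoop-hdfs-project/hadoop-hdfs should work with the Surefire setup used in this tree.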
