diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 1e349759c9bc7..dbcb5885fed44 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -96,6 +96,7 @@
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import javax.annotation.Nullable;
 import javax.management.ObjectName;
@@ -376,6 +377,7 @@ public static InetSocketAddress createSocketAddr(String target) {
   SaslDataTransferClient saslClient;
   SaslDataTransferServer saslServer;
   private ObjectName dataNodeInfoBeanName;
+  private final ReentrantReadWriteLock dataNodeInfoBeanLock = new ReentrantReadWriteLock();
   // Test verification only
   private volatile long lastDiskErrorCheck;
   private String supergroup;
@@ -946,14 +948,19 @@ private synchronized void removeVolumes(
     }
   }
 
-  private synchronized void setClusterId(final String nsCid, final String bpid
+  private void setClusterId(final String nsCid, final String bpid
       ) throws IOException {
-    if(clusterId != null && !clusterId.equals(nsCid)) {
-      throw new IOException ("Cluster IDs not matched: dn cid=" + clusterId
-          + " but ns cid="+ nsCid + "; bpid=" + bpid);
+    dataNodeInfoBeanLock.writeLock().lock();
+    try {
+      if(clusterId != null && !clusterId.equals(nsCid)) {
+        throw new IOException ("Cluster IDs not matched: dn cid=" + clusterId
+            + " but ns cid="+ nsCid + "; bpid=" + bpid);
+      }
+      // else
+      clusterId = nsCid;
+    } finally {
+      dataNodeInfoBeanLock.writeLock().unlock();
     }
-    // else
-    clusterId = nsCid;
   }
 
   /**
@@ -3278,8 +3285,13 @@ public String getVolumeInfo() {
   }
 
   @Override // DataNodeMXBean
-  public synchronized String getClusterId() {
-    return clusterId;
+  public String getClusterId() {
+    dataNodeInfoBeanLock.readLock().lock();
+    try {
+      return clusterId;
+    } finally {
+      dataNodeInfoBeanLock.readLock().unlock();
+    }
   }
 
   @Override // DataNodeMXBean
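
Reviewer note: the core of this change swaps method-level synchronized for a
ReentrantReadWriteLock, so that JMX reads of the DataNodeInfo MXBean via
getClusterId() no longer queue behind every other operation holding the
DataNode monitor. Below is a minimal, self-contained sketch of that pattern;
the class and method names (ClusterIdHolder, set, get) are illustrative only
and are not part of the patch.

import java.io.IOException;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class ClusterIdHolder {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private String clusterId;

  // Writer path: exclusive lock, mirroring setClusterId() above.
  void set(String nsCid) throws IOException {
    lock.writeLock().lock();
    try {
      if (clusterId != null && !clusterId.equals(nsCid)) {
        throw new IOException("Cluster IDs not matched: dn cid=" + clusterId
            + " but ns cid=" + nsCid);
      }
      clusterId = nsCid;
    } finally {
      lock.writeLock().unlock();
    }
  }

  // Reader path: shared lock, mirroring getClusterId() above.
  String get() {
    lock.readLock().lock();
    try {
      return clusterId;
    } finally {
      lock.readLock().unlock();
    }
  }
}

A read-write lock fits this access pattern: clusterId is written once per
namespace registration but may be read repeatedly by JMX polls, and readers
taking the shared lock never block one another. Initializing the final lock
at declaration ensures it exists before the first registration runs.
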
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 602ac008fab4f..b89233900a896 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -519,6 +519,29 @@ public void testDataNodeMXBeanActiveThreadCount() throws Exception {
     }
   }
 
+  @Test(timeout=60000)
+  public void testClusterIdMetric() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1).build();
+    try {
+      List<DataNode> datanodes = cluster.getDataNodes();
+      assertEquals(1, datanodes.size());
+      DataNode datanode = datanodes.get(0);
+      cluster.startDataNodes(conf, 1, true,
+          StartupOption.REGULAR, null);
+      List<BPOfferService> bpOfferServices = datanode.getAllBpOs();
+      assertEquals(1, bpOfferServices.size());
+      BPOfferService bpOfferService = bpOfferServices.get(0);
+      assertEquals(bpOfferService.getNamespaceInfo().getClusterID(),
+          datanode.getClusterId());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   @Test
   public void testDNShouldNotDeleteBlockONTooManyOpenFiles()
       throws Exception {
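
For completeness, a sketch of how the ClusterId attribute this test exercises
can be read over JMX from the same JVM (for example, alongside a running
MiniDFSCluster). The object name Hadoop:service=DataNode,name=DataNodeInfo is
the one DataNode registers its MXBean under; the class name ClusterIdJmxProbe
is illustrative.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class ClusterIdJmxProbe {
  // Reads the ClusterId MXBean attribute backed by DataNode#getClusterId().
  public static String readClusterId() throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName name =
        new ObjectName("Hadoop:service=DataNode,name=DataNodeInfo");
    return (String) mbs.getAttribute(name, "ClusterId");
  }
}

Going through the platform MBeanServer rather than calling getClusterId()
directly exercises the same read path an external JMX client would take, which
is exactly the path the new read lock protects.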