HDFS-1117. Metrics 2.0 HDFS instrumentation. Contributed by Luke Lu.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@1103834 13f79535-47bb-0310-9956-ffa450edef68
commit 4979d5e4b73baf56f6fb142e901dfb263a180a88 (1 parent: 102ee5b)
Suresh Srinivas authored
Showing with 553 additions and 768 deletions.
  1. +4 −1 CHANGES.txt
  2. +1 −1  build.xml
  3. +27 −0 conf/hadoop-metrics2.properties
  4. +9 −0 src/java/org/apache/hadoop/hdfs/DFSUtil.java
  5. +2 −2 src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
  6. +2 −2 src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
  7. +15 −26 src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  8. +19 −21 src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
  9. +5 −6 src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
  10. +0 −80 src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java
  11. +128 −101 src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
  12. +1 −1  src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
  13. +2 −2 src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  14. +3 −3 src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
  15. +51 −42 src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  16. +32 −35 src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  17. +5 −2 src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  18. +2 −2 src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
  19. +0 −132 src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java
  20. +0 −69 src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeActivityMBean.java
  21. +134 −116 src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
  22. +3 −3 src/java/org/apache/hadoop/hdfs/tools/JMXGet.java
  23. +3 −0  src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
  24. +2 −4 src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java
  25. +4 −0 src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
  26. +6 −7 src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
  27. +2 −1  src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
  28. +5 −3 src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
  29. +11 −16 src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
  30. +3 −9 src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
  31. +2 −1  src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
  32. +5 −6 src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
  33. +48 −60 src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
  34. +17 −14 src/test/hdfs/org/apache/hadoop/tools/TestJMXGet.java
5 CHANGES.txt
@@ -286,7 +286,8 @@ Trunk (unreleased changes)
HDFS-1628. Display full path in AccessControlException. (John George
via szetszwo)
- HDFS-1707. Federation: Failure in browsing data on new namenodes. (jitendra)
+ HDFS-1707. Federation: Failure in browsing data on new namenodes.
+ (jitendra)
HDFS-1683. Test Balancer with multiple NameNodes. (szetszwo)
@@ -413,6 +414,8 @@ Trunk (unreleased changes)
HDFS-1899. GenericTestUtils.formatNamenode should be moved to DFSTestUtil
(Ted Yu via todd)
+ HDFS-1117. Metrics 2.0 HDFS instrumentation. (Luke Lu via suresh)
+
OPTIMIZATIONS
HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
2  build.xml
@@ -92,7 +92,7 @@
<property name="test.junit.fork.mode" value="perTest" />
<property name="test.junit.printsummary" value="yes" />
<property name="test.junit.haltonfailure" value="no" />
- <property name="test.junit.maxmemory" value="512m" />
+ <property name="test.junit.maxmemory" value="1024m" />
<property name="test.conf.dir" value="${build.dir}/test/conf" />
<property name="test.hdfs.build.classes" value="${test.build.dir}/hdfs/classes"/>
27 conf/hadoop-metrics2.properties
@@ -0,0 +1,27 @@
+# syntax: [prefix].[source|sink].[instance].[options]
+# See the package-info.java javadoc for org.apache.hadoop.metrics2 for details
+
+*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
+# default sampling period
+*.period=10
+
+# The namenode-metrics.out file will contain metrics from all contexts
+#namenode.sink.file.filename=namenode-metrics.out
+# Specifying a special sampling period for namenode:
+#namenode.sink.*.period=8
+
+#datanode.sink.file.filename=datanode-metrics.out
+
+# The following example splits metrics of different
+# contexts into different sinks (in this case, files)
+#jobtracker.sink.file_jvm.context=jvm
+#jobtracker.sink.file_jvm.filename=jobtracker-jvm-metrics.out
+#jobtracker.sink.file_mapred.context=mapred
+#jobtracker.sink.file_mapred.filename=jobtracker-mapred-metrics.out
+
+#tasktracker.sink.file.filename=tasktracker-metrics.out
+
+#maptask.sink.file.filename=maptask-metrics.out
+
+#reducetask.sink.file.filename=reducetask-metrics.out
+
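For reference, a minimal active configuration assembled only from the commented examples above would route all NameNode metrics to one file, sampled every 10 seconds:

    *.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
    *.period=10
    namenode.sink.file.filename=namenode-metrics.out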
9 src/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -576,4 +576,13 @@ public static InetSocketAddress getSocketAddress(String address) {
return new InetSocketAddress(address.substring(0, colon),
Integer.parseInt(address.substring(colon + 1)));
}
+
+ /**
+ * Round bytes to GiB (gibibyte)
+ * @param bytes number of bytes
+ * @return number of GiB
+ */
+ public static int roundBytesToGB(long bytes) {
+ return Math.round((float)bytes / 1024 / 1024 / 1024);
+ }
}
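A quick illustration of the new helper's rounding (hypothetical byte counts; Math.round rounds the float quotient half-up):

    DFSUtil.roundBytesToGB(1610612736L); // 1.5 GiB -> 2
    DFSUtil.roundBytesToGB(268435456L);  // 0.25 GiB -> 0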
4 src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
@@ -440,13 +440,13 @@ private void verifyBlock(ExtendedBlock block) {
if (second) {
totalScanErrors++;
- datanode.getMetrics().blockVerificationFailures.inc();
+ datanode.getMetrics().incrBlockVerificationFailures();
handleScanFailure(block);
return;
}
} finally {
IOUtils.closeStream(blockSender);
- datanode.getMetrics().blocksVerified.inc();
+ datanode.getMetrics().incrBlocksVerified();
totalScans++;
}
}
4 src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -628,7 +628,7 @@ private int receivePacket(long offsetInBlock, long seqno,
offsetInBlock, lastChunkChecksum
);
- datanode.myMetrics.bytesWritten.inc(len);
+ datanode.metrics.incrBytesWritten(len);
}
} catch (IOException iex) {
datanode.checkDiskError(iex);
@@ -696,7 +696,7 @@ void receiveBlock(
// Finalize the block. Does this fsync()?
datanode.data.finalizeBlock(block);
}
- datanode.myMetrics.blocksWritten.inc();
+ datanode.metrics.incrBlocksWritten();
}
} catch (IOException ioe) {
41 src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -29,7 +29,6 @@
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
-import java.lang.management.ManagementFactory;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
@@ -54,9 +53,6 @@
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -118,6 +114,8 @@
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
@@ -353,7 +351,7 @@ void refreshNamenodes(Configuration conf)
long heartBeatInterval;
private DataStorage storage = null;
private HttpServer infoServer = null;
- DataNodeMetrics myMetrics;
+ DataNodeMetrics metrics;
private InetSocketAddress selfAddr;
private static volatile DataNode datanodeObject = null;
@@ -925,7 +923,7 @@ DatanodeCommand blockReport() throws IOException {
cmd = bpNamenode.blockReport(bpRegistration, blockPoolId, bReport
.getBlockListAsLongs());
long brTime = now() - brStartTime;
- myMetrics.blockReports.inc(brTime);
+ metrics.addBlockReport(brTime);
LOG.info("BlockReport of " + bReport.getNumberOfBlocks() +
" blocks got processed in " + brTime + " msecs");
//
@@ -1036,7 +1034,7 @@ private void offerService() throws Exception {
//
lastHeartbeat = startTime;
DatanodeCommand[] cmds = sendHeartBeat();
- myMetrics.heartbeats.inc(now() - startTime);
+ metrics.addHeartbeat(now() - startTime);
if (!processCommand(cmds))
continue;
}
@@ -1258,7 +1256,7 @@ private boolean processCommand(DatanodeCommand cmd) throws IOException {
case DatanodeProtocol.DNA_TRANSFER:
// Send a copy of a block to another datanode
transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(), bcmd.getTargets());
- myMetrics.blocksReplicated.inc(bcmd.getBlocks().length);
+ metrics.incrBlocksReplicated(bcmd.getBlocks().length);
break;
case DatanodeProtocol.DNA_INVALIDATE:
//
@@ -1276,7 +1274,7 @@ private boolean processCommand(DatanodeCommand cmd) throws IOException {
checkDiskError();
throw e;
}
- myMetrics.blocksRemoved.inc(toDelete.length);
+ metrics.incrBlocksRemoved(toDelete.length);
break;
case DatanodeProtocol.DNA_SHUTDOWN:
// shut down the data node
@@ -1377,7 +1375,7 @@ void startDataNode(Configuration conf,
this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
initIpcServer(conf);
- myMetrics = new DataNodeMetrics(conf, getMachineName());
+ metrics = DataNodeMetrics.create(conf, getMachineName());
blockPoolManager = new BlockPoolManager(conf);
}
@@ -1427,17 +1425,7 @@ public static InetSocketAddress getInfoAddr(Configuration conf) {
}
private void registerMXBean() {
- // register MXBean
- MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
- try {
- ObjectName mxbeanName = new ObjectName("HadoopInfo:type=DataNodeInfo");
- mbs.registerMBean(this, mxbeanName);
- } catch ( javax.management.InstanceAlreadyExistsException iaee ) {
- // in unit tests, we may have multiple datanodes in the same JVM
- LOG.info("DataNode MXBean already registered");
- } catch ( javax.management.JMException e ) {
- LOG.warn("Failed to register DataNode MXBean", e);
- }
+ MBeans.register("DataNode", "DataNodeInfo", this);
}
int getPort() {
@@ -1551,7 +1539,7 @@ public InetSocketAddress getSelfAddr() {
}
DataNodeMetrics getMetrics() {
- return myMetrics;
+ return metrics;
}
public static void setNewStorageID(DatanodeID dnId) {
@@ -1668,8 +1656,8 @@ public void shutdown() {
if (data != null) {
data.shutdown();
}
- if (myMetrics != null) {
- myMetrics.shutdown();
+ if (metrics != null) {
+ metrics.shutdown();
}
}
@@ -1709,7 +1697,7 @@ private void handleDiskError(String errMsgr) {
// shutdown the DN completely.
int dpError = hasEnoughResources ? DatanodeProtocol.DISK_ERROR
: DatanodeProtocol.FATAL_DISK_ERROR;
- myMetrics.volumeFailures.inc(1);
+ metrics.incrVolumeFailures();
//inform NameNodes
for(BPOfferService bpos: blockPoolManager.getAllNamenodeThreads()) {
@@ -2003,7 +1991,7 @@ public void run() {
* @param delHint
*/
void closeBlock(ExtendedBlock block, String delHint) {
- myMetrics.blocksWritten.inc();
+ metrics.incrBlocksWritten();
BPOfferService bpos = blockPoolManager.get(block.getBlockPoolId());
if(bpos != null) {
bpos.notifyNamenodeReceivedBlock(block, delHint);
@@ -2138,6 +2126,7 @@ static DataNode makeInstance(Collection<URI> dataDirs, Configuration conf,
conf.get(DFS_DATANODE_DATA_DIR_PERMISSION_KEY,
DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
ArrayList<File> dirs = getDataDirsFromURIs(dataDirs, localFS, permission);
+ DefaultMetricsSystem.initialize("DataNode");
assert dirs.size() > 0 : "number of data directories should be > 0";
return new DataNode(conf, dirs, resources);
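Taken together, the DataNode changes follow the standard metrics2 lifecycle. A condensed sketch, using only the calls that appear in this diff:

    // at daemon startup, before the DataNode is constructed
    DefaultMetricsSystem.initialize("DataNode");

    // in startDataNode(): create and register the annotated metrics source
    metrics = DataNodeMetrics.create(conf, getMachineName());

    // instrumented code paths update the source directly
    metrics.incrBlocksWritten();
    metrics.addHeartbeat(now() - startTime);

    // on shutdown
    metrics.shutdown();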
40 src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -48,8 +48,8 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
@@ -183,11 +183,11 @@ protected void opReadBlock(DataInputStream in, ExtendedBlock block,
SUCCESS.write(out); // send op status
long read = blockSender.sendBlock(out, baseStream, null); // send data
- datanode.myMetrics.bytesRead.inc((int) read);
- datanode.myMetrics.blocksRead.inc();
+ datanode.metrics.incrBytesRead((int) read);
+ datanode.metrics.incrBlocksRead();
} catch ( SocketException ignored ) {
// Its ok for remote side to close the connection anytime.
- datanode.myMetrics.blocksRead.inc();
+ datanode.metrics.incrBlocksRead();
} catch ( IOException ioe ) {
/* What exactly should we do here?
* Earlier version shutdown() datanode if there is disk error.
@@ -203,9 +203,8 @@ protected void opReadBlock(DataInputStream in, ExtendedBlock block,
}
//update metrics
- updateDuration(datanode.myMetrics.readBlockOp);
- updateCounter(datanode.myMetrics.readsFromLocalClient,
- datanode.myMetrics.readsFromRemoteClient);
+ datanode.metrics.addReadBlockOp(elapsed());
+ datanode.metrics.incrReadsFromClient(isLocal);
}
/**
@@ -409,9 +408,8 @@ protected void opWriteBlock(final DataInputStream in, final ExtendedBlock block,
}
//update metrics
- updateDuration(datanode.myMetrics.writeBlockOp);
- updateCounter(datanode.myMetrics.writesFromLocalClient,
- datanode.myMetrics.writesFromRemoteClient);
+ datanode.metrics.addWriteBlockOp(elapsed());
+ datanode.metrics.incrWritesFromClient(isLocal);
}
@Override
@@ -482,7 +480,7 @@ protected void opBlockChecksum(DataInputStream in, ExtendedBlock block,
}
//update metrics
- updateDuration(datanode.myMetrics.blockChecksumOp);
+ datanode.metrics.addBlockChecksumOp(elapsed());
}
/**
@@ -535,8 +533,8 @@ protected void opCopyBlock(DataInputStream in, ExtendedBlock block,
long read = blockSender.sendBlock(reply, baseStream,
dataXceiverServer.balanceThrottler);
- datanode.myMetrics.bytesRead.inc((int) read);
- datanode.myMetrics.blocksRead.inc();
+ datanode.metrics.incrBytesRead((int) read);
+ datanode.metrics.incrBlocksRead();
LOG.info("Copied block " + block + " to " + s.getRemoteSocketAddress());
} catch (IOException ioe) {
@@ -556,7 +554,7 @@ protected void opCopyBlock(DataInputStream in, ExtendedBlock block,
}
//update metrics
- updateDuration(datanode.myMetrics.copyBlockOp);
+ datanode.metrics.addCopyBlockOp(elapsed());
}
/**
@@ -670,16 +668,16 @@ protected void opReplaceBlock(DataInputStream in,
}
//update metrics
- updateDuration(datanode.myMetrics.replaceBlockOp);
+ datanode.metrics.addReplaceBlockOp(elapsed());
}
- private void updateDuration(MetricsTimeVaryingRate mtvr) {
- mtvr.inc(now() - opStartTime);
+ private long elapsed() {
+ return now() - opStartTime;
}
- private void updateCounter(MetricsTimeVaryingInt localCounter,
- MetricsTimeVaryingInt remoteCounter) {
- (isLocal? localCounter: remoteCounter).inc();
+ private void updateCounter(MutableCounterLong localCounter,
+ MutableCounterLong remoteCounter) {
+ (isLocal? localCounter: remoteCounter).incr();
}
/**
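The op accounting in DataXceiver now reduces to one pattern; a sketch of the calls as refactored above:

    // opStartTime is recorded when the operation begins
    private long elapsed() {
      return now() - opStartTime;
    }

    // after each op: record latency on a MutableRate and bump the
    // local- or remote-client counter as appropriate
    datanode.metrics.addReadBlockOp(elapsed());
    datanode.metrics.incrReadsFromClient(isLocal);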
11 src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
@@ -59,7 +59,7 @@
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.StringUtils;
@@ -2186,18 +2186,17 @@ void registerMBean(final String storageId) {
}
try {
bean = new StandardMBean(this,FSDatasetMBean.class);
- mbeanName = MBeanUtil.registerMBean("DataNode", "FSDatasetState-" + storageName, bean);
+ mbeanName = MBeans.register("DataNode", "FSDatasetState-" + storageName, bean);
} catch (NotCompliantMBeanException e) {
- e.printStackTrace();
+ DataNode.LOG.warn("Error registering FSDatasetState MBean", e);
}
-
- DataNode.LOG.info("Registered FSDatasetStatusMBean");
+ DataNode.LOG.info("Registered FSDatasetState MBean");
}
@Override // FSDatasetInterface
public void shutdown() {
if (mbeanName != null)
- MBeanUtil.unregisterMBean(mbeanName);
+ MBeans.unregister(mbeanName);
if (asyncDiskService != null) {
asyncDiskService.shutdown();
80 src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.datanode.metrics;
-import java.util.Random;
-
-import javax.management.ObjectName;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.metrics.util.MBeanUtil;
-import org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-
-/**
- *
- * This is the JMX MBean for reporting the DataNode Activity.
- * The MBean is register using the name
- * "hadoop:service=DataNode,name=DataNodeActivity-<hostname>-<portNumber>"
- *
- * Many of the activity metrics are sampled and averaged on an interval
- * which can be specified in the metrics config file.
- * <p>
- * For the metrics that are sampled and averaged, one must specify
- * a metrics context that does periodic update calls. Most metrics contexts do.
- * The default Null metrics context however does NOT. So if you aren't
- * using any other metrics context then you can turn on the viewing and averaging
- * of sampled metrics by specifying the following two lines
- * in the hadoop-meterics.properties file:
-* <pre>
- * dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
- * dfs.period=10
- * </pre>
- *<p>
- * Note that the metrics are collected regardless of the context used.
- * The context with the update thread is used to average the data periodically
- *
- *
- *
- * Impl details: We use a dynamic mbean that gets the list of the metrics
- * from the metrics registry passed as an argument to the constructor
- */
-
-@InterfaceAudience.Private
-public class DataNodeActivityMBean extends MetricsDynamicMBeanBase {
- final private ObjectName mbeanName;
- private Random rand = new Random();
-
- public DataNodeActivityMBean(final MetricsRegistry mr,
- final String datanodeName) {
- super(mr, "Activity statistics at the DataNode");
- String name;
- if (datanodeName.equals("")) {// Temp fix for the uninitialized name
- name = "UndefinedDataNodeName" + rand.nextInt();
- } else {
- name = datanodeName.replace(":", "-");
- }
- mbeanName = MBeanUtil.registerMBean("DataNode",
- "DataNodeActivity-" + name, this);
- }
-
-
- public void shutdown() {
- if (mbeanName != null)
- MBeanUtil.unregisterMBean(mbeanName);
- }
-}
229 src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -17,23 +17,22 @@
*/
package org.apache.hadoop.hdfs.server.datanode.metrics;
+import java.util.Random;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.MetricsUtil;
-import org.apache.hadoop.metrics.Updater;
-import org.apache.hadoop.metrics.jvm.JvmMetrics;
-import org.apache.hadoop.metrics.util.MetricsBase;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
+import static org.apache.hadoop.metrics2.impl.MsInfo.*;
/**
- *
+ *
* This class is for maintaining the various DataNode statistics
* and publishing them through the metrics interfaces.
* This also registers the JMX MBean for RPC.
@@ -45,97 +44,125 @@
*
*/
@InterfaceAudience.Private
-public class DataNodeMetrics implements Updater {
- private final MetricsRecord metricsRecord;
- private DataNodeActivityMBean datanodeActivityMBean;
- public MetricsRegistry registry = new MetricsRegistry();
-
-
- public MetricsTimeVaryingLong bytesWritten =
- new MetricsTimeVaryingLong("bytes_written", registry);
- public MetricsTimeVaryingLong bytesRead =
- new MetricsTimeVaryingLong("bytes_read", registry);
- public MetricsTimeVaryingInt blocksWritten =
- new MetricsTimeVaryingInt("blocks_written", registry);
- public MetricsTimeVaryingInt blocksRead =
- new MetricsTimeVaryingInt("blocks_read", registry);
- public MetricsTimeVaryingInt blocksReplicated =
- new MetricsTimeVaryingInt("blocks_replicated", registry);
- public MetricsTimeVaryingInt blocksRemoved =
- new MetricsTimeVaryingInt("blocks_removed", registry);
- public MetricsTimeVaryingInt blocksVerified =
- new MetricsTimeVaryingInt("blocks_verified", registry);
- public MetricsTimeVaryingInt blockVerificationFailures =
- new MetricsTimeVaryingInt("block_verification_failures", registry);
-
- public MetricsTimeVaryingInt readsFromLocalClient =
- new MetricsTimeVaryingInt("reads_from_local_client", registry);
- public MetricsTimeVaryingInt readsFromRemoteClient =
- new MetricsTimeVaryingInt("reads_from_remote_client", registry);
- public MetricsTimeVaryingInt writesFromLocalClient =
- new MetricsTimeVaryingInt("writes_from_local_client", registry);
- public MetricsTimeVaryingInt writesFromRemoteClient =
- new MetricsTimeVaryingInt("writes_from_remote_client", registry);
-
- public MetricsTimeVaryingInt volumeFailures =
- new MetricsTimeVaryingInt("volumeFailures", registry);
+@Metrics(about="DataNode metrics", context="dfs")
+public class DataNodeMetrics {
+
+ @Metric MutableCounterLong bytesWritten;
+ @Metric MutableCounterLong bytesRead;
+ @Metric MutableCounterLong blocksWritten;
+ @Metric MutableCounterLong blocksRead;
+ @Metric MutableCounterLong blocksReplicated;
+ @Metric MutableCounterLong blocksRemoved;
+ @Metric MutableCounterLong blocksVerified;
+ @Metric MutableCounterLong blockVerificationFailures;
+ @Metric MutableCounterLong readsFromLocalClient;
+ @Metric MutableCounterLong readsFromRemoteClient;
+ @Metric MutableCounterLong writesFromLocalClient;
+ @Metric MutableCounterLong writesFromRemoteClient;
- public MetricsTimeVaryingRate readBlockOp =
- new MetricsTimeVaryingRate("readBlockOp", registry);
- public MetricsTimeVaryingRate writeBlockOp =
- new MetricsTimeVaryingRate("writeBlockOp", registry);
- public MetricsTimeVaryingRate blockChecksumOp =
- new MetricsTimeVaryingRate("blockChecksumOp", registry);
- public MetricsTimeVaryingRate copyBlockOp =
- new MetricsTimeVaryingRate("copyBlockOp", registry);
- public MetricsTimeVaryingRate replaceBlockOp =
- new MetricsTimeVaryingRate("replaceBlockOp", registry);
- public MetricsTimeVaryingRate heartbeats =
- new MetricsTimeVaryingRate("heartBeats", registry);
- public MetricsTimeVaryingRate blockReports =
- new MetricsTimeVaryingRate("blockReports", registry);
-
-
- public DataNodeMetrics(Configuration conf, String datanodeName) {
- String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
- // Initiate reporting of Java VM metrics
- JvmMetrics.init("DataNode", sessionId);
-
-
- // Now the MBean for the data node
- datanodeActivityMBean = new DataNodeActivityMBean(registry, datanodeName);
-
- // Create record for DataNode metrics
- MetricsContext context = MetricsUtil.getContext("dfs");
- metricsRecord = MetricsUtil.createRecord(context, "datanode");
- metricsRecord.setTag("sessionId", sessionId);
- context.registerUpdater(this);
+ @Metric MutableCounterLong volumeFailures;
+
+ @Metric MutableRate readBlockOp;
+ @Metric MutableRate writeBlockOp;
+ @Metric MutableRate blockChecksumOp;
+ @Metric MutableRate copyBlockOp;
+ @Metric MutableRate replaceBlockOp;
+ @Metric MutableRate heartbeats;
+ @Metric MutableRate blockReports;
+
+ final MetricsRegistry registry = new MetricsRegistry("datanode");
+ final String name;
+ static final Random rng = new Random();
+
+ public DataNodeMetrics(String name, String sessionId) {
+ this.name = name;
+ registry.tag(SessionId, sessionId);
}
-
+
+ public static DataNodeMetrics create(Configuration conf, String dnName) {
+ String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
+ MetricsSystem ms = DefaultMetricsSystem.instance();
+ JvmMetrics.create("DataNode", sessionId, ms);
+ String name = "DataNodeActivity-"+ (dnName.isEmpty()
+ ? "UndefinedDataNodeName"+ rng.nextInt() : dnName.replace(':', '-'));
+ return ms.register(name, null, new DataNodeMetrics(name, sessionId));
+ }
+
+ public String name() { return name; }
+
+ public void addHeartbeat(long latency) {
+ heartbeats.add(latency);
+ }
+
+ public void addBlockReport(long latency) {
+ blockReports.add(latency);
+ }
+
+ public void incrBlocksReplicated(int delta) {
+ blocksReplicated.incr(delta);
+ }
+
+ public void incrBlocksWritten() {
+ blocksWritten.incr();
+ }
+
+ public void incrBlocksRemoved(int delta) {
+ blocksRemoved.incr(delta);
+ }
+
+ public void incrBytesWritten(int delta) {
+ bytesWritten.incr(delta);
+ }
+
+ public void incrBlockVerificationFailures() {
+ blockVerificationFailures.incr();
+ }
+
+ public void incrBlocksVerified() {
+ blocksVerified.incr();
+ }
+
+ public void addReadBlockOp(long latency) {
+ readBlockOp.add(latency);
+ }
+
+ public void addWriteBlockOp(long latency) {
+ writeBlockOp.add(latency);
+ }
+
+ public void addReplaceBlockOp(long latency) {
+ replaceBlockOp.add(latency);
+ }
+
+ public void addCopyBlockOp(long latency) {
+ copyBlockOp.add(latency);
+ }
+
+ public void addBlockChecksumOp(long latency) {
+ blockChecksumOp.add(latency);
+ }
+
+ public void incrBytesRead(int delta) {
+ bytesRead.incr(delta);
+ }
+
+ public void incrBlocksRead() {
+ blocksRead.incr();
+ }
+
public void shutdown() {
- if (datanodeActivityMBean != null)
- datanodeActivityMBean.shutdown();
- }
-
- /**
- * Since this object is a registered updater, this method will be called
- * periodically, e.g. every 5 seconds.
- */
- public void doUpdates(MetricsContext unused) {
- synchronized (this) {
- for (MetricsBase m : registry.getMetricsList()) {
- m.pushMetric(metricsRecord);
- }
- }
- metricsRecord.update();
- }
- public void resetAllMinMax() {
- readBlockOp.resetMinMax();
- writeBlockOp.resetMinMax();
- blockChecksumOp.resetMinMax();
- copyBlockOp.resetMinMax();
- replaceBlockOp.resetMinMax();
- heartbeats.resetMinMax();
- blockReports.resetMinMax();
+ DefaultMetricsSystem.shutdown();
+ }
+
+ public void incrWritesFromClient(boolean local) {
+ (local ? writesFromLocalClient : writesFromRemoteClient).incr();
+ }
+
+ public void incrReadsFromClient(boolean local) {
+ (local ? readsFromLocalClient : readsFromRemoteClient).incr();
+ }
+
+ public void incrVolumeFailures() {
+ volumeFailures.incr();
}
}
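The rewritten class illustrates the general metrics2 source pattern: annotated fields replace the hand-built registry entries, and registration with the metrics system replaces the old push-style Updater. A minimal sketch of that pattern (ExampleMetrics and its members are hypothetical names, not part of this commit):

    @Metrics(about="Example metrics", context="dfs")
    class ExampleMetrics {
      final MetricsRegistry registry = new MetricsRegistry("example");

      @Metric MutableCounterLong requests;  // counter; updated via requests.incr()
      @Metric MutableRate requestLatency;   // rate; updated via requestLatency.add(ms)

      static ExampleMetrics create(MetricsSystem ms) {
        // register(name, description, source) wires up the annotated fields
        return ms.register("Example", null, new ExampleMetrics());
      }
    }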
2  src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java
@@ -33,7 +33,7 @@
*
* <p>
* Data Node runtime statistic info is report in another MBean
- * @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean
+ * @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics
*
*/
@InterfaceAudience.Private
4 src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -185,7 +185,7 @@ protected void setReady(boolean flag) {
private void incrDeletedFileCount(int count) {
if (getFSNamesystem() != null)
- NameNode.getNameNodeMetrics().numFilesDeleted.inc(count);
+ NameNode.getNameNodeMetrics().incrFilesDeleted(count);
}
/**
@@ -1484,7 +1484,7 @@ boolean mkdirs(String src, PermissionStatus permissions,
// Directory creation also count towards FilesCreated
// to match count of FilesDeleted metric.
if (getFSNamesystem() != null)
- NameNode.getNameNodeMetrics().numFilesCreated.inc();
+ NameNode.getNameNodeMetrics().incrFilesCreated();
fsImage.getEditLog().logMkDir(cur, inodes[i]);
if(NameNode.stateChangeLog.isDebugEnabled()) {
NameNode.stateChangeLog.debug(
6 src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -400,7 +400,7 @@ private void recordTransaction(long start) {
numTransactions++;
totalTimeTransactions += (end-start);
if (metrics != null) // Metrics is non-null only when used inside name node
- metrics.transactions.inc((end-start));
+ metrics.addTransaction(end-start);
}
/**
@@ -476,7 +476,7 @@ public void logSync() {
if (mytxid <= synctxid) {
numTransactionsBatchedInSync++;
if (metrics != null) // Metrics is non-null only when used inside name node
- metrics.transactionsBatchedInSync.inc();
+ metrics.incrTransactionsBatchedInSync();
return;
}
@@ -528,7 +528,7 @@ public void logSync() {
disableAndReportErrorOnStreams(errorStreams);
if (metrics != null) // Metrics non-null only when used inside name node
- metrics.syncs.inc(elapsed);
+ metrics.addSync(elapsed);
} finally {
// Prevent RuntimeException from blocking other log edit sync
synchronized (this) {
93 src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -35,7 +35,6 @@
import org.apache.hadoop.hdfs.server.common.Util;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
-import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
@@ -45,7 +44,6 @@
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.util.*;
-import org.apache.hadoop.metrics.util.MBeanUtil;
import org.apache.hadoop.net.CachedDNSToSwitchMapping;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetworkTopology;
@@ -85,6 +83,11 @@
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MutableCounterInt;
+import org.apache.hadoop.metrics2.util.MBeans;
import org.mortbay.util.ajax.JSON;
import java.io.BufferedWriter;
@@ -103,11 +106,9 @@
import java.util.concurrent.TimeUnit;
import java.util.Map.Entry;
import java.util.concurrent.locks.ReentrantReadWriteLock;
-
import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName;
import javax.management.StandardMBean;
-import javax.management.MBeanServer;
/***************************************************
* FSNamesystem does the actual bookkeeping work for the
@@ -122,8 +123,9 @@
* 5) LRU cache of updated-heartbeat machines
***************************************************/
@InterfaceAudience.Private
-public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterStats,
- NameNodeMXBean {
+@Metrics(context="dfs")
+public class FSNamesystem implements FSConstants, FSNamesystemMBean,
+ FSClusterStats, NameNodeMXBean {
public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
private static final ThreadLocal<StringBuilder> auditBuffer =
@@ -177,7 +179,7 @@ private static final void logAuditEvent(UserGroupInformation ugi,
private String supergroup;
private PermissionStatus defaultPermission;
// FSNamesystemMetrics counter variables
- private FSNamesystemMetrics myFSMetrics;
+ @Metric private MutableCounterInt expiredHeartbeats;
private long capacityTotal = 0L, capacityUsed = 0L, capacityRemaining = 0L;
private long blockPoolUsed = 0L;
private int totalLoad = 0;
@@ -325,7 +327,7 @@ private void initialize(Configuration conf, FSImage fsImage)
this.fsLock = new ReentrantReadWriteLock(true); // fair locking
setConfigurationParameters(conf);
dtSecretManager = createDelegationTokenSecretManager(conf);
- this.registerMBean(conf); // register the MBean for the FSNamesystemStutus
+ this.registerMBean(); // register the MBean for the FSNamesystemState
if(fsImage == null) {
this.dir = new FSDirectory(this, conf);
StartupOption startOpt = NameNode.getStartupOption(conf);
@@ -333,7 +335,7 @@ private void initialize(Configuration conf, FSImage fsImage)
getNamespaceEditsDirs(conf), startOpt);
long timeTakenToLoadFSImage = now() - systemStart;
LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
- NameNode.getNameNodeMetrics().fsImageLoadTime.set(
+ NameNode.getNameNodeMetrics().setFsImageLoadTime(
(int) timeTakenToLoadFSImage);
} else {
this.dir = new FSDirectory(fsImage, this, conf);
@@ -391,6 +393,7 @@ void activate(Configuration conf) throws IOException {
dnsToSwitchMapping.resolve(new ArrayList<String>(hostsReader.getHosts()));
}
registerMXBean();
+ DefaultMetricsSystem.instance().register(this);
}
public static Collection<URI> getNamespaceDirs(Configuration conf) {
@@ -3185,7 +3188,7 @@ void heartbeatCheck() {
it.hasNext();) {
DatanodeDescriptor nodeInfo = it.next();
if (isDatanodeDead(nodeInfo)) {
- myFSMetrics.numExpiredHeartbeats.inc();
+ expiredHeartbeats.incr();
foundDead = true;
nodeID = nodeInfo;
break;
@@ -3252,7 +3255,7 @@ public void processReport(DatanodeID nodeID, String poolId,
}
// Log the block report processing stats from Namenode perspective
- NameNode.getNameNodeMetrics().blockReport.inc((int) (endTime - startTime));
+ NameNode.getNameNodeMetrics().addBlockReport((int) (endTime - startTime));
NameNode.stateChangeLog.info("BLOCK* NameSystem.processReport: from "
+ nodeID.getName() + ", blocks: " + newReport.getNumberOfBlocks()
+ ", processing time: " + (endTime - startTime) + " msecs");
@@ -3396,6 +3399,7 @@ private void checkBlock(ExtendedBlock block) throws IOException {
}
}
+ @Metric({"MissingBlocks", "Number of missing blocks"})
public long getMissingBlocksCount() {
// not locking
return blockManager.getMissingBlocksCount();
@@ -3422,6 +3426,11 @@ public long getCapacityTotal() {
}
}
+ @Metric
+ public float getCapacityTotalGB() {
+ return DFSUtil.roundBytesToGB(getCapacityTotal());
+ }
+
/**
* Total used space by data nodes
*/
@@ -3431,6 +3440,12 @@ public long getCapacityUsed() {
return capacityUsed;
}
}
+
+ @Metric
+ public float getCapacityUsedGB() {
+ return DFSUtil.roundBytesToGB(getCapacityUsed());
+ }
+
/**
* Total used space by data nodes as percentage of total capacity
*/
@@ -3459,6 +3474,11 @@ public long getCapacityRemaining() {
}
}
+ @Metric
+ public float getCapacityRemainingGB() {
+ return DFSUtil.roundBytesToGB(getCapacityRemaining());
+ }
+
/**
* Total remaining space by data nodes as percentage of total capacity
*/
@@ -3471,6 +3491,7 @@ public float getCapacityRemainingPercent() {
* Total number of connections.
*/
@Override // FSNamesystemMBean
+ @Metric
public int getTotalLoad() {
synchronized (heartbeats) {
return this.totalLoad;
@@ -4038,7 +4059,7 @@ synchronized void leave(boolean checkForUpgrades) {
long timeInSafemode = now() - systemStart;
NameNode.stateChangeLog.info("STATE* Leaving safe mode after "
+ timeInSafemode/1000 + " secs.");
- NameNode.getNameNodeMetrics().safeModeTime.set((int) timeInSafemode);
+ NameNode.getNameNodeMetrics().setSafeModeTime((int) timeInSafemode);
if (reached >= 0) {
NameNode.stateChangeLog.info("STATE* Safe mode is OFF.");
@@ -4408,6 +4429,7 @@ void setBlockTotal() {
* Get the total number of blocks in the system.
*/
@Override // FSNamesystemMBean
+ @Metric
public long getBlocksTotal() {
return blockManager.getTotalBlocks();
}
@@ -4682,16 +4704,19 @@ long getMaxObjects() {
}
@Override // FSNamesystemMBean
+ @Metric
public long getFilesTotal() {
return this.dir.totalInodes();
}
@Override // FSNamesystemMBean
+ @Metric
public long getPendingReplicationBlocks() {
return blockManager.pendingReplicationBlocksCount;
}
@Override // FSNamesystemMBean
+ @Metric
public long getUnderReplicatedBlocks() {
return blockManager.underReplicatedBlocksCount;
}
@@ -4702,23 +4727,28 @@ public long getUnderReplicatedNotMissingBlocks() {
}
/** Returns number of blocks with corrupt replicas */
+ @Metric({"CorruptBlocks", "Number of blocks with corrupt replicas"})
public long getCorruptReplicaBlocks() {
return blockManager.corruptReplicaBlocksCount;
}
@Override // FSNamesystemMBean
+ @Metric
public long getScheduledReplicationBlocks() {
return blockManager.scheduledReplicationBlocksCount;
}
+ @Metric
public long getPendingDeletionBlocks() {
return blockManager.pendingDeletionBlocksCount;
}
+ @Metric
public long getExcessBlocks() {
return blockManager.excessBlocksCount;
}
+ @Metric
public int getBlockCapacity() {
return blockManager.getCapacity();
}
@@ -4733,28 +4763,16 @@ public String getFSState() {
* Register the FSNamesystem MBean using the name
* "hadoop:service=NameNode,name=FSNamesystemState"
*/
- void registerMBean(Configuration conf) {
- // We wrap to bypass standard mbean naming convention.
- // This wraping can be removed in java 6 as it is more flexible in
- // package naming for mbeans and their impl.
- StandardMBean bean;
+ void registerMBean() {
+ // We can only implement one MXBean interface, so we keep the old one.
try {
- myFSMetrics = new FSNamesystemMetrics(this, conf);
- bean = new StandardMBean(this,FSNamesystemMBean.class);
- mbeanName = MBeanUtil.registerMBean("NameNode", "FSNamesystemState", bean);
+ StandardMBean bean = new StandardMBean(this, FSNamesystemMBean.class);
+ mbeanName = MBeans.register("NameNode", "FSNamesystemState", bean);
} catch (NotCompliantMBeanException e) {
- LOG.warn("Exception in initializing StandardMBean as FSNamesystemMBean",
- e);
+ throw new RuntimeException("Bad MBean setup", e);
}
- LOG.info("Registered FSNamesystemStatusMBean");
- }
-
- /**
- * get FSNamesystemMetrics
- */
- public FSNamesystemMetrics getFSNamesystemMetrics() {
- return myFSMetrics;
+ LOG.info("Registered FSNamesystemState MBean");
}
/**
@@ -4762,7 +4780,7 @@ public FSNamesystemMetrics getFSNamesystemMetrics() {
*/
public void shutdown() {
if (mbeanName != null)
- MBeanUtil.unregisterMBean(mbeanName);
+ MBeans.unregister(mbeanName);
}
@@ -5416,17 +5434,7 @@ void logFsckEvent(String src, InetAddress remoteAddress) throws IOException {
* Register NameNodeMXBean
*/
private void registerMXBean() {
- // register MXBean
- MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
- try {
- ObjectName mxbeanName = new ObjectName("HadoopInfo:type=NameNodeInfo");
- mbs.registerMBean(this, mxbeanName);
- } catch ( javax.management.InstanceAlreadyExistsException iaee ) {
- // in unit tests, we may run and restart the NN within the same JVM
- LOG.info("NameNode MXBean already registered");
- } catch ( javax.management.JMException e ) {
- LOG.warn("Failed to register NameNodeMXBean", e);
- }
+ MBeans.register("NameNode", "NameNodeInfo", this);
}
/**
@@ -5499,6 +5507,7 @@ public long getTotalBlocks() {
}
@Override // NameNodeMXBean
+ @Metric
public long getTotalFiles() {
return getFilesTotal();
}
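Note the two annotation styles FSNamesystem now mixes: @Metric on a field declares a mutable metric that the code updates itself (expiredHeartbeats.incr()), while @Metric on a getter turns the method into a gauge read by the metrics system at collection time. The getter form, as it appears above:

    @Metric({"MissingBlocks", "Number of missing blocks"})
    public long getMissingBlocksCount() {
      return blockManager.getMissingBlocksCount(); // sampled per snapshot
    }

    @Metric // name and description derived from the method name
    public float getCapacityTotalGB() {
      return DFSUtil.roundBytesToGB(getCapacityTotal());
    }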
67 src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -86,6 +86,7 @@
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
@@ -213,7 +214,7 @@ public static void format(Configuration conf) throws IOException {
format(conf, false);
}
- static NameNodeMetrics myMetrics;
+ static NameNodeMetrics metrics;
/** Return the {@link FSNamesystem} object.
* @return {@link FSNamesystem} object.
@@ -223,11 +224,11 @@ FSNamesystem getNamesystem() {
}
static void initMetrics(Configuration conf, NamenodeRole role) {
- myMetrics = new NameNodeMetrics(conf, role);
+ metrics = NameNodeMetrics.create(conf, role);
}
public static NameNodeMetrics getNameNodeMetrics() {
- return myMetrics;
+ return metrics;
}
public static InetSocketAddress getAddress(String address) {
@@ -639,8 +640,8 @@ public void stop() {
if(emptier != null) emptier.interrupt();
if(server != null) server.stop();
if(serviceRpcServer != null) serviceRpcServer.stop();
- if (myMetrics != null) {
- myMetrics.shutdown();
+ if (metrics != null) {
+ metrics.shutdown();
}
if (namesystem != null) {
namesystem.shutdown();
@@ -750,7 +751,7 @@ public LocatedBlocks getBlockLocations(String src,
long offset,
long length)
throws IOException {
- myMetrics.numGetBlockLocations.inc();
+ metrics.incrGetBlockLocations();
return namesystem.getBlockLocations(getClientMachine(),
src, offset, length);
}
@@ -789,8 +790,8 @@ public void create(String src,
new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(),
null, masked),
clientName, clientMachine, flag.get(), createParent, replication, blockSize);
- myMetrics.numFilesCreated.inc();
- myMetrics.numCreateFileOps.inc();
+ metrics.incrFilesCreated();
+ metrics.incrCreateFileOps();
}
/** {@inheritDoc} */
@@ -802,7 +803,7 @@ public LocatedBlock append(String src, String clientName)
+src+" for "+clientName+" at "+clientMachine);
}
LocatedBlock info = namesystem.appendFile(src, clientName, clientMachine);
- myMetrics.numFilesAppended.inc();
+ metrics.incrFilesAppended();
return info;
}
@@ -844,7 +845,7 @@ public LocatedBlock addBlock(String src,
LocatedBlock locatedBlock =
namesystem.getAdditionalBlock(src, clientName, previous, excludedNodesSet);
if (locatedBlock != null)
- myMetrics.numAddBlockOps.inc();
+ metrics.incrAddBlockOps();
return locatedBlock;
}
@@ -862,7 +863,7 @@ public LocatedBlock getAdditionalDatanode(final String src, final ExtendedBlock
+ ", clientName=" + clientName);
}
- myMetrics.numGetAdditionalDatanodeOps.inc();
+ metrics.incrGetAdditionalDatanodeOps();
HashMap<Node, Node> excludeSet = null;
if (excludes != null) {
@@ -959,7 +960,7 @@ public boolean rename(String src, String dst) throws IOException {
}
boolean ret = namesystem.renameTo(src, dst);
if (ret) {
- myMetrics.numFilesRenamed.inc();
+ metrics.incrFilesRenamed();
}
return ret;
}
@@ -983,7 +984,7 @@ public void rename(String src, String dst, Options.Rename... options)
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
namesystem.renameTo(src, dst, options);
- myMetrics.numFilesRenamed.inc();
+ metrics.incrFilesRenamed();
}
/**
@@ -1001,7 +1002,7 @@ public boolean delete(String src, boolean recursive) throws IOException {
}
boolean ret = namesystem.delete(src, recursive);
if (ret)
- myMetrics.numDeleteFileOps.inc();
+ metrics.incrDeleteFileOps();
return ret;
}
@@ -1047,8 +1048,8 @@ public DirectoryListing getListing(String src, byte[] startAfter,
DirectoryListing files = namesystem.getListing(
src, startAfter, needLocation);
if (files != null) {
- myMetrics.numGetListingOps.inc();
- myMetrics.numFilesInGetListingOps.inc(files.getPartialListing().length);
+ metrics.incrGetListingOps();
+ metrics.incrFilesInGetListingOps(files.getPartialListing().length);
}
return files;
}
@@ -1060,7 +1061,7 @@ public DirectoryListing getListing(String src, byte[] startAfter,
* or null if file not found
*/
public HdfsFileStatus getFileInfo(String src) throws IOException {
- myMetrics.numFileInfoOps.inc();
+ metrics.incrFileInfoOps();
return namesystem.getFileInfo(src, true);
}
@@ -1072,11 +1073,11 @@ public HdfsFileStatus getFileInfo(String src) throws IOException {
* or null if file not found
*/
public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
- myMetrics.numFileInfoOps.inc();
+ metrics.incrFileInfoOps();
return namesystem.getFileInfo(src, false);
}
- /** @inheritDoc */
+ @Override
public long[] getStats() {
return namesystem.getStats();
}
@@ -1092,9 +1093,7 @@ public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
return results;
}
- /**
- * @inheritDoc
- */
+ @Override
public boolean setSafeMode(SafeModeAction action) throws IOException {
return namesystem.setSafeMode(action);
}
@@ -1106,18 +1105,13 @@ public boolean isInSafeMode() {
return namesystem.isInSafeMode();
}
- /**
- * @throws AccessControlException
- * @inheritDoc
- */
+ @Override
public boolean restoreFailedStorage(String arg)
throws AccessControlException {
return namesystem.restoreFailedStorage(arg);
}
- /**
- * @inheritDoc
- */
+ @Override
public void saveNamespace() throws IOException {
namesystem.saveNamespace();
}
@@ -1207,17 +1201,17 @@ public void fsync(String src, String clientName) throws IOException {
namesystem.fsync(src, clientName);
}
- /** @inheritDoc */
+ @Override
public void setTimes(String src, long mtime, long atime)
throws IOException {
namesystem.setTimes(src, mtime, atime);
}
- /** @inheritDoc */
+ @Override
public void createSymlink(String target, String link, FsPermission dirPerms,
boolean createParent)
throws IOException {
- myMetrics.numcreateSymlinkOps.inc();
+ metrics.incrCreateSymlinkOps();
/* We enforce the MAX_PATH_LENGTH limit even though a symlink target
* URI may refer to a non-HDFS file system.
*/
@@ -1234,9 +1228,9 @@ public void createSymlink(String target, String link, FsPermission dirPerms,
new PermissionStatus(ugi.getShortUserName(), null, dirPerms), createParent);
}
- /** @inheritDoc */
+ @Override
public String getLinkTarget(String path) throws IOException {
- myMetrics.numgetLinkTargetOps.inc();
+ metrics.incrGetLinkTargetOps();
/* Resolves the first symlink in the given path, returning a
* new path consisting of the target of the symlink and any
* remaining path components from the original path.
@@ -1645,8 +1639,11 @@ public static NameNode createNameNode(String argv[], Configuration conf)
return null; // avoid javac warning
case BACKUP:
case CHECKPOINT:
- return new BackupNode(conf, startOpt.toNodeRole());
+ NamenodeRole role = startOpt.toNodeRole();
+ DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
+ return new BackupNode(conf, role);
default:
+ DefaultMetricsSystem.initialize("NameNode");
return new NameNode(conf);
}
}
7 src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -49,7 +49,8 @@
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.metrics.jvm.JvmMetrics;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
import org.apache.hadoop.security.SecurityUtil;
@@ -154,7 +155,9 @@ private void initialize(final Configuration conf) throws IOException {
infoBindAddress);
}
// initiate Java VM metrics
- JvmMetrics.init("SecondaryNameNode", conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY));
+ JvmMetrics.create("SecondaryNameNode",
+ conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
+ DefaultMetricsSystem.instance());
// Create connection to the namenode.
shouldRun = true;
4 src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
@@ -31,8 +31,8 @@
* be published as an interface.
*
* <p>
- * Name Node runtime activity statistic info is report in another MBean
- * @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean
+ * Name Node runtime activity statistic info is reported in
+ * @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics
*
*/
@InterfaceAudience.Private
132 src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java
@@ -1,132 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode.metrics;
-
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.metrics.*;
-import org.apache.hadoop.metrics.util.MetricsBase;
-import org.apache.hadoop.metrics.util.MetricsIntValue;
-import org.apache.hadoop.metrics.util.MetricsLongValue;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
-
-/**
- *
- * This class is for maintaining the various FSNamesystem status metrics
- * and publishing them through the metrics interfaces.
- * The SNamesystem creates and registers the JMX MBean.
- * <p>
- * This class has a number of metrics variables that are publicly accessible;
- * these variables (objects) have methods to update their values;
- * for example:
- * <p> {@link #filesTotal}.set()
- *
- */
-@InterfaceAudience.Private
-public class FSNamesystemMetrics implements Updater {
- private static Log log = LogFactory.getLog(FSNamesystemMetrics.class);
- private final MetricsRecord metricsRecord;
- final MetricsRegistry registry = new MetricsRegistry();
-
- final MetricsIntValue filesTotal = new MetricsIntValue("FilesTotal", registry);
- final MetricsLongValue blocksTotal = new MetricsLongValue("BlocksTotal", registry);
- final MetricsIntValue capacityTotalGB = new MetricsIntValue("CapacityTotalGB", registry);
- final MetricsIntValue capacityUsedGB = new MetricsIntValue("CapacityUsedGB", registry);
- final MetricsIntValue capacityRemainingGB = new MetricsIntValue("CapacityRemainingGB", registry);
- final MetricsIntValue totalLoad = new MetricsIntValue("TotalLoad", registry);
- final MetricsIntValue pendingDeletionBlocks = new MetricsIntValue("PendingDeletionBlocks", registry);
- final MetricsIntValue corruptBlocks = new MetricsIntValue("CorruptBlocks", registry);
- final MetricsIntValue excessBlocks = new MetricsIntValue("ExcessBlocks", registry);
- final MetricsIntValue pendingReplicationBlocks = new MetricsIntValue("PendingReplicationBlocks", registry);
- final MetricsIntValue underReplicatedBlocks = new MetricsIntValue("UnderReplicatedBlocks", registry);
- final MetricsIntValue scheduledReplicationBlocks = new MetricsIntValue("ScheduledReplicationBlocks", registry);
- final MetricsIntValue missingBlocks = new MetricsIntValue("MissingBlocks", registry);
- final MetricsIntValue blockCapacity = new MetricsIntValue("BlockCapacity", registry);
- public final MetricsTimeVaryingInt numExpiredHeartbeats =
- new MetricsTimeVaryingInt("ExpiredHeartbeats", registry);
-
- private final FSNamesystem fsNameSystem;
-
- public FSNamesystemMetrics(FSNamesystem fsNameSystem, Configuration conf) {
- this.fsNameSystem = fsNameSystem;
- String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
-
- // Create a record for FSNamesystem metrics
- MetricsContext metricsContext = MetricsUtil.getContext("dfs");
- metricsRecord = MetricsUtil.createRecord(metricsContext, "FSNamesystem");
- metricsRecord.setTag("sessionId", sessionId);
- metricsContext.registerUpdater(this);
- log.info("Initializing FSNamesystemMetrics using context object:" +
- metricsContext.getClass().getName());
- }
-
- private int roundBytesToGBytes(long bytes) {
- return Math.round(((float)bytes/(1024 * 1024 * 1024)));
- }
-
- /**
- * Since this object is a registered updater, this method will be called
- * periodically, e.g. every 5 seconds.
- * We set the metrics value within this function before pushing it out.
- * FSNamesystem updates its own local variables which are
- * light weight compared to Metrics counters.
- *
- * Some of the metrics are explicity casted to int. Few metrics collectors
- * do not handle long values. It is safe to cast to int for now as all these
- * values fit in int value.
- * Metrics related to DFS capacity are stored in bytes which do not fit in
- * int, so they are rounded to GB
- */
- public void doUpdates(MetricsContext unused) {
- /**
- * ToFix
- * If the metrics counter were instead stored in the metrics objects themselves
- * we could avoid copying the values on each update.
- */
- synchronized (this) {
- filesTotal.set((int)fsNameSystem.getFilesTotal());
- blocksTotal.set((int)fsNameSystem.getBlocksTotal());
- capacityTotalGB.set(roundBytesToGBytes(fsNameSystem.getCapacityTotal()));
- capacityUsedGB.set(roundBytesToGBytes(fsNameSystem.getCapacityUsed()));
- capacityRemainingGB.set(roundBytesToGBytes(fsNameSystem.
- getCapacityRemaining()));
- totalLoad.set(fsNameSystem.getTotalLoad());
- corruptBlocks.set((int)fsNameSystem.getCorruptReplicaBlocks());
- excessBlocks.set((int)fsNameSystem.getExcessBlocks());
- pendingDeletionBlocks.set((int)fsNameSystem.getPendingDeletionBlocks());
- pendingReplicationBlocks.set((int)fsNameSystem.
- getPendingReplicationBlocks());
- underReplicatedBlocks.set((int)fsNameSystem.getUnderReplicatedBlocks());
- scheduledReplicationBlocks.set((int)fsNameSystem.
- getScheduledReplicationBlocks());
- missingBlocks.set((int)fsNameSystem.getMissingBlocksCount());
- blockCapacity.set(fsNameSystem.getBlockCapacity());
-
- for (MetricsBase m : registry.getMetricsList()) {
- m.pushMetric(metricsRecord);
- }
- }
- metricsRecord.update();
- }
-}
69 src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeActivityMBean.java
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode.metrics;
-
-import javax.management.ObjectName;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.metrics.util.MBeanUtil;
-import org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-
-/**
- *
- * This is the JMX MBean for reporting the NameNode Activity.
- * The MBean is register using the name
- * "hadoop:service=NameNode,name=NameNodeActivity"
- *
- * Many of the activity metrics are sampled and averaged on an interval
- * which can be specified in the metrics config file.
- * <p>
- * For the metrics that are sampled and averaged, one must specify
- * a metrics context that does periodic update calls. Most metrics contexts do.
- * The default Null metrics context however does NOT. So if you aren't
- * using any other metrics context then you can turn on the viewing and averaging
- * of sampled metrics by specifying the following two lines
- * in the hadoop-meterics.properties file:
-* <pre>
- * dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
- * dfs.period=10
- * </pre>
- *<p>
- * Note that the metrics are collected regardless of the context used.
- * The context with the update thread is used to average the data periodically
- *
- *
- *
- * Impl details: We use a dynamic mbean that gets the list of the metrics
- * from the metrics registry passed as an argument to the constructor
- */
-
-@InterfaceAudience.Private
-public class NameNodeActivityMBean extends MetricsDynamicMBeanBase {
- final private ObjectName mbeanName;
-
- protected NameNodeActivityMBean(final MetricsRegistry mr) {
- super(mr, "Activity statistics at the NameNode");
- mbeanName = MBeanUtil.registerMBean("NameNode", "NameNodeActivity", this);
- }
-
- public void shutdown() {
- if (mbeanName != null)
- MBeanUtil.unregisterMBean(mbeanName);
- }
-}
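The NullContextWithUpdateThread setup described in the deleted javadoc has no metrics 2.0 counterpart: the MBean is registered automatically when the source is registered, and the sampling period and sinks move to conf/hadoop-metrics2.properties (added by this commit). A hedged sketch of such a configuration follows; the sink name and output file are examples, not lines from the committed file.

    # Illustrative hadoop-metrics2.properties entries (example values only)
    *.period=10
    namenode.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
    namenode.sink.file.filename=namenode-metrics.out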
250 src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
@@ -17,128 +17,146 @@
*/
package org.apache.hadoop.hdfs.server.namenode.metrics;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.metrics.*;
-import org.apache.hadoop.metrics.jvm.JvmMetrics;
-import org.apache.hadoop.metrics.util.MetricsBase;
-import org.apache.hadoop.metrics.util.MetricsIntValue;
-import org.apache.hadoop.metrics.util.MetricsRegistry;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
-import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
+import org.apache.hadoop.metrics2.MetricsSystem;
+import org.apache.hadoop.metrics2.annotation.Metric;
+import org.apache.hadoop.metrics2.annotation.Metrics;
+import static org.apache.hadoop.metrics2.impl.MsInfo.*;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.lib.MetricsRegistry;
+import org.apache.hadoop.metrics2.lib.MutableCounterLong;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
/**
- *
* This class is for maintaining the various NameNode activity statistics
* and publishing them through the metrics interfaces.
- * This also registers the JMX MBean for RPC.
- * <p>
- * This class has a number of metrics variables that are publicly accessible;
- * these variables (objects) have methods to update their values;
- * for example:
- * <p> {@link #syncs}.inc()
- *
*/
-@InterfaceAudience.Private
-public class NameNodeMetrics implements Updater {
- private static Log log = LogFactory.getLog(NameNodeMetrics.class);
- private final MetricsRecord metricsRecord;
- public MetricsRegistry registry = new MetricsRegistry();
-
- private NameNodeActivityMBean namenodeActivityMBean;
-
- public MetricsTimeVaryingInt numCreateFileOps =
- new MetricsTimeVaryingInt("CreateFileOps", registry);
- public MetricsTimeVaryingInt numFilesCreated =
- new MetricsTimeVaryingInt("FilesCreated", registry);
- public MetricsTimeVaryingInt numFilesAppended =
- new MetricsTimeVaryingInt("FilesAppended", registry);
- public MetricsTimeVaryingInt numGetBlockLocations =
- new MetricsTimeVaryingInt("GetBlockLocations", registry);
- public MetricsTimeVaryingInt numFilesRenamed =
- new MetricsTimeVaryingInt("FilesRenamed", registry);
- public MetricsTimeVaryingInt numGetListingOps =
- new MetricsTimeVaryingInt("GetListingOps", registry);
- public MetricsTimeVaryingInt numDeleteFileOps =
- new MetricsTimeVaryingInt("DeleteFileOps", registry);
- public MetricsTimeVaryingInt numFilesDeleted = new MetricsTimeVaryingInt(
- "FilesDeleted", registry,
- "Number of files and directories deleted by delete or rename operation");
- public MetricsTimeVaryingInt numFileInfoOps =
- new MetricsTimeVaryingInt("FileInfoOps", registry);
- public MetricsTimeVaryingInt numAddBlockOps =
- new MetricsTimeVaryingInt("AddBlockOps", registry);
- public final MetricsTimeVaryingInt numGetAdditionalDatanodeOps
- = new MetricsTimeVaryingInt("GetAdditionalDatanodeOps", registry);
- public MetricsTimeVaryingInt numcreateSymlinkOps =
- new MetricsTimeVaryingInt("CreateSymlinkOps", registry);
- public MetricsTimeVaryingInt numgetLinkTargetOps =
- new MetricsTimeVaryingInt("GetLinkTargetOps", registry);
-
- public MetricsTimeVaryingRate transactions = new MetricsTimeVaryingRate(
- "Transactions", registry, "Journal Transaction");
- public MetricsTimeVaryingRate syncs =
- new MetricsTimeVaryingRate("Syncs", registry, "Journal Sync");
- public MetricsTimeVaryingInt transactionsBatchedInSync = new MetricsTimeVaryingInt(
- "JournalTransactionsBatchedInSync", registry,
- "Journal Transactions Batched In Sync");
- public MetricsTimeVaryingRate blockReport =
- new MetricsTimeVaryingRate("blockReport", registry, "Block Report");
- public MetricsIntValue safeModeTime =
- new MetricsIntValue("SafemodeTime", registry, "Duration in SafeMode at Startup");
- public MetricsIntValue fsImageLoadTime =
- new MetricsIntValue("fsImageLoadTime", registry, "Time loading FS Image at Startup");
- public MetricsIntValue numBlocksCorrupted =
- new MetricsIntValue("BlocksCorrupted", registry);
- public MetricsTimeVaryingInt numFilesInGetListingOps =
- new MetricsTimeVaryingInt("FilesInGetListingOps", registry);
-
-
- public NameNodeMetrics(Configuration conf, NamenodeRole nameNodeRole) {
- String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
- // Initiate Java VM metrics
- String processName = nameNodeRole.toString();
- JvmMetrics.init(processName, sessionId);
-
- // Now the Mbean for the name node - this also registers the MBean
- namenodeActivityMBean = new NameNodeActivityMBean(registry);
-
- // Create a record for NameNode metrics
- MetricsContext metricsContext = MetricsUtil.getContext("dfs");
- metricsRecord = MetricsUtil.createRecord(metricsContext, processName.toLowerCase());
- metricsRecord.setTag("sessionId", sessionId);
- metricsContext.registerUpdater(this);
- log.info("Initializing NameNodeMeterics using context object:" +
- metricsContext.getClass().getName());
- }
-
-
-
- public void shutdown() {
- if (namenodeActivityMBean != null)
- namenodeActivityMBean.shutdown();
- }
-
- /**
- * Since this object is a registered updater, this method will be called
- * periodically, e.g. every 5 seconds.
- */
- public void doUpdates(MetricsContext unused) {
- synchronized (this) {
- for (MetricsBase m : registry.getMetricsList()) {
- m.pushMetric(metricsRecord);
- }
- }
- metricsRecord.update();
- }
-
- public void resetAllMinMax() {
- transactions.resetMinMax();
- syncs.resetMinMax();
- blockReport.resetMinMax();
- }
+@Metrics(name="NameNodeActivity", about="NameNode metrics", context="dfs")
+public class NameNodeMetrics {
+ final MetricsRegistry registry = new MetricsRegistry("namenode");
+
+ @Metric MutableCounterLong createFileOps;
+ @Metric MutableCounterLong filesCreated;
+ @Metric MutableCounterLong filesAppended;
+ @Metric MutableCounterLong getBlockLocations;
+ @Metric MutableCounterLong filesRenamed;
+ @Metric MutableCounterLong getListingOps;
+ @Metric MutableCounterLong deleteFileOps;
+ @Metric("Number of files/dirs deleted by delete or rename operations")
+ MutableCounterLong filesDeleted;
+ @Metric MutableCounterLong fileInfoOps;
+ @Metric MutableCounterLong addBlockOps;
+ @Metric MutableCounterLong getAdditionalDatanodeOps;
+ @Metric MutableCounterLong createSymlinkOps;
+ @Metric MutableCounterLong getLinkTargetOps;
+ @Metric MutableCounterLong filesInGetListingOps;
+
+ @Metric("Journal transactions") MutableRate transactions;
+ @Metric("Journal syncs") MutableRate syncs;
+ @Metric("Journal transactions batched in sync")
+ MutableCounterLong transactionsBatchedInSync;
+ @Metric("Block report") MutableRate blockReport;
+
+ @Metric("Duration in SafeMode at startup") MutableGaugeInt safeModeTime;
+ @Metric("Time loading FS Image at startup") MutableGaugeInt fsImageLoadTime;
+
+ NameNodeMetrics(String processName, String sessionId) {
+ registry.tag(ProcessName, processName).tag(SessionId, sessionId);
+ }
+
+ public static NameNodeMetrics create(Configuration conf, NamenodeRole r) {
+ String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
+ String processName = r.toString();
+ MetricsSystem ms = DefaultMetricsSystem.instance();
+ JvmMetrics.create(processName, sessionId, ms);
+ return ms.register(new NameNodeMetrics(processName, sessionId));
+ }
+
+ public void shutdown() {
+ DefaultMetricsSystem.shutdown();
+ }
+
+ public void incrGetBlockLocations() {
+ getBlockLocations.incr();
+ }
+
+ public void incrFilesCreated() {
+ filesCreated.incr();
+ }
+
+ public void incrCreateFileOps() {
+ createFileOps.incr();
+ }
+
+ public void incrFilesAppended() {
+ filesAppended.incr();
+ }
+
+ public void incrAddBlockOps() {
+ addBlockOps.incr();
+ }
+
+ public void incrGetAdditionalDatanodeOps() {
+ getAdditionalDatanodeOps.incr();
+ }
+
+ public void incrFilesRenamed() {
+ filesRenamed.incr();
+ }
+
+ public void incrFilesDeleted(int delta) {
+ filesDeleted.incr(delta);
+ }
+
+ public void incrDeleteFileOps() {
+ deleteFileOps.incr();
+ }
+
+ public void incrGetListingOps() {
+ getListingOps.incr();
+ }
+
+ public void incrFilesInGetListingOps(int delta) {
+ filesInGetListingOps.incr(delta);
+ }
+
+ public void incrFileInfoOps() {
+ fileInfoOps.incr();
+ }
+
+ public void incrCreateSymlinkOps() {
+ createSymlinkOps.incr();
+ }
+
+ public void incrGetLinkTargetOps() {
+ getLinkTargetOps.incr();
+ }
+
+ public void addTransaction(long latency) {
+ transactions.add(latency);
+ }
+
+ public void incrTransactionsBatchedInSync() {
+ transactionsBatchedInSync.incr();
+ }
+
+ public void addSync(long elapsed) {
+ syncs.add(elapsed);
+ }
+
+ public void setFsImageLoadTime(long elapsed) {
+ fsImageLoadTime.set((int) elapsed);
+ }
+
+ public void addBlockReport(long latency) {
+ blockReport.add(latency);
+ }
+
+ public void setSafeModeTime(long elapsed) {
+ safeModeTime.set((int) elapsed);
+ }
}
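A short usage sketch of the class above; the call sites are paraphrased from the NameNode code paths this commit touches, and the role constant and timing variable are assumptions for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
    import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;

    // Sketch only, not NameNode code: create() registers the source with
    // the default metrics system; callers then bump counters and feed
    // latency samples to the MutableRate metrics.
    class NameNodeMetricsUsageSketch {
      static void demo() {
        Configuration conf = new Configuration();
        NameNodeMetrics metrics = NameNodeMetrics.create(conf, NamenodeRole.NAMENODE);

        metrics.incrCreateFileOps();   // one create() RPC handled
        metrics.incrFilesCreated();    // one file added to the namespace

        long start = System.currentTimeMillis();
        // ... sync the edit log ...
        metrics.addSync(System.currentTimeMillis() - start);  // rate sample

        metrics.shutdown();            // stops the default metrics system
      }
    }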
6 src/java/org/apache/hadoop/hdfs/tools/JMXGet.java
@@ -126,7 +126,7 @@ public String getValue(String key) throws Exception {
continue;
}
}
- err("Info: key = " + key + "; val = " + val);
+ err("Info: key = " + key + "; val = "+ val.getClass() +":"+ val);
break;
}
@@ -193,7 +193,7 @@ public void init() throws Exception {
err("\nMBean count = " + mbsc.getMBeanCount());
// Query MBean names for specific domain "hadoop" and service
- ObjectName query = new ObjectName("hadoop:service=" + service + ",*");
+ ObjectName query = new ObjectName("Hadoop:service=" + service + ",*");
hadoopObjectNames = new ArrayList<ObjectName>(5);
err("\nQuery MBeanServer MBeans:");
Set<ObjectName> names = new TreeSet<ObjectName>(mbsc
@@ -201,7 +201,7 @@ public void init() throws Exception {
for (ObjectName name : names) {
hadoopObjectNames.add(name);
- err("hadoop services: " + name);
+ err("Hadoop service: " + name);
}
}
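The domain rename above ("hadoop" to "Hadoop") matches where metrics 2.0 registers its MBeans. A hedged sketch of querying the new domain directly with plain JMX; the service name in the query is an example.

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    // Sketch only: list MBeans in the capitalized "Hadoop" JMX domain.
    class JmxDomainSketch {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName query = new ObjectName("Hadoop:service=NameNode,*");
        for (ObjectName name : mbs.queryNames(query, null)) {
          System.out.println(name); // e.g. Hadoop:service=NameNode,name=NameNodeActivity
        }
      }
    }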
3  src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -61,6 +61,7 @@
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.StaticMapping;
@@ -83,6 +84,8 @@
private static final String NAMESERVICE_ID_PREFIX = "nameserviceId";
private static final Log LOG = LogFactory.getLog(MiniDFSCluster.class);
+ static { DefaultMetricsSystem.setMiniClusterMode(true); }
+
/**
* Class to construct instances of MiniDFSClusters with specific options.
*/
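The static block added above switches the metrics system into mini-cluster mode before any daemon starts. With several NameNodes and DataNodes in one JVM, identically named metrics sources would otherwise collide on registration; mini-cluster mode tolerates the duplicates. A hedged sketch of the idiom as the diff applies it:

    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

    // Sketch: tests that start multiple daemons in-process set this once,
    // in a static initializer, before any metrics source registers.
    class InProcessClusterTestBase {
      static {
        DefaultMetricsSystem.setMiniClusterMode(true);
      }
    }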
6 src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -26,7 +26,7 @@
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics;
+import static org.apache.hadoop.test.MetricsAsserts.*;
/**
 * This test ensures that all types of datanode reports work correctly.
@@ -77,9 +77,7 @@ public void testDatanodeReport() throws Exception {
NUM_OF_DATANODES);
Thread.sleep(5000);
- FSNamesystemMetrics fsMetrics =
- cluster.getNamesystem().getFSNamesystemMetrics();
- assertEquals(1,fsMetrics.numExpiredHeartbeats.getCurrentIntervalValue());
+ assertCounter("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
}finally {
cluster.shutdown();
}
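The assertCounter change above is the MetricsAsserts idiom used throughout the updated tests: getMetrics(name) snapshots the named source into a record builder, and the assert helpers check individual metrics by name. A hedged standalone sketch; the metric names and expected values are examples.

    import static org.apache.hadoop.test.MetricsAsserts.*;
    import org.apache.hadoop.metrics2.MetricsRecordBuilder;

    // Sketch of the test idiom; names and values are illustrative.
    class MetricsAssertsSketch {
      void check() {
        MetricsRecordBuilder rb = getMetrics("FSNamesystem"); // snapshot source
        assertCounter("ExpiredHeartbeats", 1, rb);            // exact counter
        assertGauge("BlockCapacity", 16, rb);                 // exact gauge
      }
    }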
4 src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
@@ -34,6 +34,7 @@
import org.apache.hadoop.hdfs.server.namenode.BackupNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.test.GenericTestUtils;
@@ -52,6 +53,9 @@
// reset default 0.0.0.0 addresses in order to avoid IPv6 problem
static final String THIS_HOST = getFullHostName() + ":0";
+ static {
+ DefaultMetricsSystem.setMiniClusterMode(true);
+ }
Configuration config;
File hdfsDir;
13 src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -43,7 +43,7 @@
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -909,18 +909,17 @@ void registerMBean(final String storageId) {
try {
bean = new StandardMBean(this,FSDatasetMBean.class);
- mbeanName = MBeanUtil.registerMBean("DataNode",
- "FSDatasetState-" + storageId, bean);
+ mbeanName = MBeans.register("DataNode", "FSDatasetState-"+
+ storageId, bean);
} catch (NotCompliantMBeanException e) {
- e.printStackTrace();
+ DataNode.LOG.warn("Error registering FSDatasetState MBean", e);
}
- DataNode.LOG.info("Registered FSDatasetStatusMBean");
+ DataNode.LOG.info("Registered FSDatasetState MBean");
}
public void shutdown() {
- if (mbeanName != null)
- MBeanUtil.unregisterMBean(mbeanName);
+ if (mbeanName != null) MBeans.unregister(mbeanName);
}
public String getStorageInfo() {
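The switch from MBeanUtil to the metrics 2.0 MBeans helper above also moves the bean into the capitalized domain, i.e. "Hadoop:service=DataNode,name=FSDatasetState-<storageId>". A hedged sketch of the register/unregister pair; the storage id is an example and the bean object is a placeholder for an MBean-compliant instance.

    import javax.management.ObjectName;
    import org.apache.hadoop.metrics2.util.MBeans;

    // Sketch of the helper pair; "theBean" must implement an MBean interface.
    class MBeansSketch {
      private ObjectName mbeanName;

      void start(Object theBean) {
        // Registers under "Hadoop:service=DataNode,name=FSDatasetState-ds1".
        mbeanName = MBeans.register("DataNode", "FSDatasetState-ds1", theBean);
      }

      void stop() {
        if (mbeanName != null) MBeans.unregister(mbeanName);
      }
    }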
3  src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
@@ -43,7 +43,8 @@ public void testDataNodeMXBean() throws Exception {
DataNode datanode = datanodes.get(0);
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
- ObjectName mxbeanName = new ObjectName("HadoopInfo:type=DataNodeInfo");
+ ObjectName mxbeanName = new ObjectName(
+ "Hadoop:service=DataNode,name=DataNodeInfo");
// get attribute "ClusterId"
String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
Assert.assertEquals(datanode.getClusterId(), clusterId);
8 src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -24,8 +24,10 @@
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import static org.apache.hadoop.test.MetricsAsserts.*;
+
import junit.framework.TestCase;
public class TestDataNodeMetrics extends TestCase {
@@ -42,8 +44,8 @@ public void testDataNodeMetrics() throws Exception {
List<DataNode> datanodes = cluster.getDataNodes();
assertEquals(datanodes.size(), 1);
DataNode datanode = datanodes.get(0);
- DataNodeMetrics metrics = datanode.getMetrics();
- assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
+ MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
+ assertCounter("BytesWritten", LONG_FILE_LEN, rb);
} finally {
if (cluster != null) {cluster.shutdown();}
}
27 src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -19,18 +19,17 @@
import java.io.File;