diff --git a/LICENSE-binary b/LICENSE-binary
index b25c1f3addd89..d838ac1cb6ade 100644
--- a/LICENSE-binary
+++ b/LICENSE-binary
@@ -225,7 +225,7 @@ com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.12.7
com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.12.7
com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.12.7
com.fasterxml.uuid:java-uuid-generator:3.1.4
-com.fasterxml.woodstox:woodstox-core:5.3.0
+com.fasterxml.woodstox:woodstox-core:5.4.0
com.github.davidmoten:rxjava-extras:0.8.0.17
com.github.stephenc.jcip:jcip-annotations:1.0-1
com.google:guice:4.0
@@ -523,7 +523,7 @@ junit:junit:4.13.2
HSQL License
------------
-org.hsqldb:hsqldb:2.5.2
+org.hsqldb:hsqldb:2.7.1
JDOM License
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 0a5db2565b8c5..0b1c601267358 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -41,11 +41,21 @@
       <artifactId>hadoop-hdfs-client</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-native-client</artifactId>
+      <scope>provided</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-mapreduce-client-app</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-nativetask</artifactId>
+      <scope>provided</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-yarn-api</artifactId>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index dfe48f7bde1f4..ded489308326f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3616,7 +3616,7 @@ private Block addStoredBlock(final BlockInfo block,
if (storedBlock == null || storedBlock.isDeleted()) {
// If this block does not belong to anyfile, then we are done.
blockLog.debug("BLOCK* addStoredBlock: {} on {} size {} but it does not belong to any file",
- block, node, block.getNumBytes());
+ reportedBlock, node, reportedBlock.getNumBytes());
// we could add this block to invalidate set of this datanode.
// it will happen in next block report otherwise.
return block;
@@ -3631,12 +3631,12 @@ private Block addStoredBlock(final BlockInfo block,
(node.isDecommissioned() || node.isDecommissionInProgress()) ? 0 : 1;
if (logEveryBlock) {
blockLog.info("BLOCK* addStoredBlock: {} is added to {} (size={})",
- node, storedBlock, storedBlock.getNumBytes());
+ node, reportedBlock, reportedBlock.getNumBytes());
}
} else if (result == AddBlockResult.REPLACED) {
curReplicaDelta = 0;
blockLog.warn("BLOCK* addStoredBlock: block {} moved to storageType " +
- "{} on node {}", storedBlock, storageInfo.getStorageType(), node);
+ "{} on node {}", reportedBlock, storageInfo.getStorageType(), node);
} else {
// if the same block is added again and the replica was corrupt
// previously because of a wrong gen stamp, remove it from the
@@ -3646,8 +3646,8 @@ private Block addStoredBlock(final BlockInfo block,
curReplicaDelta = 0;
if (blockLog.isDebugEnabled()) {
blockLog.debug("BLOCK* addStoredBlock: Redundant addStoredBlock request"
- + " received for {} on node {} size {}", storedBlock, node,
- storedBlock.getNumBytes());
+ + " received for {} on node {} size {}", reportedBlock, node,
+ reportedBlock.getNumBytes());
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java
index a7d72d019bde5..79d5a065b08a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.util.LightWeightHashSet;
import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
+import org.apache.hadoop.classification.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.HashMap;
@@ -70,10 +71,10 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
outOfServiceNodeBlocks = new HashMap<>();
/**
- * The numbe of blocks to process when moving blocks to pendingReplication
+ * The number of blocks to process when moving blocks to pendingReplication
* before releasing and reclaiming the namenode lock.
*/
- private int blocksPerLock;
+ private volatile int blocksPerLock;
/**
* The number of blocks that have been checked on this tick.
@@ -82,7 +83,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase
/**
* The maximum number of blocks to hold in PendingRep at any time.
*/
- private int pendingRepLimit;
+ private volatile int pendingRepLimit;
/**
* The list of blocks which have been placed onto the replication queue
@@ -801,6 +802,26 @@ private boolean isBlockReplicatedOk(DatanodeDescriptor datanode,
return false;
}
+ @VisibleForTesting
+ @Override
+ public int getPendingRepLimit() {
+ return pendingRepLimit;
+ }
+
+ public void setPendingRepLimit(int pendingRepLimit) {
+ this.pendingRepLimit = pendingRepLimit;
+ }
+
+ @VisibleForTesting
+ @Override
+ public int getBlocksPerLock() {
+ return blocksPerLock;
+ }
+
+ public void setBlocksPerLock(int blocksPerLock) {
+ this.blocksPerLock = blocksPerLock;
+ }
+
static class BlockStats {
private LightWeightHashSet openFiles =
new LightWeightLinkedSet<>();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java
index e642dfba35188..a75fc5d6e352f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java
@@ -27,6 +27,7 @@
import org.apache.hadoop.hdfs.util.LightWeightHashSet;
import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
import org.apache.hadoop.util.ChunkedArrayList;
+import org.apache.hadoop.classification.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -137,6 +138,28 @@ public int getNumNodesChecked() {
return numNodesChecked;
}
+ @VisibleForTesting
+ @Override
+ public int getPendingRepLimit() {
+ return 0;
+ }
+
+ @Override
+ public void setPendingRepLimit(int pendingRepLimit) {
+ // nothing.
+ }
+
+ @VisibleForTesting
+ @Override
+ public int getBlocksPerLock() {
+ return 0;
+ }
+
+ @Override
+ public void setBlocksPerLock(int blocksPerLock) {
+ // nothing.
+ }
+
@Override
public void run() {
LOG.debug("DatanodeAdminMonitor is running.");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
index 887cb1072d95f..92966f7fe457b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java
@@ -419,4 +419,30 @@ void runMonitorForTest() throws ExecutionException, InterruptedException {
executor.submit(monitor).get();
}
+ public void refreshPendingRepLimit(int pendingRepLimit, String key) {
+ ensurePositiveInt(pendingRepLimit, key);
+ this.monitor.setPendingRepLimit(pendingRepLimit);
+ }
+
+ @VisibleForTesting
+ public int getPendingRepLimit() {
+ return this.monitor.getPendingRepLimit();
+ }
+
+ public void refreshBlocksPerLock(int blocksPerLock, String key) {
+ ensurePositiveInt(blocksPerLock, key);
+ this.monitor.setBlocksPerLock(blocksPerLock);
+ }
+
+ @VisibleForTesting
+ public int getBlocksPerLock() {
+ return this.monitor.getBlocksPerLock();
+ }
+
+ private void ensurePositiveInt(int val, String key) {
+ Preconditions.checkArgument(
+ (val > 0),
+ key + " = '" + val + "' is invalid. " +
+ "It should be a positive, non-zero integer value.");
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminMonitorInterface.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminMonitorInterface.java
index 89673a759eaed..a4774742108fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminMonitorInterface.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminMonitorInterface.java
@@ -37,4 +37,12 @@ public interface DatanodeAdminMonitorInterface extends Runnable {
void setBlockManager(BlockManager bm);
void setDatanodeAdminManager(DatanodeAdminManager dnm);
void setNameSystem(Namesystem ns);
+
+ int getPendingRepLimit();
+
+ void setPendingRepLimit(int pendingRepLimit);
+
+ int getBlocksPerLock();
+
+ void setBlocksPerLock(int blocksPerLock);
}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 3d3b65d8e217b..32ff45b2bfd9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -205,6 +205,10 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK_DEFAULT;
import static org.apache.hadoop.util.ExitUtil.terminate;
import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
@@ -353,7 +357,9 @@ public enum OperationCategory {
DFS_BLOCK_INVALIDATE_LIMIT_KEY,
DFS_DATANODE_PEER_STATS_ENABLED_KEY,
DFS_DATANODE_MAX_NODES_TO_REPORT_KEY,
- DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY));
+ DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
+ DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT,
+ DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK));
private static final String USAGE = "Usage: hdfs namenode ["
+ StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2321,6 +2327,10 @@ protected String reconfigurePropertyImpl(String property, String newVal)
return reconfigureSlowNodesParameters(datanodeManager, property, newVal);
} else if (property.equals(DFS_BLOCK_INVALIDATE_LIMIT_KEY)) {
return reconfigureBlockInvalidateLimit(datanodeManager, property, newVal);
+ } else if (property.equals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT) ||
+ (property.equals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK))) {
+ return reconfigureDecommissionBackoffMonitorParameters(datanodeManager, property,
+ newVal);
} else {
throw new ReconfigurationException(property, newVal, getConf().get(
property));
@@ -2601,6 +2611,34 @@ private String reconfigureBlockInvalidateLimit(final DatanodeManager datanodeMan
}
}
+ private String reconfigureDecommissionBackoffMonitorParameters(
+ final DatanodeManager datanodeManager, final String property, final String newVal)
+ throws ReconfigurationException {
+ String newSetting = null;
+ try {
+ if (property.equals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT)) {
+ int pendingRepLimit = (newVal == null ?
+ DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT_DEFAULT :
+ Integer.parseInt(newVal));
+ datanodeManager.getDatanodeAdminManager().refreshPendingRepLimit(pendingRepLimit,
+ DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT);
+ newSetting = String.valueOf(datanodeManager.getDatanodeAdminManager().getPendingRepLimit());
+ } else if (property.equals(
+ DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK)) {
+ int blocksPerLock = (newVal == null ?
+ DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK_DEFAULT :
+ Integer.parseInt(newVal));
+ datanodeManager.getDatanodeAdminManager().refreshBlocksPerLock(blocksPerLock,
+ DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK);
+ newSetting = String.valueOf(datanodeManager.getDatanodeAdminManager().getBlocksPerLock());
+ }
+ LOG.info("RECONFIGURE* changed reconfigureDecommissionBackoffMonitorParameters {} to {}",
+ property, newSetting);
+ return newSetting;
+ } catch (IllegalArgumentException e) {
+ throw new ReconfigurationException(property, newVal, getConf().get(property), e);
+ }
+ }
@Override // ReconfigurableBase
protected Configuration getNewConf() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index d048429814656..5573b1fa107fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -22,6 +22,8 @@
import java.util.ArrayList;
import java.util.List;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeAdminBackoffMonitor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeAdminMonitorInterface;
import org.junit.Test;
import org.junit.Before;
import org.junit.After;
@@ -62,6 +64,8 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_BACKOFF_ENABLE_DEFAULT;
public class TestNameNodeReconfigure {
@@ -567,6 +571,87 @@ private List validatePeerReport(String jsonReport) {
return containReport;
}
+ @Test
+ public void testReconfigureDecommissionBackoffMonitorParameters()
+ throws ReconfigurationException, IOException {
+ Configuration conf = new HdfsConfiguration();
+ conf.setClass(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MONITOR_CLASS,
+ DatanodeAdminBackoffMonitor.class, DatanodeAdminMonitorInterface.class);
+ int defaultPendingRepLimit = 1000;
+ conf.setInt(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT, defaultPendingRepLimit);
+ int defaultBlocksPerLock = 1000;
+ conf.setInt(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK,
+ defaultBlocksPerLock);
+
+ try (MiniDFSCluster newCluster = new MiniDFSCluster.Builder(conf).build()) {
+ newCluster.waitActive();
+ final NameNode nameNode = newCluster.getNameNode();
+ final DatanodeManager datanodeManager = nameNode.namesystem
+ .getBlockManager().getDatanodeManager();
+
+ // verify defaultPendingRepLimit.
+ assertEquals(datanodeManager.getDatanodeAdminManager().getPendingRepLimit(),
+ defaultPendingRepLimit);
+
+ // try invalid pendingRepLimit.
+ try {
+ nameNode.reconfigureProperty(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT,
+ "non-numeric");
+ fail("Should not reach here");
+ } catch (ReconfigurationException e) {
+ assertEquals("Could not change property " +
+ "dfs.namenode.decommission.backoff.monitor.pending.limit from '" +
+ defaultPendingRepLimit + "' to 'non-numeric'", e.getMessage());
+ }
+
+ try {
+ nameNode.reconfigureProperty(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT,
+ "-1");
+ fail("Should not reach here");
+ } catch (ReconfigurationException e) {
+ assertEquals("Could not change property " +
+ "dfs.namenode.decommission.backoff.monitor.pending.limit from '" +
+ defaultPendingRepLimit + "' to '-1'", e.getMessage());
+ }
+
+ // try correct pendingRepLimit.
+ nameNode.reconfigureProperty(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT,
+ "20000");
+ assertEquals(datanodeManager.getDatanodeAdminManager().getPendingRepLimit(), 20000);
+
+ // verify defaultBlocksPerLock.
+ assertEquals(datanodeManager.getDatanodeAdminManager().getBlocksPerLock(),
+ defaultBlocksPerLock);
+
+ // try invalid blocksPerLock.
+ try {
+ nameNode.reconfigureProperty(
+ DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK,
+ "non-numeric");
+ fail("Should not reach here");
+ } catch (ReconfigurationException e) {
+ assertEquals("Could not change property " +
+ "dfs.namenode.decommission.backoff.monitor.pending.blocks.per.lock from '" +
+ defaultBlocksPerLock + "' to 'non-numeric'", e.getMessage());
+ }
+
+ try {
+ nameNode.reconfigureProperty(
+ DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK, "-1");
+ fail("Should not reach here");
+ } catch (ReconfigurationException e) {
+ assertEquals("Could not change property " +
+ "dfs.namenode.decommission.backoff.monitor.pending.blocks.per.lock from '" +
+ defaultBlocksPerLock + "' to '-1'", e.getMessage());
+ }
+
+ // try correct blocksPerLock.
+ nameNode.reconfigureProperty(
+ DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK, "10000");
+ assertEquals(datanodeManager.getDatanodeAdminManager().getBlocksPerLock(), 10000);
+ }
+ }
+
@After
public void shutDown() throws IOException {
if (cluster != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 99e4b348f6157..59491206dcbbe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -43,6 +43,8 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK;
import org.apache.commons.io.FileUtils;
import org.apache.commons.text.TextStringBuilder;
@@ -438,7 +440,7 @@ public void testNameNodeGetReconfigurableProperties() throws IOException, Interr
final List outs = Lists.newArrayList();
final List errs = Lists.newArrayList();
getReconfigurableProperties("namenode", address, outs, errs);
- assertEquals(20, outs.size());
+ assertEquals(22, outs.size());
assertTrue(outs.get(0).contains("Reconfigurable properties:"));
assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY, outs.get(1));
assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(2));
@@ -449,8 +451,10 @@ public void testNameNodeGetReconfigurableProperties() throws IOException, Interr
assertEquals(DFS_IMAGE_PARALLEL_LOAD_KEY, outs.get(7));
assertEquals(DFS_NAMENODE_AVOID_SLOW_DATANODE_FOR_READ_KEY, outs.get(8));
assertEquals(DFS_NAMENODE_BLOCKPLACEMENTPOLICY_EXCLUDE_SLOW_NODES_ENABLED_KEY, outs.get(9));
- assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(10));
- assertEquals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, outs.get(11));
+ assertEquals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK, outs.get(10));
+ assertEquals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT, outs.get(11));
+ assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(12));
+ assertEquals(DFS_NAMENODE_MAX_SLOWPEER_COLLECT_NODES_KEY, outs.get(13));
assertEquals(errs.size(), 0);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
index 848d33d92453b..7e1b49c925fe9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
@@ -1660,14 +1660,18 @@
   <name>yarn.app.mapreduce.client-am.ipc.max-retries</name>
   <value>3</value>
   <description>The number of client retries to the AM - before reconnecting
-    to the RM to fetch Application Status.</description>
+    to the RM to fetch Application Status.
+    In other words, it is the ipc.client.connect.max.retries to be used during
+    reconnecting to the RM and fetching Application Status.</description>
 </property>

 <property>
   <name>yarn.app.mapreduce.client-am.ipc.max-retries-on-timeouts</name>
   <value>3</value>
   <description>The number of client retries on socket timeouts to the AM - before
-    reconnecting to the RM to fetch Application Status.</description>
+    reconnecting to the RM to fetch Application Status.
+    In other words, it is the ipc.client.connect.max.retries.on.timeouts to be used during
+    reconnecting to the RM and fetching Application Status.</description>
 </property>

 <property>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
index 75f250e1d7271..17358a37da32d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
@@ -110,6 +110,7 @@
       <groupId>org.hsqldb</groupId>
       <artifactId>hsqldb</artifactId>
       <scope>test</scope>
+      <classifier>jdk8</classifier>
     </dependency>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
index 11932e04e3784..16ce47579fe65 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
@@ -112,6 +112,7 @@
       <groupId>org.hsqldb</groupId>
       <artifactId>hsqldb</artifactId>
       <scope>provided</scope>
+      <classifier>jdk8</classifier>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop.thirdparty</groupId>
diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml
index 3ce66a10a84f7..21554090d7855 100644
--- a/hadoop-mapreduce-project/pom.xml
+++ b/hadoop-mapreduce-project/pom.xml
@@ -41,117 +41,51 @@
hadoop-mapreduce-examples
+
-
- com.google.protobuf
- protobuf-java
-
-
- org.apache.avro
- avro
-
-
- org.eclipse.jetty
- jetty-server
-
-
- org.apache.ant
- ant
-
-
- io.netty
- netty
-
-
- org.apache.velocity
- velocity
-
-
- org.slf4j
- slf4j-api
-
-
- paranamer-ant
- com.thoughtworks.paranamer
-
-
- org.xerial.snappy
- snappy-java
-
-
-
org.apache.hadoop
- hadoop-common
- provided
-
-
-
- org.slf4j
- slf4j-api
-
-
- org.slf4j
- slf4j-log4j12
+ hadoop-mapreduce-client-app
+ ${project.version}
org.apache.hadoop
- hadoop-annotations
-
-
- org.mockito
- mockito-core
- test
+ hadoop-mapreduce-client-common
+ ${project.version}
org.apache.hadoop
- hadoop-common
- test-jar
- test
+ hadoop-mapreduce-client-core
+ ${project.version}
org.apache.hadoop
- hadoop-hdfs
- test
-
-
- com.google.inject
- guice
+ hadoop-mapreduce-client-hs
+ ${project.version}
- com.sun.jersey
- jersey-server
-
-
- com.sun.jersey.contribs
- jersey-guice
-
-
- com.google.inject.extensions
- guice-servlet
-
-
- junit
- junit
-
-
- io.netty
- netty
+ org.apache.hadoop
+ hadoop-mapreduce-client-jobclient
+ ${project.version}
- commons-io
- commons-io
+ org.apache.hadoop
+ hadoop-mapreduce-client-nativetask
+ ${project.version}
- org.hsqldb
- hsqldb
- compile
+ org.apache.hadoop
+ hadoop-mapreduce-client-shuffle
+ ${project.version}
- ${leveldbjni.group}
- leveldbjni-all
+ org.apache.hadoop
+ hadoop-mapreduce-examples
+ ${project.version}
-
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index a246397d46d77..f60be7535043f 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -185,7 +185,7 @@
1.0-beta-1
900
1.12.316
- 2.5.2
+ 2.7.1
1.11.2
2.1
0.7
@@ -210,7 +210,7 @@
8.8.2
1.0.7.Final
1.0.2
- 5.3.0
+ 5.4.0
2.4.7
9.8.1
v12.22.1
@@ -358,6 +358,11 @@
         <artifactId>hadoop-hdfs-client</artifactId>
         <version>${hadoop.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdfs-native-client</artifactId>
+        <version>${hadoop.version}</version>
+      </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-hdfs-rbf</artifactId>
@@ -391,6 +396,11 @@
         <artifactId>hadoop-mapreduce-client-common</artifactId>
         <version>${hadoop.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-mapreduce-client-nativetask</artifactId>
+        <version>${hadoop.version}</version>
+      </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-yarn-api</artifactId>
@@ -1470,6 +1480,7 @@
         <groupId>org.hsqldb</groupId>
         <artifactId>hsqldb</artifactId>
         <version>${hsqldb.version}</version>
+        <classifier>jdk8</classifier>
       </dependency>
       <dependency>
         <groupId>io.dropwizard.metrics</groupId>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
index e9c7dd4a6d3f2..0c10e01768522 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java
@@ -467,6 +467,10 @@ public String toString() {
return getFormattedString(String.valueOf(getMemorySize()));
}
+ public String toFormattedString() {
+ return getFormattedString();
+ }
+
private String getFormattedString(String memory) {
StringBuilder sb = new StringBuilder();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
index fdc36667bfea0..6837de8001464 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
@@ -144,6 +144,7 @@
       <groupId>org.hsqldb</groupId>
       <artifactId>hsqldb</artifactId>
       <scope>test</scope>
+      <classifier>jdk8</classifier>
     </dependency>
     <dependency>
       <groupId>com.microsoft.sqlserver</groupId>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ApplicationHomeSubCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ApplicationHomeSubCluster.java
index 898e11f182015..e1ea302380a7b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ApplicationHomeSubCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ApplicationHomeSubCluster.java
@@ -17,6 +17,8 @@
package org.apache.hadoop.yarn.server.federation.store.records;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -123,32 +125,42 @@ public static ApplicationHomeSubCluster newInstance(ApplicationId appId, long cr
@Override
public boolean equals(Object obj) {
+
if (this == obj) {
return true;
}
+
if (obj == null) {
return false;
}
- if (getClass() != obj.getClass()) {
- return false;
- }
- ApplicationHomeSubCluster other = (ApplicationHomeSubCluster) obj;
- if (!this.getApplicationId().equals(other.getApplicationId())) {
- return false;
+
+ if (obj instanceof ApplicationHomeSubCluster) {
+ ApplicationHomeSubCluster other = (ApplicationHomeSubCluster) obj;
+ return new EqualsBuilder()
+ .append(this.getApplicationId(), other.getApplicationId())
+ .append(this.getHomeSubCluster(), other.getHomeSubCluster())
+ .isEquals();
}
- return this.getHomeSubCluster().equals(other.getHomeSubCluster());
+
+ return false;
}
@Override
public int hashCode() {
- return getApplicationId().hashCode() * 31 + getHomeSubCluster().hashCode();
+ return new HashCodeBuilder().
+ append(this.getApplicationId()).
+ append(this.getHomeSubCluster()).
+ append(this.getCreateTime()).toHashCode();
}
@Override
public String toString() {
- return "ApplicationHomeSubCluster [getApplicationId()="
- + getApplicationId() + ", getHomeSubCluster()=" + getHomeSubCluster()
- + "]";
+ StringBuilder sb = new StringBuilder();
+ sb.append("ApplicationHomeSubCluster: [")
+ .append("ApplicationId: ").append(getApplicationId()).append(", ")
+ .append("HomeSubCluster: ").append(getHomeSubCluster()).append(", ")
+ .append("CreateTime: ").append(getCreateTime()).append(", ")
+ .append("]");
+ return sb.toString();
}
-
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ReservationHomeSubCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ReservationHomeSubCluster.java
index e080d115716dd..c1a15536d2688 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ReservationHomeSubCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/ReservationHomeSubCluster.java
@@ -94,21 +94,24 @@ public static ReservationHomeSubCluster newInstance(ReservationId resId,
@Override
public boolean equals(Object obj) {
+
if (this == obj) {
return true;
}
+
if (obj == null) {
return false;
}
- if (getClass() != obj.getClass()) {
- return false;
+
+ if (obj instanceof ReservationHomeSubCluster) {
+ ReservationHomeSubCluster other = (ReservationHomeSubCluster) obj;
+ return new EqualsBuilder()
+ .append(this.getReservationId(), other.getReservationId())
+ .append(this.getHomeSubCluster(), other.getHomeSubCluster())
+ .isEquals();
}
- ReservationHomeSubCluster other = (ReservationHomeSubCluster) obj;
- return new EqualsBuilder()
- .append(this.getReservationId(), other.getReservationId())
- .append(this.getHomeSubCluster(), other.getHomeSubCluster())
- .isEquals();
+ return false;
}
@Override
@@ -121,9 +124,11 @@ public int hashCode() {
@Override
public String toString() {
- return "ReservationHomeSubCluster [getReservationId()="
- + getReservationId() + ", getApplicationHomeSubcluster()=" + getHomeSubCluster()
- + "]";
+ StringBuilder sb = new StringBuilder();
+ sb.append("ReservationHomeSubCluster: [")
+ .append("ReservationId: ").append(getReservationId()).append(", ")
+ .append("HomeSubCluster: ").append(getHomeSubCluster())
+ .append("]");
+ return sb.toString();
}
-
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/RouterMasterKey.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/RouterMasterKey.java
index 0090723e517e2..8cd80328c3fce 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/RouterMasterKey.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/RouterMasterKey.java
@@ -114,20 +114,36 @@ public int hashCode() {
}
@Override
- public boolean equals(Object right) {
- if (this == right) {
+ public boolean equals(Object obj) {
+
+ if (this == obj) {
return true;
}
- if (right == null || getClass() != right.getClass()) {
+ if (obj == null) {
return false;
}
- RouterMasterKey r = (RouterMasterKey) right;
- return new EqualsBuilder()
- .append(this.getKeyId().intValue(), r.getKeyId().intValue())
- .append(this.getExpiryDate().longValue(), this.getExpiryDate().longValue())
- .append(getKeyBytes().array(), r.getKeyBytes())
- .isEquals();
+ if (obj instanceof RouterMasterKey) {
+ RouterMasterKey other = (RouterMasterKey) obj;
+ return new EqualsBuilder()
+ .append(this.getKeyId().intValue(), other.getKeyId().intValue())
+ .append(this.getExpiryDate().longValue(), other.getExpiryDate().longValue())
+ .append(this.getKeyBytes().array(), other.getKeyBytes().array())
+ .isEquals();
+ }
+
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append("RouterMasterKey: [")
+ .append("KeyId: ").append(getKeyId()).append(", ")
+ .append("ExpiryDate: ").append(getExpiryDate()).append(", ")
+ .append("KeyBytes: ").append(getKeyBytes()).append(", ")
+ .append("]");
+ return sb.toString();
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterId.java
index 7eeb44bba55a7..db638c2fac53d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterId.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterId.java
@@ -17,6 +17,8 @@
package org.apache.hadoop.yarn.server.federation.store.records;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -78,19 +80,26 @@ public boolean equals(Object obj) {
if (this == obj) {
return true;
}
+
if (obj == null) {
return false;
}
- if (getClass() != obj.getClass()) {
- return false;
+
+ if (obj instanceof SubClusterId) {
+ SubClusterId other = (SubClusterId) obj;
+ return new EqualsBuilder()
+ .append(this.getId(), other.getId())
+ .isEquals();
}
- SubClusterId other = (SubClusterId) obj;
- return this.getId().equals(other.getId());
+
+ return false;
}
@Override
public int hashCode() {
- return getId().hashCode();
+ return new HashCodeBuilder().
+ append(this.getId()).
+ toHashCode();
}
@Override
@@ -104,5 +113,4 @@ public String toString() {
sb.append(getId());
return sb.toString();
}
-
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterIdInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterIdInfo.java
index e2260a1f457fd..ad03fb09a4e85 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterIdInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterIdInfo.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.yarn.server.federation.store.records;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -58,18 +60,28 @@ public SubClusterId toId() {
}
@Override
- public boolean equals(Object other) {
- if (other instanceof SubClusterIdInfo) {
- if (((SubClusterIdInfo) other).id.equals(this.id)) {
- return true;
- }
+ public boolean equals(Object obj) {
+
+ if (this == obj) {
+ return true;
+ }
+
+ if (obj == null) {
+ return false;
}
+
+ if (obj instanceof SubClusterIdInfo) {
+ SubClusterIdInfo other = (SubClusterIdInfo) obj;
+ return new EqualsBuilder()
+ .append(this.id, other.id)
+ .isEquals();
+ }
+
return false;
}
@Override
public int hashCode() {
- return id.hashCode();
+ return new HashCodeBuilder().append(this.id).toHashCode();
}
-
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterInfo.java
index cbf64e6126b28..40b87c7eb094e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterInfo.java
@@ -17,6 +17,8 @@
package org.apache.hadoop.yarn.server.federation.store.records;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -43,6 +45,7 @@ public abstract class SubClusterInfo {
@Private
@Unstable
+ @SuppressWarnings("checkstyle:ParameterNumber")
public static SubClusterInfo newInstance(SubClusterId subClusterId,
String amRMServiceAddress, String clientRMServiceAddress,
String rmAdminServiceAddress, String rmWebServiceAddress,
@@ -54,6 +57,7 @@ public static SubClusterInfo newInstance(SubClusterId subClusterId,
@Private
@Unstable
+ @SuppressWarnings("checkstyle:ParameterNumber")
public static SubClusterInfo newInstance(SubClusterId subClusterId,
String amRMServiceAddress, String clientRMServiceAddress,
String rmAdminServiceAddress, String rmWebServiceAddress,
@@ -252,48 +256,49 @@ public static SubClusterInfo newInstance(SubClusterId subClusterId,
@Override
public String toString() {
- return "SubClusterInfo [getSubClusterId() = " + getSubClusterId()
- + ", getAMRMServiceAddress() = " + getAMRMServiceAddress()
- + ", getClientRMServiceAddress() = " + getClientRMServiceAddress()
- + ", getRMAdminServiceAddress() = " + getRMAdminServiceAddress()
- + ", getRMWebServiceAddress() = " + getRMWebServiceAddress()
- + ", getState() = " + getState() + ", getLastStartTime() = "
- + getLastStartTime() + ", getCapability() = " + getCapability() + "]";
+ StringBuilder sb = new StringBuilder();
+ sb.append("SubClusterInfo: [")
+ .append("SubClusterId: ").append(getSubClusterId()).append(", ")
+ .append("AMRMServiceAddress: ").append(getAMRMServiceAddress()).append(", ")
+ .append("ClientRMServiceAddress: ").append(getClientRMServiceAddress()).append(", ")
+ .append("RMAdminServiceAddress: ").append(getRMAdminServiceAddress()).append(", ")
+ .append("RMWebServiceAddress: ").append(getRMWebServiceAddress()).append(", ")
+ .append("State: ").append(getState()).append(", ")
+ .append("LastStartTime: ").append(getLastStartTime()).append(", ")
+ .append("Capability: ").append(getCapability())
+ .append("]");
+ return sb.toString();
}
@Override
public boolean equals(Object obj) {
+
if (this == obj) {
return true;
}
+
if (obj == null) {
return false;
}
+
if (getClass() != obj.getClass()) {
return false;
}
- SubClusterInfo other = (SubClusterInfo) obj;
- if (!this.getSubClusterId().equals(other.getSubClusterId())) {
- return false;
- }
- if (!this.getAMRMServiceAddress().equals(other.getAMRMServiceAddress())) {
- return false;
- }
- if (!this.getClientRMServiceAddress()
- .equals(other.getClientRMServiceAddress())) {
- return false;
- }
- if (!this.getRMAdminServiceAddress()
- .equals(other.getRMAdminServiceAddress())) {
- return false;
- }
- if (!this.getRMWebServiceAddress().equals(other.getRMWebServiceAddress())) {
- return false;
- }
- if (!this.getState().equals(other.getState())) {
- return false;
+
+ if (obj instanceof SubClusterInfo) {
+ SubClusterInfo other = (SubClusterInfo) obj;
+ return new EqualsBuilder()
+ .append(this.getSubClusterId(), other.getSubClusterId())
+ .append(this.getAMRMServiceAddress(), other.getAMRMServiceAddress())
+ .append(this.getClientRMServiceAddress(), other.getClientRMServiceAddress())
+ .append(this.getRMAdminServiceAddress(), other.getRMAdminServiceAddress())
+ .append(this.getRMWebServiceAddress(), other.getRMWebServiceAddress())
+ .append(this.getState(), other.getState())
+ .append(this.getLastStartTime(), other.getLastStartTime())
+ .isEquals();
}
- return this.getLastStartTime() == other.getLastStartTime();
+
+ return false;
// Capability and HeartBeat fields are not included as they are temporal
// (i.e. timestamps), so they change during the lifetime of the same
// sub-cluster
@@ -301,23 +306,16 @@ public boolean equals(Object obj) {
@Override
public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result
- + ((getSubClusterId() == null) ? 0 : getSubClusterId().hashCode());
- result = prime * result + ((getAMRMServiceAddress() == null) ? 0
- : getAMRMServiceAddress().hashCode());
- result = prime * result + ((getClientRMServiceAddress() == null) ? 0
- : getClientRMServiceAddress().hashCode());
- result = prime * result + ((getRMAdminServiceAddress() == null) ? 0
- : getRMAdminServiceAddress().hashCode());
- result = prime * result + ((getRMWebServiceAddress() == null) ? 0
- : getRMWebServiceAddress().hashCode());
- result =
- prime * result + ((getState() == null) ? 0 : getState().hashCode());
- result = prime * result
- + (int) (getLastStartTime() ^ (getLastStartTime() >>> 32));
- return result;
+
+ return new HashCodeBuilder()
+ .append(this.getSubClusterId())
+ .append(this.getAMRMServiceAddress())
+ .append(this.getClientRMServiceAddress())
+ .append(this.getRMAdminServiceAddress())
+ .append(this.getRMWebServiceAddress())
+ .append(this.getState())
+ .append(this.getLastStartTime())
+ .toHashCode();
// Capability and HeartBeat fields are not included as they are temporal
// (i.e. timestamps), so they change during the lifetime of the same
// sub-cluster
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterPolicyConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterPolicyConfiguration.java
index 817d270146fd1..822e40c384ebb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterPolicyConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/SubClusterPolicyConfiguration.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.yarn.server.federation.store.records;
+import org.apache.commons.lang3.builder.EqualsBuilder;
+import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
@@ -127,36 +129,44 @@ public static SubClusterPolicyConfiguration newInstance(
@Override
public int hashCode() {
- return 31 * getParams().hashCode() + getType().hashCode();
+ return new HashCodeBuilder()
+ .append(this.getType())
+ .append(this.getQueue())
+ .append(this.getParams()).
+ toHashCode();
}
@Override
public boolean equals(Object obj) {
+
if (this == obj) {
return true;
}
+
if (obj == null) {
return false;
}
- if (getClass() != obj.getClass()) {
- return false;
- }
- SubClusterPolicyConfiguration other = (SubClusterPolicyConfiguration) obj;
- if (!this.getType().equals(other.getType())) {
- return false;
- }
- if (!this.getParams().equals(other.getParams())) {
- return false;
+
+ if (obj instanceof SubClusterPolicyConfiguration) {
+ SubClusterPolicyConfiguration other = (SubClusterPolicyConfiguration) obj;
+ return new EqualsBuilder()
+ .append(this.getType(), other.getType())
+ .append(this.getQueue(), other.getQueue())
+ .append(this.getParams(), other.getParams())
+ .isEquals();
}
- return true;
+
+ return false;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
- sb.append(getType())
- .append(" : ")
- .append(getParams());
+ sb.append("SubClusterPolicyConfiguration: [")
+ .append("Type: ").append(getType()).append(", ")
+ .append("Queue: ").append(getQueue()).append(", ")
+ .append("Params: ").append(getParams()).append(", ")
+ .append("]");
return sb.toString();
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/records/TestFederationProtocolRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/records/TestFederationProtocolRecords.java
index 6398a3dac81a7..bc20856e8c5d9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/records/TestFederationProtocolRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/records/TestFederationProtocolRecords.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.yarn.server.federation.store.records;
+import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.api.BasePBImplRecordsTest;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ReservationId;
@@ -56,6 +57,7 @@
import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.RouterMasterKeyRequestProto;
import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.RouterMasterKeyResponseProto;
import org.apache.hadoop.yarn.federation.proto.YarnServerFederationProtos.ApplicationHomeSubClusterProto;
+import org.apache.hadoop.yarn.server.federation.policies.dao.WeightedPolicyInfo;
import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.AddApplicationHomeSubClusterRequestPBImpl;
import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.AddApplicationHomeSubClusterResponsePBImpl;
import org.apache.hadoop.yarn.server.federation.store.records.impl.pb.DeleteApplicationHomeSubClusterRequestPBImpl;
@@ -97,6 +99,11 @@
import org.junit.BeforeClass;
import org.junit.Test;
+import java.nio.ByteBuffer;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+
/**
* Test class for federation protocol records.
*/
@@ -326,4 +333,92 @@ public void testGetReservationHomeSubClusterRequest() throws Exception {
validatePBImplRecord(GetReservationHomeSubClusterRequestPBImpl.class,
GetReservationHomeSubClusterRequestProto.class);
}
-}
\ No newline at end of file
+
+ @Test
+ public void testValidateApplicationHomeSubClusterEqual() throws Exception {
+ long now = Time.now();
+
+ ApplicationId appId1 = ApplicationId.newInstance(now, 1);
+ SubClusterId subClusterId1 = SubClusterId.newInstance("SC-1");
+ ApplicationHomeSubCluster applicationHomeSubCluster1 =
+ ApplicationHomeSubCluster.newInstance(appId1, subClusterId1);
+
+ ApplicationId appId2 = ApplicationId.newInstance(now, 1);
+ SubClusterId subClusterId2 = SubClusterId.newInstance("SC-1");
+ ApplicationHomeSubCluster applicationHomeSubCluster2 =
+ ApplicationHomeSubCluster.newInstance(appId2, subClusterId2);
+
+ assertEquals(applicationHomeSubCluster1, applicationHomeSubCluster2);
+ }
+
+ @Test
+ public void testValidateReservationHomeSubClusterEqual() throws Exception {
+ long now = Time.now();
+
+ ReservationId reservationId1 = ReservationId.newInstance(now, 1);
+ SubClusterId subClusterId1 = SubClusterId.newInstance("SC-1");
+ ReservationHomeSubCluster reservationHomeSubCluster1 =
+ ReservationHomeSubCluster.newInstance(reservationId1, subClusterId1);
+
+ ReservationId reservationId2 = ReservationId.newInstance(now, 1);
+ SubClusterId subClusterId2 = SubClusterId.newInstance("SC-1");
+ ReservationHomeSubCluster reservationHomeSubCluster2 =
+ ReservationHomeSubCluster.newInstance(reservationId2, subClusterId2);
+
+ assertEquals(reservationHomeSubCluster1, reservationHomeSubCluster2);
+ }
+
+ @Test
+ public void testSubClusterIdEqual() throws Exception {
+ SubClusterId subClusterId1 = SubClusterId.newInstance("SC-1");
+ SubClusterId subClusterId2 = SubClusterId.newInstance("SC-1");
+ assertEquals(subClusterId1, subClusterId2);
+ }
+
+ @Test
+ public void testSubClusterIdInfoEqual() throws Exception {
+ SubClusterIdInfo subClusterIdInfo1 = new SubClusterIdInfo("SC-1");
+ SubClusterIdInfo subClusterIdInfo2 = new SubClusterIdInfo("SC-1");
+ assertEquals(subClusterIdInfo1, subClusterIdInfo2);
+ }
+
+ @Test
+ public void testSubClusterPolicyConfigurationEqual() throws Exception {
+
+ String queue1 = "queue1";
+ WeightedPolicyInfo policyInfo1 = mock(WeightedPolicyInfo.class);
+ ByteBuffer buf1 = policyInfo1.toByteBuffer();
+ SubClusterPolicyConfiguration configuration1 = SubClusterPolicyConfiguration
+ .newInstance(queue1, policyInfo1.getClass().getCanonicalName(), buf1);
+
+ String queue2 = "queue1";
+ WeightedPolicyInfo policyInfo2 = mock(WeightedPolicyInfo.class);
+ ByteBuffer buf2 = policyInfo2.toByteBuffer();
+ SubClusterPolicyConfiguration configuration2 = SubClusterPolicyConfiguration
+ .newInstance(queue2, policyInfo2.getClass().getCanonicalName(), buf2);
+
+ assertEquals(configuration1, configuration2);
+ }
+
+ @Test
+ public void testSubClusterInfoEqual() throws Exception {
+
+ String scAmRMAddress = "5.6.7.8:5";
+ String scClientRMAddress = "5.6.7.8:6";
+ String scRmAdminAddress = "5.6.7.8:7";
+ String scWebAppAddress = "127.0.0.1:8080";
+ String capabilityJson = "-";
+
+ SubClusterInfo sc1 =
+ SubClusterInfo.newInstance(SubClusterId.newInstance("SC-1"),
+ scAmRMAddress, scClientRMAddress, scRmAdminAddress, scWebAppAddress,
+ SubClusterState.SC_RUNNING, Time.now(), capabilityJson);
+
+ SubClusterInfo sc2 =
+ SubClusterInfo.newInstance(SubClusterId.newInstance("SC-1"),
+ scAmRMAddress, scClientRMAddress, scRmAdminAddress, scWebAppAddress,
+ SubClusterState.SC_RUNNING, Time.now(), capabilityJson);
+
+ assertEquals(sc1, sc2);
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index c89ac520f4bff..14f5ffeefe0d3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -208,6 +208,8 @@ public class DockerLinuxContainerRuntime extends OCIContainerRuntime {
private static final Pattern dockerImagePattern =
Pattern.compile(DOCKER_IMAGE_PATTERN);
+ private static final Pattern DOCKER_DIGEST_PATTERN = Pattern.compile("^sha256:[a-z0-9]{12,64}$");
+
private static final String DEFAULT_PROCFS = "/proc";
@InterfaceAudience.Private
@@ -1201,9 +1203,17 @@ public static void validateImageName(String imageName)
throw new ContainerExecutionException(
ENV_DOCKER_CONTAINER_IMAGE + " not set!");
}
- if (!dockerImagePattern.matcher(imageName).matches()) {
- throw new ContainerExecutionException("Image name '" + imageName
- + "' doesn't match docker image name pattern");
+ // check if digest is part of imageName, extract and validate it.
+ String digest = null;
+ if (imageName.contains("@sha256")) {
+ String[] digestParts = imageName.split("@");
+ digest = digestParts[1];
+ imageName = digestParts[0];
+ }
+ if (!dockerImagePattern.matcher(imageName).matches() || (digest != null
+ && !DOCKER_DIGEST_PATTERN.matcher(digest).matches())) {
+ throw new ContainerExecutionException(
+ "Image name '" + imageName + "' doesn't match docker image name pattern");
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
index f0ae037f9ffaf..ea7c213809330 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java
@@ -2033,19 +2033,27 @@ public static Configuration enableMockContainerExecutor(Configuration conf) {
@Test
public void testDockerImageNamePattern() throws Exception {
- String[] validNames =
- { "ubuntu", "fedora/httpd:version1.0",
- "fedora/httpd:version1.0.test",
- "fedora/httpd:version1.0.TEST",
- "myregistryhost:5000/ubuntu",
- "myregistryhost:5000/fedora/httpd:version1.0",
- "myregistryhost:5000/fedora/httpd:version1.0.test",
- "myregistryhost:5000/fedora/httpd:version1.0.TEST"};
-
- String[] invalidNames = { "Ubuntu", "ubuntu || fedora", "ubuntu#",
- "myregistryhost:50AB0/ubuntu", "myregistry#host:50AB0/ubuntu",
- ":8080/ubuntu"
- };
+ String[] validNames = {"ubuntu", "fedora/httpd:version1.0", "fedora/httpd:version1.0.test",
+ "fedora/httpd:version1.0.TEST", "myregistryhost:5000/ubuntu",
+ "myregistryhost:5000/fedora/httpd:version1.0",
+ "myregistryhost:5000/fedora/httpd:version1.0.test",
+ "myregistryhost:5000/fedora/httpd:version1.0.TEST",
+ "123456789123.dkr.ecr.us-east-1.amazonaws.com/emr-docker-examples:pyspark-example"
+ + "@sha256:f1d4ae3f7261a72e98c6ebefe9985cf10a0ea5bd762585a43e0700ed99863807"};
+
+ String[] invalidNames = {"Ubuntu", "ubuntu || fedora", "ubuntu#", "myregistryhost:50AB0/ubuntu",
+ "myregistry#host:50AB0/ubuntu", ":8080/ubuntu",
+
+ // Invalid: contains "@sha256" but doesn't really contain a digest.
+ "123456789123.dkr.ecr.us-east-1.amazonaws.com/emr-docker-examples:pyspark-example@sha256",
+
+ // Invalid: digest is too short.
+ "123456789123.dkr.ecr.us-east-1.amazonaws.com/emr-docker-examples:pyspark-example"
+ + "@sha256:f1d4",
+
+ // Invalid: digest is too long
+ "123456789123.dkr.ecr.us-east-1.amazonaws.com/emr-docker-examples:pyspark-example"
+ + "@sha256:f1d4ae3f7261a72e98c6ebefe9985cf10a0ea5bd762585a43e0700ed99863807f"};
for (String name : validNames) {
DockerLinuxContainerRuntime.validateImageName(name);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
index 5c7787ce02360..de75631486b92 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
@@ -126,6 +126,9 @@ public final class RMWSConsts {
/** Path for {@code RMWebServiceProtocol#getClusterNodeLabels}. */
public static final String GET_NODE_LABELS = "/get-node-labels";
+ /** Path for {@code RMWebServiceProtocol#getRMNodeLabels}. */
+ public static final String GET_RM_NODE_LABELS = "/get-rm-node-labels";
+
/** Path for {@code RMWebServiceProtocol#addToClusterNodeLabels}. */
public static final String ADD_NODE_LABELS = "/add-node-labels";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
index 6c0309969eebb..43b5307279b83 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
@@ -325,6 +325,8 @@ Response updateAppState(AppState targetState, HttpServletRequest hsr,
*/
NodeToLabelsInfo getNodeToLabels(HttpServletRequest hsr) throws IOException;
+
+ /**
+ * This method retrieves the node labels in the cluster together with their
+ * active NodeManager counts and partition resources, and it is reachable by
+ * using {@link RMWSConsts#GET_RM_NODE_LABELS}.
+ *
+ * @param hsr the servlet request
+ * @return the node labels in the cluster
+ * @throws IOException if an IOException happened
+ */
+ NodeLabelsInfo getRMNodeLabels(HttpServletRequest hsr) throws IOException;
+
/**
* This method retrieves all the node within multiple node labels in the
* cluster, and it is reachable by using {@link RMWSConsts#LABEL_MAPPINGS}.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 0803af5e76d1b..df247b06afa9c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -129,6 +129,7 @@
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.nodelabels.RMNodeLabel;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
import org.apache.hadoop.yarn.server.resourcemanager.AdminService;
@@ -138,6 +139,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NodeLabelsUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
@@ -1404,6 +1406,32 @@ public NodeLabelsInfo getClusterNodeLabels(@Context HttpServletRequest hsr)
return new NodeLabelsInfo(nodeLabelsInfo);
}
+ @GET
+ @Path(RMWSConsts.GET_RM_NODE_LABELS)
+ @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
+ MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+ public NodeLabelsInfo getRMNodeLabels(@Context HttpServletRequest hsr)
+ throws IOException {
+
+ initForReadableEndpoints();
+ RMNodeLabelsManager nlm = rm.getRMContext().getNodeLabelManager();
+
+ ArrayList<NodeLabelInfo> nodeLabelsInfo = new ArrayList<>();
+ for (RMNodeLabel info : nlm.pullRMNodeLabelsInfo()) {
+ String labelName = info.getLabelName().isEmpty() ?
+ NodeLabel.DEFAULT_NODE_LABEL_PARTITION : info.getLabelName();
+ int activeNMs = info.getNumActiveNMs();
+ PartitionInfo partitionInfo =
+ new PartitionInfo(new ResourceInfo(info.getResource()));
+ NodeLabel nodeLabel = NodeLabel.newInstance(labelName, info.getIsExclusive());
+ NodeLabelInfo nodeLabelInfo = new NodeLabelInfo(nodeLabel, partitionInfo);
+ nodeLabelInfo.setActiveNMs(activeNMs);
+ nodeLabelsInfo.add(nodeLabelInfo);
+ }
+
+ return new NodeLabelsInfo(nodeLabelsInfo);
+ }
+
@POST
@Path(RMWSConsts.ADD_NODE_LABELS)
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
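
The new GET_RM_NODE_LABELS endpoint returns one NodeLabelInfo per label, carrying the label name (DEFAULT_PARTITION when the name is empty), its exclusivity, the number of active NodeManagers and the partition resource. A minimal sketch of querying it from plain Java follows; the localhost:8088 address, the /ws/v1/cluster web-service root and the JSON shape in the comment are assumptions based on the usual ResourceManager defaults, not something defined by this patch.

// Hedged sketch: query the new endpoint with JDK classes only.
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class GetRMNodeLabelsClient {
  public static void main(String[] args) throws Exception {
    // Assumed RM address and web-service root; adjust for the target cluster.
    URL url = new URL("http://localhost:8088/ws/v1/cluster/get-rm-node-labels");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    int status = conn.getResponseCode();
    StringBuilder body = new StringBuilder();
    try (BufferedReader reader = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      while ((line = reader.readLine()) != null) {
        body.append(line);
      }
    }
    conn.disconnect();
    // Roughly expected shape (illustrative only):
    // {"nodeLabelsInfo":[{"name":"...","exclusivity":true,"activeNMs":2,"partitionInfo":{...}}]}
    System.out.println(status + " " + body);
  }
}
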
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeLabelInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeLabelInfo.java
index ce47471219411..1b451c797817f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeLabelInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeLabelInfo.java
@@ -31,6 +31,7 @@ public class NodeLabelInfo {
private String name;
private boolean exclusivity;
private PartitionInfo partitionInfo;
+ private Integer activeNMs;
public NodeLabelInfo() {
// JAXB needs this
@@ -68,6 +69,26 @@ public PartitionInfo getPartitionInfo() {
return partitionInfo;
}
+ public Integer getActiveNMs() {
+ return activeNMs;
+ }
+
+ public void setActiveNMs(Integer activeNMs) {
+ this.activeNMs = activeNMs;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public void setExclusivity(boolean exclusivity) {
+ this.exclusivity = exclusivity;
+ }
+
+ public void setPartitionInfo(PartitionInfo partitionInfo) {
+ this.partitionInfo = partitionInfo;
+ }
+
@Override
public boolean equals(Object obj) {
if (this == obj) {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
index 67369c8fa3ac7..707452fbadfd1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
@@ -72,6 +72,10 @@ public String toString() {
return getResource().toString();
}
+ public String toFormattedString() {
+ return getResource().toFormattedString();
+ }
+
public void setMemory(int memory) {
if (resources == null) {
resources = Resource.newInstance(memory, vCores);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
index 36ba1732ea7d4..a74b50b64f466 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/RouterMetrics.java
@@ -121,6 +121,8 @@ public final class RouterMetrics {
private MutableGaugeInt numGetAppTimeoutFailedRetrieved;
@Metric("# of getAppTimeouts failed to be retrieved")
private MutableGaugeInt numGetAppTimeoutsFailedRetrieved;
+ @Metric("# of getRMNodeLabels failed to be retrieved")
+ private MutableGaugeInt numGetRMNodeLabelsFailedRetrieved;
@Metric("# of checkUserAccessToQueue failed to be retrieved")
private MutableGaugeInt numCheckUserAccessToQueueFailedRetrieved;
@@ -205,6 +207,8 @@ public final class RouterMetrics {
private MutableRate totalSucceededGetAppTimeoutRetrieved;
@Metric("Total number of successful Retrieved GetAppTimeouts and latency(ms)")
private MutableRate totalSucceededGetAppTimeoutsRetrieved;
+ @Metric("Total number of successful Retrieved GetRMNodeLabels and latency(ms)")
+ private MutableRate totalSucceededGetRMNodeLabelsRetrieved;
@Metric("Total number of successful Retrieved CheckUserAccessToQueue and latency(ms)")
private MutableRate totalSucceededCheckUserAccessToQueueRetrieved;
@@ -251,6 +255,7 @@ public final class RouterMetrics {
private MutableQuantiles getUpdateQueueLatency;
private MutableQuantiles getAppTimeoutLatency;
private MutableQuantiles getAppTimeoutsLatency;
+ private MutableQuantiles getRMNodeLabelsLatency;
private MutableQuantiles checkUserAccessToQueueLatency;
private static volatile RouterMetrics instance = null;
@@ -405,6 +410,9 @@ private RouterMetrics() {
getAppTimeoutsLatency = registry.newQuantiles("getAppTimeoutsLatency",
"latency of get apptimeouts timeouts", "ops", "latency", 10);
+ getRMNodeLabelsLatency = registry.newQuantiles("getRMNodeLabelsLatency",
+ "latency of get rmnodelabels timeouts", "ops", "latency", 10);
+
checkUserAccessToQueueLatency = registry.newQuantiles("checkUserAccessToQueueLatency",
"latency of get apptimeouts timeouts", "ops", "latency", 10);
}
@@ -628,6 +636,11 @@ public long getNumSucceededGetAppTimeoutsRetrieved() {
return totalSucceededGetAppTimeoutsRetrieved.lastStat().numSamples();
}
+ @VisibleForTesting
+ public long getNumSucceededGetRMNodeLabelsRetrieved() {
+ return totalSucceededGetRMNodeLabelsRetrieved.lastStat().numSamples();
+ }
+
@VisibleForTesting
public long getNumSucceededCheckUserAccessToQueueRetrievedRetrieved() {
return totalSucceededCheckUserAccessToQueueRetrieved.lastStat().numSamples();
@@ -833,6 +846,11 @@ public double getLatencySucceededGetAppTimeoutsRetrieved() {
return totalSucceededGetAppTimeoutsRetrieved.lastStat().mean();
}
+ @VisibleForTesting
+ public double getLatencySucceededGetRMNodeLabelsRetrieved() {
+ return totalSucceededGetRMNodeLabelsRetrieved.lastStat().mean();
+ }
+
@VisibleForTesting
public double getLatencySucceededCheckUserAccessToQueueRetrieved() {
return totalSucceededCheckUserAccessToQueueRetrieved.lastStat().mean();
@@ -1019,6 +1037,10 @@ public int getAppTimeoutsFailedRetrieved() {
return numGetAppTimeoutsFailedRetrieved.value();
}
+ public int getRMNodeLabelsFailedRetrieved() {
+ return numGetRMNodeLabelsFailedRetrieved.value();
+ }
+
public int getCheckUserAccessToQueueFailedRetrieved() {
return numCheckUserAccessToQueueFailedRetrieved.value();
}
@@ -1223,6 +1245,11 @@ public void succeededGetAppTimeoutsRetrieved(long duration) {
getAppTimeoutsLatency.add(duration);
}
+ public void succeededGetRMNodeLabelsRetrieved(long duration) {
+ totalSucceededGetRMNodeLabelsRetrieved.add(duration);
+ getRMNodeLabelsLatency.add(duration);
+ }
+
public void succeededCheckUserAccessToQueueRetrieved(long duration) {
totalSucceededCheckUserAccessToQueueRetrieved.add(duration);
checkUserAccessToQueueLatency.add(duration);
@@ -1388,6 +1415,10 @@ public void incrGetAppTimeoutsFailedRetrieved() {
numGetAppTimeoutsFailedRetrieved.incr();
}
+ public void incrGetRMNodeLabelsFailedRetrieved() {
+ numGetRMNodeLabelsFailedRetrieved.incr();
+ }
+
public void incrCheckUserAccessToQueueFailedRetrieved() {
numCheckUserAccessToQueueFailedRetrieved.incr();
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
index 0bdd87e4ebc9a..9046fb8cc9c65 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/DefaultRequestInterceptorREST.java
@@ -602,4 +602,12 @@ public Response signalToContainer(String containerId, String command,
+ containerId + "/" + RMWSConsts.SIGNAL + "/" + command, null,
null, getConf(), client);
}
+
+ @Override
+ public NodeLabelsInfo getRMNodeLabels(HttpServletRequest hsr) {
+ return RouterWebServiceUtil.genericForward(webAppAddress, hsr,
+ NodeLabelsInfo.class, HTTPMethods.GET,
+ RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.GET_RM_NODE_LABELS,
+ null, null, getConf(), client);
+ }
}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
index 0dcfa1dc08560..bf98d813e7fbe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/FederationInterceptorREST.java
@@ -1282,10 +1282,39 @@ public NodeToLabelsInfo getNodeToLabels(HttpServletRequest hsr)
routerMetrics.incrNodeToLabelsFailedRetrieved();
RouterServerUtil.logAndThrowIOException("getNodeToLabels error.", e);
}
- routerMetrics.incrGetAppStatisticsFailedRetrieved();
+ routerMetrics.incrNodeToLabelsFailedRetrieved();
throw new RuntimeException("getNodeToLabels Failed.");
}
+ @Override
+ public NodeLabelsInfo getRMNodeLabels(HttpServletRequest hsr) throws IOException {
+ try {
+ long startTime = clock.getTime();
+ Map<SubClusterId, SubClusterInfo> subClustersActive = getActiveSubclusters();
+ final HttpServletRequest hsrCopy = clone(hsr);
+ Class[] argsClasses = new Class[]{HttpServletRequest.class};
+ Object[] args = new Object[]{hsrCopy};
+ ClientMethod remoteMethod = new ClientMethod("getRMNodeLabels", argsClasses, args);
+ Map<SubClusterInfo, NodeLabelsInfo> nodeLabelsInfoMap =
+ invokeConcurrent(subClustersActive.values(), remoteMethod, NodeLabelsInfo.class);
+ NodeLabelsInfo nodeLabelsInfo =
+ RouterWebServiceUtil.mergeNodeLabelsInfo(nodeLabelsInfoMap);
+ if (nodeLabelsInfo != null) {
+ long stopTime = clock.getTime();
+ routerMetrics.succeededGetRMNodeLabelsRetrieved(stopTime - startTime);
+ return nodeLabelsInfo;
+ }
+ } catch (NotFoundException e) {
+ routerMetrics.incrGetRMNodeLabelsFailedRetrieved();
+ RouterServerUtil.logAndThrowIOException("get all active sub cluster(s) error.", e);
+ } catch (YarnException e) {
+ routerMetrics.incrGetRMNodeLabelsFailedRetrieved();
+ RouterServerUtil.logAndThrowIOException("getRMNodeLabels error.", e);
+ }
+ routerMetrics.incrGetRMNodeLabelsFailedRetrieved();
+ throw new RuntimeException("getRMNodeLabels Failed.");
+ }
+
@Override
 public LabelsToNodesInfo getLabelsToNodes(Set<String> labels)
throws IOException {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/NavBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/NavBlock.java
index b1d3b61ab4bee..2266370ee95f6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/NavBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/NavBlock.java
@@ -20,7 +20,6 @@
import com.google.inject.Inject;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.server.router.Router;
import org.apache.hadoop.yarn.server.webapp.WebPageUtils;
import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
@@ -49,35 +48,14 @@ public void render(Block html) {
 List<String> subClusterIds = getActiveSubClusterIds();
- Hamlet.UL<Hamlet.LI<Hamlet.UL<Hamlet.DIV<Hamlet>>>> subAppsList1 =
- mainList.li().a(url("nodes"), "Nodes").ul().$style("padding:0.3em 1em 0.1em 2em");
-
// ### nodes info
- subAppsList1.li().__();
- for (String subClusterId : subClusterIds) {
- subAppsList1.li().a(url("nodes", subClusterId), subClusterId).__();
- }
- subAppsList1.__().__();
+ initNodesMenu(mainList, subClusterIds);
- // ### applications info
- Hamlet.UL<Hamlet.LI<Hamlet.UL<Hamlet.DIV<Hamlet>>>> subAppsList2 =
- mainList.li().a(url("apps"), "Applications").ul();
+ // ### nodelabels info
+ initNodeLabelsMenu(mainList, subClusterIds);
- subAppsList2.li().__();
- for (String subClusterId : subClusterIds) {
- Hamlet.LI<Hamlet.UL<Hamlet.LI<Hamlet.UL<Hamlet.DIV<Hamlet>>>>> subAppsList3 = subAppsList2.
- li().a(url("apps", subClusterId), subClusterId);
- Hamlet.UL<Hamlet.LI<Hamlet.UL<Hamlet.LI<Hamlet.UL<Hamlet.DIV<Hamlet>>>>>> subAppsList4 =
- subAppsList3.ul().$style("padding:0.3em 1em 0.1em 2em");
- subAppsList4.li().__();
- for (YarnApplicationState state : YarnApplicationState.values()) {
- subAppsList4.
- li().a(url("apps", subClusterId, state.toString()), state.toString()).__();
- }
- subAppsList4.li().__().__();
- subAppsList3.__();
- }
- subAppsList2.__().__();
+ // ### applications info
+ initApplicationsMenu(mainList, subClusterIds);
// ### tools
 Hamlet.DIV<Hamlet> sectionBefore = mainList.__();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/NodeLabelsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/NodeLabelsBlock.java
new file mode 100644
index 0000000000000..62e2b5d8537e7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/NodeLabelsBlock.java
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.router.webapp;
+
+import com.google.inject.Inject;
+import com.sun.jersey.api.client.Client;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
+import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
+import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.PartitionInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
+import org.apache.hadoop.yarn.server.router.Router;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
+
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_SC;
+
+/**
+ * Node labels block for the Router Web UI.
+ */
+public class NodeLabelsBlock extends RouterBlock {
+
+ private Router router;
+
+ @Inject
+ public NodeLabelsBlock(Router router, ViewContext ctx) {
+ super(router, ctx);
+ this.router = router;
+ }
+
+ @Override
+ protected void render(Block html) {
+ boolean isEnabled = isYarnFederationEnabled();
+
+ // Get subClusterName
+ String subClusterName = $(NODE_SC);
+
+ NodeLabelsInfo nodeLabelsInfo = null;
+ if (StringUtils.isNotEmpty(subClusterName)) {
+ nodeLabelsInfo = getSubClusterNodeLabelsInfo(subClusterName);
+ } else {
+ nodeLabelsInfo = getYarnFederationNodeLabelsInfo(isEnabled);
+ }
+
+ initYarnFederationNodeLabelsOfCluster(nodeLabelsInfo, html);
+ }
+
+ /**
+ * Get the node labels of a single sub-cluster.
+ *
+ * @param subCluster the id of the sub-cluster to query.
+ * @return the NodeLabelsInfo reported by that sub-cluster, or null if it
+ * cannot be retrieved.
+ */
+ private NodeLabelsInfo getSubClusterNodeLabelsInfo(String subCluster) {
+ try {
+ SubClusterId subClusterId = SubClusterId.newInstance(subCluster);
+ FederationStateStoreFacade facade = FederationStateStoreFacade.getInstance();
+ SubClusterInfo subClusterInfo = facade.getSubCluster(subClusterId);
+
+ if (subClusterInfo != null) {
+ // Prepare webAddress
+ String webAddress = subClusterInfo.getRMWebServiceAddress();
+ String herfWebAppAddress = "";
+ if (webAddress != null && !webAddress.isEmpty()) {
+ herfWebAppAddress =
+ WebAppUtils.getHttpSchemePrefix(this.router.getConfig()) + webAddress;
+ return getSubClusterNodeLabelsByWebAddress(herfWebAppAddress);
+ }
+ }
+ } catch (Exception e) {
+ LOG.error("get NodeLabelsInfo From SubCluster = {} error.", subCluster, e);
+ }
+ return null;
+ }
+
+ private NodeLabelsInfo getYarnFederationNodeLabelsInfo(boolean isEnabled) {
+ if (isEnabled) {
+ String webAddress = WebAppUtils.getRouterWebAppURLWithScheme(this.router.getConfig());
+ return getSubClusterNodeLabelsByWebAddress(webAddress);
+ }
+ return null;
+ }
+
+ private NodeLabelsInfo getSubClusterNodeLabelsByWebAddress(String webAddress) {
+ Configuration conf = this.router.getConfig();
+ Client client = RouterWebServiceUtil.createJerseyClient(conf);
+ NodeLabelsInfo nodes = RouterWebServiceUtil
+ .genericForward(webAddress, null, NodeLabelsInfo.class, HTTPMethods.GET,
+ RMWSConsts.RM_WEB_SERVICE_PATH + RMWSConsts.GET_RM_NODE_LABELS, null, null, conf,
+ client);
+ client.destroy();
+ return nodes;
+ }
+
+ private void initYarnFederationNodeLabelsOfCluster(NodeLabelsInfo nodeLabelsInfo, Block html) {
+
+ Hamlet.TBODY<Hamlet.TABLE<Hamlet>> tbody = html.table("#nodelabels").
+ thead().
+ tr().
+ th(".name", "Label Name").
+ th(".type", "Label Type").
+ th(".numOfActiveNMs", "Num Of Active NMs").
+ th(".totalResource", "Total Resource").
+ __().__().
+ tbody();
+
+ if (nodeLabelsInfo != null) {
+ for (NodeLabelInfo info : nodeLabelsInfo.getNodeLabelsInfo()) {
+ Hamlet.TR<Hamlet.TBODY<Hamlet.TABLE<Hamlet>>> row =
+ tbody.tr().td(info.getName().isEmpty() ?
+ NodeLabel.DEFAULT_NODE_LABEL_PARTITION : info.getName());
+ String type = (info.getExclusivity()) ? "Exclusive Partition" : "Non Exclusive Partition";
+ row = row.td(type);
+ int nActiveNMs = info.getActiveNMs();
+ row = row.td(String.valueOf(nActiveNMs));
+ PartitionInfo partitionInfo = info.getPartitionInfo();
+ ResourceInfo available = partitionInfo.getResourceAvailable();
+ row.td(available.toFormattedString()).__();
+ }
+ }
+
+ tbody.__().__();
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/NodeLabelsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/NodeLabelsPage.java
new file mode 100644
index 0000000000000..9b3cea468172a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/NodeLabelsPage.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.router.webapp;
+
+import org.apache.hadoop.yarn.webapp.SubView;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
+
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_SC;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+
+/**
+ * Renders the node labels page of the Router Web UI, including the per-label
+ * metrics information.
+ */
+public class NodeLabelsPage extends RouterView {
+
+ @Override
+ protected void preHead(Hamlet.HTML<__> html) {
+ commonPreHead(html);
+ String type = $(NODE_SC);
+ String title = "Node labels of the cluster";
+ if (type != null && !type.isEmpty()) {
+ title = title + " (" + type + ")";
+ }
+ setTitle(title);
+ set(DATATABLES_ID, "nodelabels");
+ setTableStyles(html, "nodelabels", ".healthStatus {width:10em}", ".healthReport {width:10em}");
+ }
+
+ @Override
+ protected Class<? extends SubView> content() {
+ return NodeLabelsBlock.class;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterBlock.java
index 0a03b25d79dbe..31ab83daaaf10 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterBlock.java
@@ -18,7 +18,9 @@
package org.apache.hadoop.yarn.server.router.webapp;
import com.sun.jersey.api.client.Client;
+import org.apache.commons.collections.CollectionUtils;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId;
@@ -27,6 +29,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWSConsts;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
import org.apache.hadoop.yarn.server.router.Router;
+import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet;
import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
@@ -174,4 +177,84 @@ protected Collection getSubClusterInfoList(String subclusterId)
public FederationStateStoreFacade getFacade() {
return facade;
}
+
+ /**
+ * Initialize the Nodes menu.
+ *
+ * @param mainList HTML Object.
+ * @param subClusterIds subCluster List.
+ */
+ protected void initNodesMenu(Hamlet.UL<Hamlet.DIV<Hamlet>> mainList,
+ List<String> subClusterIds) {
+ if (CollectionUtils.isNotEmpty(subClusterIds)) {
+ Hamlet.UL<Hamlet.LI<Hamlet.UL<Hamlet.DIV<Hamlet>>>> nodesList =
+ mainList.li().a(url("nodes"), "Nodes").ul().
+ $style("padding:0.3em 1em 0.1em 2em");
+
+ // ### nodes info
+ nodesList.li().__();
+ for (String subClusterId : subClusterIds) {
+ nodesList.li().a(url("nodes", subClusterId), subClusterId).__();
+ }
+ nodesList.__().__();
+ } else {
+ mainList.li().a(url("nodes"), "Nodes").__();
+ }
+ }
+
+ /**
+ * Initialize the Applications menu.
+ *
+ * @param mainList HTML Object.
+ * @param subClusterIds subCluster List.
+ */
+ protected void initApplicationsMenu(Hamlet.UL<Hamlet.DIV<Hamlet>> mainList,
+ List<String> subClusterIds) {
+ if (CollectionUtils.isNotEmpty(subClusterIds)) {
+ Hamlet.UL<Hamlet.LI<Hamlet.UL<Hamlet.DIV<Hamlet>>>> apps =
+ mainList.li().a(url("apps"), "Applications").ul();
+ apps.li().__();
+ for (String subClusterId : subClusterIds) {
+ Hamlet.LI<Hamlet.UL<Hamlet.LI<Hamlet.UL<Hamlet.DIV<Hamlet>>>>> subClusterList = apps.
+ li().a(url("apps", subClusterId), subClusterId);
+ Hamlet.UL<Hamlet.LI<Hamlet.UL<Hamlet.LI<Hamlet.UL<Hamlet.DIV<Hamlet>>>>>> subAppStates =
+ subClusterList.ul().$style("padding:0.3em 1em 0.1em 2em");
+ subAppStates.li().__();
+ for (YarnApplicationState state : YarnApplicationState.values()) {
+ subAppStates.
+ li().a(url("apps", subClusterId, state.toString()), state.toString()).__();
+ }
+ subAppStates.li().__().__();
+ subClusterList.__();
+ }
+ apps.__().__();
+ } else {
+ mainList.li().a(url("apps"), "Applications").__();
+ }
+ }
+
+ /**
+ * Initialize the NodeLabels menu.
+ *
+ * @param mainList HTML Object.
+ * @param subClusterIds subCluster List.
+ */
+ protected void initNodeLabelsMenu(Hamlet.UL<Hamlet.DIV<Hamlet>> mainList,
+ List<String> subClusterIds) {
+
+ if (CollectionUtils.isNotEmpty(subClusterIds)) {
+ Hamlet.UL<Hamlet.LI<Hamlet.UL<Hamlet.DIV<Hamlet>>>> nodesList =
+ mainList.li().a(url("nodelabels"), "Node Labels").ul().
+ $style("padding:0.3em 1em 0.1em 2em");
+
+ // ### nodelabels info
+ nodesList.li().__();
+ for (String subClusterId : subClusterIds) {
+ nodesList.li().a(url("nodelabels", subClusterId), subClusterId).__();
+ }
+ nodesList.__().__();
+ } else {
+ mainList.li().a(url("nodelabels"), "Node Labels").__();
+ }
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterController.java
index 38df0e7886c86..7d7165f7cadc8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterController.java
@@ -56,4 +56,9 @@ public void nodes() {
setTitle("Nodes");
render(NodesPage.class);
}
+
+ public void nodeLabels() {
+ setTitle("Node Labels");
+ render(NodeLabelsPage.class);
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebApp.java
index ec99415dc3f33..989a3d43b439d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebApp.java
@@ -52,5 +52,6 @@ public void setup() {
route(pajoin("/apps", APP_SC, APP_STATE), RouterController.class, "apps");
route(pajoin("/nodes", NODE_SC), RouterController.class, "nodes");
route("/federation", RouterController.class, "federation");
+ route(pajoin("/nodelabels", NODE_SC), RouterController.class, "nodeLabels");
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
index 7423c8c907bb7..4182a12f303b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServiceUtil.java
@@ -59,6 +59,8 @@
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.StatisticsItemInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.PartitionInfo;
import org.apache.hadoop.yarn.server.uam.UnmanagedApplicationManager;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import org.apache.hadoop.yarn.webapp.ForbiddenException;
@@ -576,4 +578,40 @@ public static ApplicationStatisticsInfo mergeApplicationStatisticsInfo(
return result;
}
+
+ public static NodeLabelsInfo mergeNodeLabelsInfo(Map<SubClusterInfo, NodeLabelsInfo> paramMap) {
+ Map<String, NodeLabelInfo> resultMap = new HashMap<>();
+ paramMap.values().stream()
+ .flatMap(nodeLabelsInfo -> nodeLabelsInfo.getNodeLabelsInfo().stream())
+ .forEach(nodeLabelInfo -> {
+ String keyLabelName = nodeLabelInfo.getName();
+ if (resultMap.containsKey(keyLabelName)) {
+ NodeLabelInfo mapNodeLabelInfo = resultMap.get(keyLabelName);
+ mapNodeLabelInfo = mergeNodeLabelInfo(mapNodeLabelInfo, nodeLabelInfo);
+ resultMap.put(keyLabelName, mapNodeLabelInfo);
+ } else {
+ resultMap.put(keyLabelName, nodeLabelInfo);
+ }
+ });
+ NodeLabelsInfo nodeLabelsInfo = new NodeLabelsInfo();
+ nodeLabelsInfo.getNodeLabelsInfo().addAll(resultMap.values());
+ return nodeLabelsInfo;
+ }
+
+ private static NodeLabelInfo mergeNodeLabelInfo(NodeLabelInfo left, NodeLabelInfo right) {
+ NodeLabelInfo resultNodeLabelInfo = new NodeLabelInfo();
+ resultNodeLabelInfo.setName(left.getName());
+
+ int newActiveNMs = left.getActiveNMs() + right.getActiveNMs();
+ resultNodeLabelInfo.setActiveNMs(newActiveNMs);
+
+ boolean newExclusivity = left.getExclusivity() && right.getExclusivity();
+ resultNodeLabelInfo.setExclusivity(newExclusivity);
+
+ PartitionInfo leftPartition = left.getPartitionInfo();
+ PartitionInfo rightPartition = right.getPartitionInfo();
+ PartitionInfo newPartitionInfo = PartitionInfo.addTo(leftPartition, rightPartition);
+ resultNodeLabelInfo.setPartitionInfo(newPartitionInfo);
+ return resultNodeLabelInfo;
+ }
}
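
mergeNodeLabelsInfo collapses the per-sub-cluster reports into one entry per label name: active NodeManager counts are summed, exclusivity is the logical AND of the reports, and the partition resources are combined through PartitionInfo.addTo. The following is a hedged, test-style sketch of how the merge could be exercised; the Mockito-mocked SubClusterInfo keys and the "gpu" label are placeholders only, since the method reads just the map values.

// Hedged sketch: exercises the merge semantics implemented above.
import static org.mockito.Mockito.mock;

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.PartitionInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
import org.apache.hadoop.yarn.server.router.webapp.RouterWebServiceUtil;

public class MergeNodeLabelsSketch {

  // Builds a single-label report the way one sub-cluster RM would return it.
  private static NodeLabelsInfo report(String label, int activeNMs, int memoryMb, int vcores) {
    NodeLabelInfo info = new NodeLabelInfo();
    info.setName(label);
    info.setExclusivity(true);
    info.setActiveNMs(activeNMs);
    info.setPartitionInfo(new PartitionInfo(
        new ResourceInfo(Resource.newInstance(memoryMb, vcores))));
    NodeLabelsInfo labels = new NodeLabelsInfo();
    labels.getNodeLabelsInfo().add(info);
    return labels;
  }

  public static void main(String[] args) {
    // Two sub-clusters report the same "gpu" label; the mocked keys are placeholders.
    Map<SubClusterInfo, NodeLabelsInfo> reports = new HashMap<>();
    reports.put(mock(SubClusterInfo.class), report("gpu", 3, 8192, 8));
    reports.put(mock(SubClusterInfo.class), report("gpu", 2, 4096, 4));

    NodeLabelsInfo merged = RouterWebServiceUtil.mergeNodeLabelsInfo(reports);
    // Expected: a single "gpu" entry with activeNMs == 5 and the partition resources added.
    merged.getNodeLabelsInfo().forEach(l ->
        System.out.println(l.getName() + " activeNMs=" + l.getActiveNMs()));
  }
}
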
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java
index b1dc8635b3fd1..02f545beda085 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/main/java/org/apache/hadoop/yarn/server/router/webapp/RouterWebServices.java
@@ -943,4 +943,15 @@ public Response signalToContainer(
return pipeline.getRootInterceptor()
.signalToContainer(containerId, command, req);
}
+
+ @GET
+ @Path(RMWSConsts.GET_RM_NODE_LABELS)
+ @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
+ MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+ public NodeLabelsInfo getRMNodeLabels(@Context HttpServletRequest hsr)
+ throws IOException {
+ init();
+ RequestInterceptorChainWrapper pipeline = getInterceptorChain(hsr);
+ return pipeline.getRootInterceptor().getRMNodeLabels(hsr);
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/TestRouterMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/TestRouterMetrics.java
index c74780089ee20..828e5c69f35ef 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/TestRouterMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/TestRouterMetrics.java
@@ -514,6 +514,11 @@ public void getAppTimeoutsFailed() {
metrics.incrGetAppTimeoutsFailedRetrieved();
}
+ public void getRMNodeLabelsFailed() {
+ LOG.info("Mocked: failed getRMNodeLabelsFailed call");
+ metrics.incrGetRMNodeLabelsFailedRetrieved();
+ }
+
public void getCheckUserAccessToQueueRetrieved() {
LOG.info("Mocked: failed checkUserAccessToQueueRetrieved call");
metrics.incrCheckUserAccessToQueueFailedRetrieved();
@@ -729,6 +734,11 @@ public void getAppTimeoutsRetrieved(long duration) {
metrics.succeededGetAppTimeoutsRetrieved(duration);
}
+ public void getRMNodeLabelsRetrieved(long duration) {
+ LOG.info("Mocked: successful getRMNodeLabels call with duration {}", duration);
+ metrics.succeededGetRMNodeLabelsRetrieved(duration);
+ }
+
public void getCheckUserAccessToQueueRetrieved(long duration) {
LOG.info("Mocked: successful CheckUserAccessToQueue call with duration {}", duration);
metrics.succeededCheckUserAccessToQueueRetrieved(duration);
@@ -1476,6 +1486,29 @@ public void testGetAppTimeoutsRetrievedFailed() {
metrics.getAppTimeoutsFailedRetrieved());
}
+ @Test
+ public void testGetRMNodeLabelsRetrieved() {
+ long totalGoodBefore = metrics.getNumSucceededGetRMNodeLabelsRetrieved();
+ goodSubCluster.getRMNodeLabelsRetrieved(150);
+ Assert.assertEquals(totalGoodBefore + 1,
+ metrics.getNumSucceededGetRMNodeLabelsRetrieved());
+ Assert.assertEquals(150,
+ metrics.getLatencySucceededGetRMNodeLabelsRetrieved(), ASSERT_DOUBLE_DELTA);
+ goodSubCluster.getRMNodeLabelsRetrieved(300);
+ Assert.assertEquals(totalGoodBefore + 2,
+ metrics.getNumSucceededGetRMNodeLabelsRetrieved());
+ Assert.assertEquals(225,
+ metrics.getLatencySucceededGetRMNodeLabelsRetrieved(), ASSERT_DOUBLE_DELTA);
+ }
+
+ @Test
+ public void testGetRMNodeLabelsRetrievedFailed() {
+ long totalBadBefore = metrics.getRMNodeLabelsFailedRetrieved();
+ badSubCluster.getRMNodeLabelsFailed();
+ Assert.assertEquals(totalBadBefore + 1,
+ metrics.getRMNodeLabelsFailedRetrieved());
+ }
+
@Test
public void testCheckUserAccessToQueueRetrievedRetrieved() {
long totalGoodBefore = metrics.getNumSucceededCheckUserAccessToQueueRetrievedRetrieved();
@@ -1498,4 +1531,4 @@ public void testCheckUserAccessToQueueRetrievedFailed() {
Assert.assertEquals(totalBadBefore + 1,
metrics.getCheckUserAccessToQueueFailedRetrieved());
}
-}
+}
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java
index 70bae1cac0f44..488ca095d78e9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockDefaultRequestInterceptorREST.java
@@ -134,6 +134,7 @@
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationUpdateResponseInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationDeleteResponseInfo;
import org.apache.hadoop.yarn.server.router.RouterServerUtil;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.PartitionInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.RMQueueAclInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices;
import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
@@ -1060,6 +1061,42 @@ public MockRM getMockRM() {
public void setMockRM(MockRM mockRM) {
this.mockRM = mockRM;
}
+
+ @Override
+ public NodeLabelsInfo getRMNodeLabels(HttpServletRequest hsr) {
+
+ NodeLabelInfo nodeLabelInfo = new NodeLabelInfo();
+ nodeLabelInfo.setExclusivity(true);
+ nodeLabelInfo.setName("Test-Label");
+ nodeLabelInfo.setActiveNMs(10);
+ PartitionInfo partitionInfo = new PartitionInfo();
+ nodeLabelInfo.setPartitionInfo(partitionInfo);
+
+ NodeLabelsInfo nodeLabelsInfo = new NodeLabelsInfo();
+ nodeLabelsInfo.getNodeLabelsInfo().add(nodeLabelInfo);
+
+ return nodeLabelsInfo;
+ }
+
+ private MockRM setupResourceManager() throws Exception {
+ DefaultMetricsSystem.setMiniClusterMode(true);
+
+ CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
+
+ // Define default queue
+ conf.setCapacity(QUEUE_DEFAULT_FULL, 20);
+ // Define dedicated queues
+ conf.setQueues(CapacitySchedulerConfiguration.ROOT,
+ new String[] {QUEUE_DEFAULT, QUEUE_DEDICATED});
+ conf.setCapacity(QUEUE_DEDICATED_FULL, 80);
+ conf.setReservable(QUEUE_DEDICATED_FULL, true);
+
+ conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
+ conf.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true);
+ MockRM rm = new MockRM(conf);
+ rm.start();
+ rm.registerNode("127.0.0.1:5678", 100*1024, 100);
+ return rm;
+ }
@Override
public RMQueueAclInfo checkUserAccessToQueue(String queue, String username,
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java
index 5951676a6d8e9..a09199b9e856b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/MockRESTRequestInterceptor.java
@@ -185,6 +185,11 @@ public NodeToLabelsInfo getNodeToLabels(HttpServletRequest hsr)
return new NodeToLabelsInfo();
}
+ @Override
+ public NodeLabelsInfo getRMNodeLabels(HttpServletRequest hsr) throws IOException {
+ return new NodeLabelsInfo();
+ }
+
@Override
 public LabelsToNodesInfo getLabelsToNodes(Set<String> labels)
throws IOException {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java
index 84a6de3205f5e..1bffd40db3c19 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/PassThroughRESTRequestInterceptor.java
@@ -217,6 +217,11 @@ public NodeToLabelsInfo getNodeToLabels(HttpServletRequest hsr)
return getNextInterceptor().getNodeToLabels(hsr);
}
+ @Override
+ public NodeLabelsInfo getRMNodeLabels(HttpServletRequest hsr) throws IOException {
+ return getNextInterceptor().getRMNodeLabels(hsr);
+ }
+
@Override
 public LabelsToNodesInfo getLabelsToNodes(Set<String> labels)
throws IOException {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationWebApp.java
index dad73b206b5a5..4ec482c615a58 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationWebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-router/src/test/java/org/apache/hadoop/yarn/server/router/webapp/TestFederationWebApp.java
@@ -99,4 +99,22 @@ public void testFederationAppViewNotEnable()
config.setBoolean(YarnConfiguration.FEDERATION_ENABLED, false);
WebAppTests.testPage(AppsPage.class, Router.class, new MockRouter(config));
}
+
+ @Test
+ public void testNodeLabelAppViewNotEnable()
+ throws InterruptedException, YarnException, IOException {
+ // Test Federation Not Enabled
+ Configuration config = new YarnConfiguration();
+ config.setBoolean(YarnConfiguration.FEDERATION_ENABLED, false);
+ WebAppTests.testPage(NodeLabelsPage.class, Router.class, new MockRouter(config));
+ }
+
+ @Test
+ public void testNodeLabelAppViewEnable()
+ throws InterruptedException, YarnException, IOException {
+ // Test Federation Enabled
+ Configuration config = new YarnConfiguration();
+ config.setBoolean(YarnConfiguration.FEDERATION_ENABLED, true);
+ WebAppTests.testPage(NodeLabelsPage.class, Router.class, new MockRouter(config));
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersion.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersion.java
deleted file mode 100644
index 57439de078fbf..0000000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersion.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timeline;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-@Retention(value = RetentionPolicy.RUNTIME)
-@Target(value = {ElementType.METHOD})
-public @interface TimelineVersion {
- float value() default TimelineVersionWatcher.DEFAULT_TIMELINE_VERSION;
-}
-
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersionWatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersionWatcher.java
deleted file mode 100644
index e06281ce33d40..0000000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/timeline/TimelineVersionWatcher.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.timeline;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-import org.junit.jupiter.api.extension.TestWatcher;
-
-@InterfaceAudience.Private
-@InterfaceStability.Unstable
-public class TimelineVersionWatcher implements TestWatcher {
- static final float DEFAULT_TIMELINE_VERSION = 1.0f;
- private TimelineVersion version;
-
-}
diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml
index abaf2e869c48f..298fa597e9940 100644
--- a/hadoop-yarn-project/pom.xml
+++ b/hadoop-yarn-project/pom.xml
@@ -81,6 +81,11 @@
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-yarn-services-core</artifactId>
+ <dependency>
+   <groupId>org.apache.hadoop</groupId>
+   <artifactId>hadoop-yarn-applications-catalog-webapp</artifactId>
+   <type>war</type>
+ </dependency>