acFlushSASToken = ArgumentCaptor.forClass(String.class);
verify(client, times(1)).flush(acFlushPath.capture(), acFlushPosition.capture(), acFlushRetainUnCommittedData.capture(), acFlushClose.capture(),
- acFlushSASToken.capture());
+ acFlushSASToken.capture(), isNull());
assertThat(Arrays.asList(PATH)).describedAs("path").isEqualTo(acFlushPath.getAllValues());
assertThat(Arrays.asList(Long.valueOf(2*BUFFER_SIZE))).describedAs("position").isEqualTo(acFlushPosition.getAllValues());
assertThat(Arrays.asList(false)).describedAs("RetainUnCommittedData flag").isEqualTo(acFlushRetainUnCommittedData.getAllValues());
@@ -388,7 +389,7 @@ public void verifyWriteRequestOfBufferSizeAndFlush() throws Exception {
AbfsPerfTracker tracker = new AbfsPerfTracker("test", accountName1, abfsConf);
when(client.getAbfsPerfTracker()).thenReturn(tracker);
when(client.append(anyString(), any(byte[].class), any(AppendRequestParameters.class), any())).thenReturn(op);
- when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any())).thenReturn(op);
+ when(client.flush(anyString(), anyLong(), anyBoolean(), anyBoolean(), any(), isNull())).thenReturn(op);
AbfsOutputStream out = new AbfsOutputStream(client, null, PATH, 0,
populateAbfsOutputStreamContext(BUFFER_SIZE, true, false, false));
@@ -403,9 +404,9 @@ public void verifyWriteRequestOfBufferSizeAndFlush() throws Exception {
Thread.sleep(1000);
AppendRequestParameters firstReqParameters = new AppendRequestParameters(
- 0, 0, BUFFER_SIZE, APPEND_MODE, false);
+ 0, 0, BUFFER_SIZE, APPEND_MODE, false, null);
AppendRequestParameters secondReqParameters = new AppendRequestParameters(
- BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false);
+ BUFFER_SIZE, 0, BUFFER_SIZE, APPEND_MODE, false, null);
verify(client, times(1)).append(
eq(PATH), any(byte[].class), refEq(firstReqParameters), any());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java
index 81d98b5604885..5a00fa93f0c6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java
@@ -395,7 +395,7 @@ public abstract void setStartRange(long begin, long end)
public abstract void setScope(ApplicationsRequestScope scope);
/**
- * Set the name to filter applications.
+ * Get the name to filter applications.
*
* @return the name
*/
@@ -404,7 +404,7 @@ public abstract void setStartRange(long begin, long end)
public abstract String getName();
/**
- * Get the name to filter applications.
+ * Set the name to filter applications.
*
* @param name of the application
*/
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
index f7c75a6079ffb..3654965c05df1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java
@@ -902,4 +902,19 @@ private static void validateResourceTypes(
}
}
}
+
+ public static StringBuilder
+ getCustomResourcesStrings(Resource resource) {
+ StringBuilder res = new StringBuilder();
+ if (ResourceUtils.getNumberOfKnownResourceTypes() > 2) {
+ ResourceInformation[] resources =
+ resource.getResources();
+ for (int i = 2; i < resources.length; i++) {
+ ResourceInformation resInfo = resources[i];
+ res.append(","
+ + resInfo.getName() + "=" + resInfo.getValue());
+ }
+ }
+ return res;
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties
index cf499b8b46c0a..678e3a74c897a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties
@@ -16,7 +16,7 @@ hadoop.root.logger=DEBUG,CLA
yarn.app.mapreduce.shuffle.logger=${hadoop.root.logger}
# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
+log4j.rootLogger=${hadoop.root.logger}
# Logging Threshold
log4j.threshold=ALL
@@ -69,8 +69,3 @@ log4j.additivity.org.apache.hadoop.mapreduce.task.reduce=false
#
log4j.logger.org.apache.hadoop.mapred.Merger=${yarn.app.mapreduce.shuffle.logger}
log4j.additivity.org.apache.hadoop.mapred.Merger=false
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
index 06575be4c7a3e..250fcc716d645 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AbstractCSQueue.java
@@ -18,25 +18,15 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-
-import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
+import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.QueueACL;
@@ -58,13 +48,13 @@
import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.AbsoluteResourceType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueResourceQuotas;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.activities.ActivitiesManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.AbsoluteResourceType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ContainerAllocationProposal;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCommitRequest;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
@@ -74,8 +64,17 @@
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceUtils;
import org.apache.hadoop.yarn.util.resource.Resources;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.UNDEFINED;
@@ -361,6 +360,10 @@ protected void setupQueueConfigs(Resource clusterResource,
writeLock.lock();
try {
+ if (isDynamicQueue() && getParent() instanceof ParentQueue) {
+ ((ParentQueue) getParent()).getAutoCreatedQueueTemplate()
+ .setTemplateEntriesForChild(configuration, getQueuePath());
+ }
// get labels
this.accessibleLabels =
configuration.getAccessibleNodeLabels(getQueuePath());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueTemplate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueTemplate.java
new file mode 100644
index 0000000000000..6c516c04770b5
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AutoCreatedQueueTemplate.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.AUTO_QUEUE_CREATION_V2_PREFIX;
+
+/**
+ * A handler for storing and setting auto created queue template settings.
+ */
+public class AutoCreatedQueueTemplate {
+ public static final String AUTO_QUEUE_TEMPLATE_PREFIX =
+ AUTO_QUEUE_CREATION_V2_PREFIX + "template.";
+ private static final String WILDCARD_QUEUE = "*";
+ private static final int MAX_WILDCARD_LEVEL = 1;
+
+ private final Map<String, String> templateProperties = new HashMap<>();
+
+ public AutoCreatedQueueTemplate(Configuration configuration,
+ String queuePath) {
+ setTemplateConfigEntries(configuration, queuePath);
+ }
+
+ @VisibleForTesting
+ public static String getAutoQueueTemplatePrefix(String queue) {
+ return CapacitySchedulerConfiguration.getQueuePrefix(queue)
+ + AUTO_QUEUE_TEMPLATE_PREFIX;
+ }
+
+ /**
+ * Get the template properties attached to a parent queue.
+ * @return template property names and values
+ */
+ public Map<String, String> getTemplateProperties() {
+ return templateProperties;
+ }
+
+ /**
+ * Sets the configuration properties of a child queue based on its parent
+ * template settings.
+ * @param conf configuration to set
+ * @param childQueuePath child queue path used for prefixing the properties
+ */
+ public void setTemplateEntriesForChild(Configuration conf,
+ String childQueuePath) {
+ // Get all properties that are explicitly set
+ Set<String> alreadySetProps = conf.getPropsWithPrefix(
+ CapacitySchedulerConfiguration.getQueuePrefix(childQueuePath)).keySet();
+
+ for (Map.Entry<String, String> entry : templateProperties.entrySet()) {
+ // Do not overwrite explicitly configured properties
+ if (alreadySetProps.contains(entry.getKey())) {
+ continue;
+ }
+ conf.set(CapacitySchedulerConfiguration.getQueuePrefix(
+ childQueuePath) + entry.getKey(), entry.getValue());
+ }
+ }
+
+ /**
+ * Store the template configuration properties. Explicit templates always take
+ * precedence over wildcard values. An example template precedence
+ * hierarchy for root.a ParentQueue from highest to lowest:
+ * yarn.scheduler.capacity.root.a.auto-queue-creation-v2.template.capacity
+ * yarn.scheduler.capacity.root.*.auto-queue-creation-v2.template.capacity
+ */
+ private void setTemplateConfigEntries(Configuration configuration,
+ String queuePath) {
+ List<String> queuePathParts = new ArrayList<>(Arrays.asList(
+ queuePath.split("\\.")));
+
+ if (queuePathParts.size() <= 1) {
+ // This is either root or an empty queue name
+ return;
+ }
+ int queuePathMaxIndex = queuePathParts.size() - 1;
+
+ // start with the most explicit format (without wildcard)
+ int wildcardLevel = 0;
+ // root can not be wildcarded
+ // MAX_WILDCARD_LEVEL will be configurable in the future
+ int supportedWildcardLevel = Math.min(queuePathMaxIndex - 1,
+ MAX_WILDCARD_LEVEL);
+
+
+ // Collect all template entries
+ while (wildcardLevel <= supportedWildcardLevel) {
+ // Get all config entries with the specified prefix
+ String templateQueuePath = String.join(".", queuePathParts);
+ // Get all configuration entries with
+ // .auto-queue-creation-v2.template prefix
+ Map<String, String> props = configuration.getPropsWithPrefix(
+ getAutoQueueTemplatePrefix(templateQueuePath));
+
+ for (Map.Entry<String, String> entry : props.entrySet()) {
+ // If an entry is already present, it had a higher precedence
+ templateProperties.putIfAbsent(entry.getKey(), entry.getValue());
+ }
+
+ // Replace a queue part with a wildcard based on the wildcard level
+ // eg. root.a -> root.*
+ int queuePartToWildcard = queuePathMaxIndex - wildcardLevel;
+ queuePathParts.set(queuePartToWildcard, WILDCARD_QUEUE);
+
+ ++wildcardLevel;
+ }
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 5bae2b375f1a8..074e3711ba757 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -2029,7 +2029,7 @@ public void setDefaultLifetimePerQueue(String queue, long defaultLifetime) {
AUTO_CREATE_CHILD_QUEUE_PREFIX + "enabled";
@Private
- private static final String AUTO_QUEUE_CREATION_V2_PREFIX =
+ protected static final String AUTO_QUEUE_CREATION_V2_PREFIX =
"auto-queue-creation-v2.";
@Private
@@ -2350,11 +2350,14 @@ public void setAutoCreatedLeafQueueTemplateCapacityByLabel(String queuePath,
getAutoCreatedQueueTemplateConfPrefix(queuePath);
StringBuilder resourceString = new StringBuilder();
+
resourceString
.append("[" + AbsoluteResourceType.MEMORY.toString().toLowerCase() + "="
+ resource.getMemorySize() + ","
+ AbsoluteResourceType.VCORES.toString().toLowerCase() + "="
- + resource.getVirtualCores() + "]");
+ + resource.getVirtualCores()
+ + ResourceUtils.
+ getCustomResourcesStrings(resource) + "]");
setCapacityByLabel(leafQueueConfPrefix, label, resourceString.toString());
}
@@ -2385,11 +2388,14 @@ public void setAutoCreatedLeafQueueTemplateMaxCapacity(String queuePath,
queuePath);
StringBuilder resourceString = new StringBuilder();
+
resourceString
.append("[" + AbsoluteResourceType.MEMORY.toString().toLowerCase() + "="
+ resource.getMemorySize() + ","
+ AbsoluteResourceType.VCORES.toString().toLowerCase() + "="
- + resource.getVirtualCores() + "]");
+ + resource.getVirtualCores()
+ + ResourceUtils.
+ getCustomResourcesStrings(resource) + "]");
setMaximumCapacityByLabel(leafQueueConfPrefix, label, resourceString.toString());
}
@@ -2489,11 +2495,14 @@ private void updateMinMaxResourceToConf(String label, String queue,
}
StringBuilder resourceString = new StringBuilder();
+
resourceString
.append("[" + AbsoluteResourceType.MEMORY.toString().toLowerCase() + "="
+ resource.getMemorySize() + ","
+ AbsoluteResourceType.VCORES.toString().toLowerCase() + "="
- + resource.getVirtualCores() + "]");
+ + resource.getVirtualCores()
+ + ResourceUtils.
+ getCustomResourcesStrings(resource) + "]");
String prefix = getQueuePrefix(queue) + type;
if (!label.isEmpty()) {
@@ -2567,8 +2576,12 @@ private Resource internalGetLabeledResourceRequirementForQueue(String queue,
private void updateResourceValuesFromConfig(Set<String> resourceTypes,
Resource resource, String[] splits) {
+ String resourceName = splits[0].trim();
+
// If key is not a valid type, skip it.
- if (!resourceTypes.contains(splits[0])) {
+ if (!resourceTypes.contains(resourceName)
+ && !ResourceUtils.getResourceTypes().containsKey(resourceName)) {
+ LOG.error(resourceName + " not supported.");
return;
}
@@ -2581,9 +2594,17 @@ private void updateResourceValuesFromConfig(Set<String> resourceTypes,
resourceValue = UnitsConversionUtil.convert(units, "Mi", resourceValue);
}
+ // Custom resource type defined by user.
+ // Such as GPU FPGA etc.
+ if (!resourceTypes.contains(resourceName)) {
+ resource.setResourceInformation(resourceName, ResourceInformation
+ .newInstance(resourceName, units, resourceValue));
+ return;
+ }
+
// map it based on key.
AbsoluteResourceType resType = AbsoluteResourceType
- .valueOf(StringUtils.toUpperCase(splits[0].trim()));
+ .valueOf(StringUtils.toUpperCase(resourceName));
switch (resType) {
case MEMORY :
resource.setMemorySize(resourceValue);
@@ -2592,8 +2613,8 @@ private void updateResourceValuesFromConfig(Set resourceTypes,
resource.setVirtualCores(resourceValue.intValue());
break;
default :
- resource.setResourceInformation(splits[0].trim(), ResourceInformation
- .newInstance(splits[0].trim(), units, resourceValue));
+ resource.setResourceInformation(resourceName, ResourceInformation
+ .newInstance(resourceName, units, resourceValue));
break;
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
index 00d1cda30886e..5cd14908e8914 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerQueueManager.java
@@ -27,8 +27,6 @@
import java.util.Map;
import java.util.Set;
-import org.apache.hadoop.yarn.util.resource.ResourceUtils;
-import org.apache.hadoop.yarn.util.resource.Resources;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -340,11 +338,14 @@ private void updateQueues(CSQueueStore existingQueues,
}
for (CSQueue queue : existingQueues.getQueues()) {
- if (!((AbstractCSQueue) queue).isDynamicQueue() && newQueues.get(
- queue.getQueuePath()) == null && !(
- queue instanceof AutoCreatedLeafQueue && conf
- .isAutoCreateChildQueueEnabled(
- queue.getParent().getQueuePath()))) {
+ boolean isDanglingDynamicQueue = isDanglingDynamicQueue(
+ newQueues, existingQueues, queue);
+ boolean isRemovable = isDanglingDynamicQueue || !isDynamicQueue(queue)
+ && newQueues.get(queue.getQueuePath()) == null
+ && !(queue instanceof AutoCreatedLeafQueue &&
+ conf.isAutoCreateChildQueueEnabled(queue.getParent().getQueuePath()));
+
+ if (isRemovable) {
existingQueues.remove(queue);
}
}
@@ -435,4 +436,32 @@ private Map<String, Set<String>> getQueueToLabels() {
getQueueStateManager() {
return this.queueStateManager;
}
+
+ private boolean isDynamicQueue(CSQueue queue) {
+ return (queue instanceof AbstractCSQueue) &&
+ ((AbstractCSQueue) queue).isDynamicQueue();
+ }
+
+ private boolean isDanglingDynamicQueue(
+ CSQueueStore newQueues, CSQueueStore existingQueues,
+ CSQueue queue) {
+ if (!isDynamicQueue(queue)) {
+ return false;
+ }
+ if (queue.getParent() == null) {
+ return true;
+ }
+ if (newQueues.get(queue.getParent().getQueuePath()) != null) {
+ return false;
+ }
+ CSQueue parent = existingQueues.get(queue.getParent().getQueuePath());
+ if (parent == null) {
+ return true;
+ }
+ // A dynamic queue is dangling, if its parent is not parsed in newQueues
+ // or if its parent is not a dynamic queue. Dynamic queues are not parsed in
+ // newQueues but they are deleted automatically, so it is safe to assume
+ // that existingQueues contain valid dynamic queues.
+ return !isDynamicQueue(parent);
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
index 3d28933141359..798c71037845d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java
@@ -30,6 +30,7 @@
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.thirdparty.com.google.common.collect.Sets;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException;
import org.slf4j.Logger;
@@ -101,6 +102,8 @@ public class ParentQueue extends AbstractCSQueue {
private final boolean allowZeroCapacitySum;
+ private AutoCreatedQueueTemplate autoCreatedQueueTemplate;
+
// effective min ratio per resource, it is used during updateClusterResource,
// leaf queue can use this to calculate effective resources.
// This field will not be edited, reference will point to a new immutable map
@@ -152,6 +155,8 @@ protected void setupQueueConfigs(Resource clusterResource,
throws IOException {
writeLock.lock();
try {
+ autoCreatedQueueTemplate = new AutoCreatedQueueTemplate(
+ csConf, getQueuePath());
super.setupQueueConfigs(clusterResource, csConf);
StringBuilder aclsString = new StringBuilder();
for (Map.Entry<AccessType, AccessControlList> e : acls.entrySet()) {
@@ -477,6 +482,8 @@ private CapacitySchedulerConfiguration getConfForAutoCreatedQueue(
CapacitySchedulerConfiguration dupCSConfig =
new CapacitySchedulerConfiguration(
csContext.getConfiguration(), false);
+ autoCreatedQueueTemplate.setTemplateEntriesForChild(dupCSConfig,
+ childQueuePath);
if (isLeaf) {
// set to -1, to disable it
dupCSConfig.setUserLimitFactor(childQueuePath, -1);
@@ -647,6 +654,18 @@ public void reinitialize(CSQueue newlyParsedQueue,
Map<String, CSQueue> currentChildQueues = getQueuesMap(childQueues);
Map<String, CSQueue> newChildQueues = getQueuesMap(
newlyParsedParentQueue.childQueues);
+
+ // Reinitialize dynamic queues as well, because they are not parsed
+ for (String queue : Sets.difference(currentChildQueues.keySet(),
+ newChildQueues.keySet())) {
+ CSQueue candidate = currentChildQueues.get(queue);
+ if (candidate instanceof AbstractCSQueue) {
+ if (((AbstractCSQueue) candidate).isDynamicQueue()) {
+ candidate.reinitialize(candidate, clusterResource);
+ }
+ }
+ }
+
for (Map.Entry<String, CSQueue> e : newChildQueues.entrySet()) {
String newChildQueueName = e.getKey();
CSQueue newChildQueue = e.getValue();
@@ -1217,7 +1236,9 @@ public void updateClusterResource(Resource clusterResource,
// For dynamic queue, we will set weight to 1 every time, because it
// is possible new labels added to the parent.
if (((AbstractCSQueue) queue).isDynamicQueue()) {
- queue.getQueueCapacities().setWeight(nodeLabel, 1f);
+ if (queue.getQueueCapacities().getWeight(nodeLabel) == -1f) {
+ queue.getQueueCapacities().setWeight(nodeLabel, 1f);
+ }
}
}
}
@@ -1637,4 +1658,8 @@ public boolean isEligibleForAutoDeletion() {
csContext.getConfiguration().
isAutoExpiredDeletionEnabled(this.getQueuePath());
}
+
+ public AutoCreatedQueueTemplate getAutoCreatedQueueTemplate() {
+ return autoCreatedQueueTemplate;
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
index 8e8a2610f0ce7..545ae881803ed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import java.util.Collection;
+import java.util.Map;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_LABEL;
import static org.apache.hadoop.yarn.webapp.YarnWebParams.NODE_STATE;
@@ -90,9 +91,7 @@ protected void render(Block html) {
.th(".mem", "Phys Mem Used %")
.th(".vcores", "VCores Used")
.th(".vcores", "VCores Avail")
- .th(".vcores", "Phys VCores Used %")
- .th(".gpus", "GPUs Used")
- .th(".gpus", "GPUs Avail");
+ .th(".vcores", "Phys VCores Used %");
} else {
trbody.th(".containers", "Running Containers (G)")
.th(".allocationTags", "Allocation Tags")
@@ -102,14 +101,26 @@ protected void render(Block html) {
.th(".vcores", "VCores Used (G)")
.th(".vcores", "VCores Avail (G)")
.th(".vcores", "Phys VCores Used %")
- .th(".gpus", "GPUs Used (G)")
- .th(".gpus", "GPUs Avail (G)")
.th(".containers", "Running Containers (O)")
.th(".mem", "Mem Used (O)")
.th(".vcores", "VCores Used (O)")
.th(".containers", "Queued Containers");
}
+ for (Map.Entry<String, Integer> integerEntry :
+ ResourceUtils.getResourceTypeIndex().entrySet()) {
+ if (integerEntry.getKey().equals(ResourceInformation.MEMORY_URI)
+ || integerEntry.getKey().equals(ResourceInformation.VCORES_URI)) {
+ continue;
+ }
+
+ trbody.th("." + integerEntry.getKey(),
+ integerEntry.getKey() + " " + "Used");
+
+ trbody.th("." + integerEntry.getKey(),
+ integerEntry.getKey() + " " + "Avail");
+ }
+
TBODY<TABLE<Hamlet>> tbody =
trbody.th(".nodeManagerVersion", "Version").__().__().tbody();
@@ -175,17 +186,7 @@ protected void render(Block html) {
nodeTableData.append("\",\"").append(httpAddress).append("\",").append("\"");
}
- Integer gpuIndex = ResourceUtils.getResourceTypeIndex()
- .get(ResourceInformation.GPU_URI);
- long usedGPUs = 0;
- long availableGPUs = 0;
- if (gpuIndex != null && info.getUsedResource() != null
- && info.getAvailableResource() != null) {
- usedGPUs = info.getUsedResource().getResource()
- .getResourceValue(ResourceInformation.GPU_URI);
- availableGPUs = info.getAvailableResource().getResource()
- .getResourceValue(ResourceInformation.GPU_URI);
- }
+
nodeTableData.append("
")
.append(Times.format(info.getLastHealthUpdate())).append("\",\"")
@@ -205,10 +206,6 @@ protected void render(Block html) {
.append(String.valueOf(info.getAvailableVirtualCores()))
.append("\",\"")
.append(String.valueOf((int) info.getVcoreUtilization()))
- .append("\",\"")
- .append(String.valueOf(usedGPUs))
- .append("\",\"")
- .append(String.valueOf(availableGPUs))
.append("\",\"");
// If opportunistic containers are enabled, add extra fields.
@@ -226,6 +223,34 @@ protected void render(Block html) {
.append("\",\"");
}
+ for (Map.Entry<String, Integer> integerEntry :
+ ResourceUtils.getResourceTypeIndex().entrySet()) {
+ if (integerEntry.getKey().equals(ResourceInformation.MEMORY_URI)
+ || integerEntry.getKey().equals(ResourceInformation.VCORES_URI)) {
+ continue;
+ }
+
+ long usedCustomResource = 0;
+ long availableCustomResource = 0;
+
+ String resourceName = integerEntry.getKey();
+ Integer index = integerEntry.getValue();
+
+ if (index != null && info.getUsedResource() != null
+ && info.getAvailableResource() != null) {
+ usedCustomResource = info.getUsedResource().getResource()
+ .getResourceValue(resourceName);
+ availableCustomResource = info.getAvailableResource().getResource()
+ .getResourceValue(resourceName);
+
+ nodeTableData
+ .append(usedCustomResource)
+ .append("\",\"")
+ .append(availableCustomResource)
+ .append("\",\"");
+ }
+ }
+
nodeTableData.append(ni.getNodeManagerVersion())
.append("\"],\n");
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAutoCreatedQueueTemplate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAutoCreatedQueueTemplate.java
new file mode 100644
index 0000000000000..1c021f6efb04d
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestAutoCreatedQueueTemplate.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestAutoCreatedQueueTemplate {
+ private static final String TEST_QUEUE_ABC = "root.a.b.c";
+ private static final String TEST_QUEUE_AB = "root.a.b";
+ private static final String TEST_QUEUE_A = "root.a";
+ private static final String ROOT = "root";
+ private CapacitySchedulerConfiguration conf;
+
+ @Before
+ public void setUp() throws Exception {
+ conf = new CapacitySchedulerConfiguration();
+ conf.setQueues("root", new String[]{"a"});
+ conf.setQueues("a", new String[]{"b"});
+ conf.setQueues("b", new String[]{"c"});
+
+ }
+
+ @Test
+ public void testNonWildCardTemplate() {
+ conf.set(getTemplateKey(TEST_QUEUE_AB, "capacity"), "6w");
+ AutoCreatedQueueTemplate template =
+ new AutoCreatedQueueTemplate(conf, TEST_QUEUE_AB);
+ template.setTemplateEntriesForChild(conf, TEST_QUEUE_ABC);
+
+ Assert.assertEquals("weight is not set", 6f,
+ conf.getNonLabeledQueueWeight(TEST_QUEUE_ABC), 10e-6);
+
+ }
+
+ @Test
+ public void testOneLevelWildcardTemplate() {
+ conf.set(getTemplateKey("root.a.*", "capacity"), "6w");
+ AutoCreatedQueueTemplate template =
+ new AutoCreatedQueueTemplate(conf, TEST_QUEUE_AB);
+ template.setTemplateEntriesForChild(conf, TEST_QUEUE_ABC);
+
+ Assert.assertEquals("weight is not set", 6f,
+ conf.getNonLabeledQueueWeight(TEST_QUEUE_ABC), 10e-6);
+
+ }
+
+ @Test
+ public void testIgnoredWhenRootWildcarded() {
+ conf.set(getTemplateKey("*", "capacity"), "6w");
+ AutoCreatedQueueTemplate template =
+ new AutoCreatedQueueTemplate(conf, ROOT);
+ template.setTemplateEntriesForChild(conf, TEST_QUEUE_A);
+
+ Assert.assertEquals("weight is set", -1f,
+ conf.getNonLabeledQueueWeight(TEST_QUEUE_A), 10e-6);
+ }
+
+ @Test
+ public void testIgnoredWhenNoParent() {
+ conf.set(getTemplateKey("root", "capacity"), "6w");
+ AutoCreatedQueueTemplate template =
+ new AutoCreatedQueueTemplate(conf, ROOT);
+ template.setTemplateEntriesForChild(conf, ROOT);
+
+ Assert.assertEquals("weight is set", -1f,
+ conf.getNonLabeledQueueWeight(ROOT), 10e-6);
+ }
+
+ @Test
+ public void testTemplatePrecedence() {
+ conf.set(getTemplateKey("root.a.b", "capacity"), "6w");
+ conf.set(getTemplateKey("root.a.*", "capacity"), "4w");
+ conf.set(getTemplateKey("root.*.*", "capacity"), "2w");
+
+ AutoCreatedQueueTemplate template =
+ new AutoCreatedQueueTemplate(conf, TEST_QUEUE_AB);
+ template.setTemplateEntriesForChild(conf, TEST_QUEUE_ABC);
+
+ Assert.assertEquals(
+ "explicit template does not have the highest precedence", 6f,
+ conf.getNonLabeledQueueWeight(TEST_QUEUE_ABC), 10e-6);
+
+ CapacitySchedulerConfiguration newConf =
+ new CapacitySchedulerConfiguration();
+ newConf.set(getTemplateKey("root.a.*", "capacity"), "4w");
+ template =
+ new AutoCreatedQueueTemplate(newConf, TEST_QUEUE_AB);
+ template.setTemplateEntriesForChild(newConf, TEST_QUEUE_ABC);
+
+ Assert.assertEquals("precedence is invalid", 4f,
+ newConf.getNonLabeledQueueWeight(TEST_QUEUE_ABC), 10e-6);
+ }
+
+ private String getTemplateKey(String queuePath, String entryKey) {
+ return CapacitySchedulerConfiguration.getQueuePrefix(queuePath)
+ + AutoCreatedQueueTemplate.AUTO_QUEUE_TEMPLATE_PREFIX + entryKey;
+ }
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java
index 7b0254cdcc5da..36b3c9b4d63ff 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCSAllocateCustomResource.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -50,10 +51,16 @@
import java.io.File;
import java.io.IOException;
+
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import java.util.Set;
+import java.util.Map;
+import java.util.Arrays;
+import java.util.stream.Collectors;
+import static org.apache.hadoop.yarn.api.records.ResourceInformation.FPGA_URI;
import static org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI;
import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.MAXIMUM_ALLOCATION_MB;
import static org.junit.Assert.assertEquals;
@@ -248,4 +255,79 @@ public void testClusterMetricsWithGPU()
.get(GPU_URI)).longValue(), 0);
ClusterMetrics.destroy();
}
+
+ /**
+ * Test CS absolute conf with Custom resource type.
+ * */
+ @Test
+ public void testCapacitySchedulerAbsoluteConfWithCustomResourceType()
+ throws IOException {
+ // reset resource types
+ ResourceUtils.resetResourceTypes();
+ String resourceTypesFileName = "resource-types-test.xml";
+ File source = new File(
+ conf.getClassLoader().getResource(resourceTypesFileName).getFile());
+ resourceTypesFile = new File(source.getParent(), "resource-types.xml");
+ FileUtils.copyFile(source, resourceTypesFile);
+
+ CapacitySchedulerConfiguration newConf =
+ new CapacitySchedulerConfiguration(conf);
+
+ // Only memory vcores for first class.
+ Set<String> resourceTypes = Arrays.
+ stream(CapacitySchedulerConfiguration.
+ AbsoluteResourceType.values()).
+ map(value -> value.toString().toLowerCase()).
+ collect(Collectors.toSet());
+
+ Map<String, Long> valuesMin = Maps.newHashMap();
+ valuesMin.put(GPU_URI, 10L);
+ valuesMin.put(FPGA_URI, 10L);
+ valuesMin.put("testType", 10L);
+
+ Map<String, Long> valuesMax = Maps.newHashMap();
+ valuesMax.put(GPU_URI, 100L);
+ valuesMax.put(FPGA_URI, 100L);
+ valuesMax.put("testType", 100L);
+
+ Resource aMINRES =
+ Resource.newInstance(1000, 10, valuesMin);
+
+ Resource aMAXRES =
+ Resource.newInstance(1000, 10, valuesMax);
+
+ // Define top-level queues
+ newConf.setQueues(CapacitySchedulerConfiguration.ROOT,
+ new String[] {"a", "b", "c"});
+ newConf.setMinimumResourceRequirement("", "root.a",
+ aMINRES);
+ newConf.setMaximumResourceRequirement("", "root.a",
+ aMAXRES);
+
+ newConf.setClass(CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
+ DominantResourceCalculator.class, ResourceCalculator.class);
+
+ //start RM
+ MockRM rm = new MockRM(newConf);
+ rm.start();
+
+ // Check the gpu resource conf is right.
+ CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler();
+ Assert.assertEquals(aMINRES,
+ cs.getConfiguration().
+ getMinimumResourceRequirement("", "root.a", resourceTypes));
+ Assert.assertEquals(aMAXRES,
+ cs.getConfiguration().
+ getMaximumResourceRequirement("", "root.a", resourceTypes));
+
+ // Check the gpu resource of queue is right.
+ Assert.assertEquals(aMINRES, cs.getQueue("root.a").
+ getQueueResourceQuotas().getConfiguredMinResource());
+ Assert.assertEquals(aMAXRES, cs.getQueue("root.a").
+ getQueueResourceQuotas().getConfiguredMaxResource());
+
+ rm.close();
+
+ }
+
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java
index c403d23f07b98..c514fc7af2a04 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNewQueueAutoCreation.java
@@ -18,10 +18,11 @@
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
@@ -646,6 +648,85 @@ public void testAutoCreateQueueMaxQueuesLimit() throws Exception {
}
}
+ @Test
+ public void testAutoCreatedQueueTemplateConfig() throws Exception {
+ startScheduler();
+ csConf.set(AutoCreatedQueueTemplate.getAutoQueueTemplatePrefix(
+ "root.a.*") + "capacity", "6w");
+ cs.reinitialize(csConf, mockRM.getRMContext());
+
+ LeafQueue a2 = createQueue("root.a.a-auto.a2");
+ Assert.assertEquals("weight is not set by template", 6f,
+ a2.getQueueCapacities().getWeight(), 1e-6);
+
+ cs.reinitialize(csConf, mockRM.getRMContext());
+ a2 = (LeafQueue) cs.getQueue("root.a.a-auto.a2");
+ Assert.assertEquals("weight is overridden", 6f,
+ a2.getQueueCapacities().getWeight(), 1e-6);
+
+ csConf.setNonLabeledQueueWeight("root.a.a-auto.a2", 4f);
+ cs.reinitialize(csConf, mockRM.getRMContext());
+ Assert.assertEquals("weight is not explicitly set", 4f,
+ a2.getQueueCapacities().getWeight(), 1e-6);
+ }
+
+ @Test
+ public void testAutoCreatedQueueConfigChange() throws Exception {
+ startScheduler();
+ LeafQueue a2 = createQueue("root.a.a-auto.a2");
+ csConf.setNonLabeledQueueWeight("root.a.a-auto.a2", 4f);
+ cs.reinitialize(csConf, mockRM.getRMContext());
+
+ Assert.assertEquals("weight is not explicitly set", 4f,
+ a2.getQueueCapacities().getWeight(), 1e-6);
+
+ a2 = (LeafQueue) cs.getQueue("root.a.a-auto.a2");
+ csConf.setState("root.a.a-auto.a2", QueueState.STOPPED);
+ cs.reinitialize(csConf, mockRM.getRMContext());
+ Assert.assertEquals("root.a.a-auto.a2 has not been stopped",
+ QueueState.STOPPED, a2.getState());
+
+ csConf.setState("root.a.a-auto.a2", QueueState.RUNNING);
+ cs.reinitialize(csConf, mockRM.getRMContext());
+ Assert.assertEquals("root.a.a-auto.a2 is not running",
+ QueueState.RUNNING, a2.getState());
+ }
+
+ @Test
+ public void testAutoCreateQueueState() throws Exception {
+ startScheduler();
+
+ createQueue("root.e.e1");
+ csConf.setState("root.e", QueueState.STOPPED);
+ csConf.setState("root.e.e1", QueueState.STOPPED);
+ csConf.setState("root.a", QueueState.STOPPED);
+ cs.reinitialize(csConf, mockRM.getRMContext());
+
+ // Make sure the static queue is stopped
+ Assert.assertEquals(QueueState.STOPPED,
+ cs.getQueue("root.a").getState());
+ // If not set, default is the queue state of parent
+ Assert.assertEquals(QueueState.STOPPED,
+ cs.getQueue("root.a.a1").getState());
+
+ Assert.assertEquals(QueueState.STOPPED,
+ cs.getQueue("root.e").getState());
+ Assert.assertEquals(QueueState.STOPPED,
+ cs.getQueue("root.e.e1").getState());
+
+ // Make root.e state to RUNNING
+ csConf.setState("root.e", QueueState.RUNNING);
+ cs.reinitialize(csConf, mockRM.getRMContext());
+ Assert.assertEquals(QueueState.STOPPED,
+ cs.getQueue("root.e.e1").getState());
+
+ // Make root.e.e1 state to RUNNING
+ csConf.setState("root.e.e1", QueueState.RUNNING);
+ cs.reinitialize(csConf, mockRM.getRMContext());
+ Assert.assertEquals(QueueState.RUNNING,
+ cs.getQueue("root.e.e1").getState());
+ }
+
@Test
public void testAutoQueueCreationDepthLimitFromStaticParent()
throws Exception {
@@ -968,6 +1049,66 @@ public void testQueueInfoIfAmbiguousQueueNames() throws Exception {
bAutoLeafQueue.getQueueInfo().getQueueName());
}
+ @Test
+ public void testRemoveDanglingAutoCreatedQueuesOnReinit() throws Exception {
+ startScheduler();
+
+ // Validate static parent deletion
+ createQueue("root.a.a-auto");
+ AbstractCSQueue aAuto = (AbstractCSQueue) cs.
+ getQueue("root.a.a-auto");
+ Assert.assertTrue(aAuto.isDynamicQueue());
+
+ csConf.setState("root.a", QueueState.STOPPED);
+ cs.reinitialize(csConf, mockRM.getRMContext());
+ aAuto = (AbstractCSQueue) cs.
+ getQueue("root.a.a-auto");
+ Assert.assertEquals("root.a.a-auto is not in STOPPED state", QueueState.STOPPED, aAuto.getState());
+ csConf.setQueues("root", new String[]{"b"});
+ cs.reinitialize(csConf, mockRM.getRMContext());
+ CSQueue aAutoNew = cs.getQueue("root.a.a-auto");
+ Assert.assertNull(aAutoNew);
+
+ submitApp(cs, USER0, "a-auto", "root.a");
+ aAutoNew = cs.getQueue("root.a.a-auto");
+ Assert.assertNotNull(aAutoNew);
+
+ // Validate static grandparent deletion
+ csConf.setQueues("root", new String[]{"a", "b"});
+ csConf.setQueues("root.a", new String[]{"a1"});
+ csConf.setAutoQueueCreationV2Enabled("root.a.a1", true);
+ cs.reinitialize(csConf, mockRM.getRMContext());
+
+ createQueue("root.a.a1.a1-auto");
+ CSQueue a1Auto = cs.getQueue("root.a.a1.a1-auto");
+ Assert.assertNotNull("a1-auto should exist", a1Auto);
+
+ csConf.setQueues("root", new String[]{"b"});
+ cs.reinitialize(csConf, mockRM.getRMContext());
+ a1Auto = cs.getQueue("root.a.a1.a1-auto");
+ Assert.assertNull("a1-auto has no parent and should not exist", a1Auto);
+
+ // Validate dynamic parent deletion
+ csConf.setState("root.b", QueueState.STOPPED);
+ cs.reinitialize(csConf, mockRM.getRMContext());
+ csConf.setAutoQueueCreationV2Enabled("root.b", true);
+ cs.reinitialize(csConf, mockRM.getRMContext());
+
+ createQueue("root.b.b-auto-parent.b-auto-leaf");
+ CSQueue bAutoParent = cs.getQueue("root.b.b-auto-parent");
+ Assert.assertNotNull("b-auto-parent should exist", bAutoParent);
+ ParentQueue b = (ParentQueue) cs.getQueue("root.b");
+ b.removeChildQueue(bAutoParent);
+
+ cs.reinitialize(csConf, mockRM.getRMContext());
+
+ bAutoParent = cs.getQueue("root.b.b-auto-parent");
+ Assert.assertNull("b-auto-parent should not exist ", bAutoParent);
+ CSQueue bAutoLeaf = cs.getQueue("root.b.b-auto-parent.b-auto-leaf");
+ Assert.assertNull("b-auto-leaf should not exist " +
+ "when its dynamic parent is removed", bAutoLeaf);
+ }
+
protected LeafQueue createQueue(String queuePath) throws YarnException {
return autoQueueHandler.autoCreateQueue(
CSQueueUtils.extractQueuePath(queuePath));
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
index 9ab6583b06ce4..dd271fd34d798 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestNodesPage.java
@@ -53,7 +53,7 @@ public class TestNodesPage {
// Number of Actual Table Headers for NodesPage.NodesBlock might change in
// future. In that case this value should be adjusted to the new value.
private final int numberOfThInMetricsTable = 23;
- private final int numberOfActualTableHeaders = 18;
+ private final int numberOfActualTableHeaders = 16;
private final int numberOfThForOpportunisticContainers = 4;
private Injector injector;
@@ -119,12 +119,34 @@ public void testNodesBlockRenderForLostNodesWithGPUResources()
initResourceTypes(ResourceInformation.GPU_URI);
this.setUpInternal(true);
try {
- this.testNodesBlockRenderForLostNodes();
+ // Test gpu as a custom resource.
+ //
+ // yarn.io/gpu Used
+ // |
+ //
+ // yarn.io/gpu Avail
+ // |
+ this.testNodesBlockRenderForLostNodesWithGPU();
} finally {
ResourceUtils.initializeResourcesFromResourceInformationMap(oldRtMap);
}
}
+ public void testNodesBlockRenderForLostNodesWithGPU() {
+ NodesBlock nodesBlock = injector.getInstance(NodesBlock.class);
+ nodesBlock.set("node.state", "lost");
+ nodesBlock.render();
+ PrintWriter writer = injector.getInstance(PrintWriter.class);
+ WebAppTests.flushOutput(injector);
+
+ Mockito.verify(writer,
+ Mockito.times(numberOfActualTableHeaders
+ + numberOfThInMetricsTable + 2))
+ .print("
yarn.resource-types
- yarn.io/gpu, yarn.io/fpga
+ yarn.io/gpu, yarn.io/fpga, testType
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock
index bbafc667a30ef..1d36e689da053 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock
@@ -79,11 +79,6 @@ amdefine@>=0.0.4:
resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
integrity sha1-SlKCrBZHKek2Gbz9OtFR+BfOkfU=
-ansi-regex@*, ansi-regex@^5.0.0:
- version "5.0.0"
- resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.0.tgz#388539f55179bf39339c81af30a654d69f87cb75"
- integrity sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==
-
ansi-regex@^0.2.0, ansi-regex@^0.2.1:
version "0.2.1"
resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-0.2.1.tgz#0d8e946967a3d8143f93e24e298525fc1b2235f9"
@@ -6163,16 +6158,16 @@ spdx-expression-parse@^3.0.0:
spdx-exceptions "^2.1.0"
spdx-license-ids "^3.0.0"
-spdx-license-ids@*, spdx-license-ids@^3.0.0:
- version "3.0.5"
- resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz#3694b5804567a458d3c8045842a6358632f62654"
- integrity sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q==
-
spdx-license-ids@^1.0.0:
version "1.2.2"
resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-1.2.2.tgz#c9df7a3424594ade6bd11900d596696dc06bac57"
integrity sha1-yd96NCRZSt5r0RkA1ZZpbcBrrFc=
+spdx-license-ids@^3.0.0:
+ version "3.0.5"
+ resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.5.tgz#3694b5804567a458d3c8045842a6358632f62654"
+ integrity sha512-J+FWzZoynJEXGphVIS+XEh3kFSjZX/1i9gFBaWQcB+/tmpe2qUsSBABpcxqxnAxFdiUFEgAX1bjYGQvIZmoz9Q==
+
spdx@~0.4.1:
version "0.4.3"
resolved "https://registry.yarnpkg.com/spdx/-/spdx-0.4.3.tgz#ab373c3fcf7b84ffd8fdeb0592d24ff0d14812e4"
@@ -6276,13 +6271,6 @@ stringstream@~0.0.4:
resolved "https://registry.yarnpkg.com/stringstream/-/stringstream-0.0.6.tgz#7880225b0d4ad10e30927d167a1d6f2fd3b33a72"
integrity sha512-87GEBAkegbBcweToUrdzf3eLhWNg06FJTebl4BVJz/JgWy8CvEr9dRtX5qWphiynMSQlxxi+QqN0z5T32SLlhA==
-strip-ansi@*:
- version "6.0.0"
- resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.0.tgz#0b1571dd7669ccd4f3e06e14ef1eed26225ae532"
- integrity sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==
- dependencies:
- ansi-regex "^5.0.0"
-
strip-ansi@^0.3.0:
version "0.3.0"
resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-0.3.0.tgz#25f48ea22ca79187f3174a4db8759347bb126220"
@@ -6906,9 +6894,9 @@ xtend@^4.0.0:
integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==
y18n@^3.2.0, y18n@^3.2.1:
- version "3.2.1"
- resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.1.tgz#6d15fba884c08679c0d77e88e7759e811e07fa41"
- integrity sha1-bRX7qITAhnnA136I53WegR4H+kE=
+ version "3.2.2"
+ resolved "https://registry.yarnpkg.com/y18n/-/y18n-3.2.2.tgz#85c901bd6470ce71fc4bb723ad209b70f7f28696"
+ integrity sha512-uGZHXkHnhF0XeeAPgnKfPv1bgKAYyVvmNL1xlKsPYZPaIHxGti2hHqvOCQv71XMsLxu1QjergkqogUnms5D3YQ==
yallist@^2.1.2:
version "2.1.2"
|