-
Notifications
You must be signed in to change notification settings - Fork 593
HDDS-6567. Store datanode command queue counts from heartbeat in DatanodeInfo in SCM #3329
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,49 @@ | ||
| /** | ||
| * Licensed to the Apache Software Foundation (ASF) under one | ||
| * or more contributor license agreements. See the NOTICE file | ||
| * distributed with this work for additional information | ||
| * regarding copyright ownership. The ASF licenses this file | ||
| * to you under the Apache License, Version 2.0 (the | ||
| * "License"); you may not use this file except in compliance | ||
| * with the License. You may obtain a copy of the License at | ||
| * | ||
| * http://www.apache.org/licenses/LICENSE-2.0 | ||
| * | ||
| * Unless required by applicable law or agreed to in writing, software | ||
| * distributed under the License is distributed on an "AS IS" BASIS, | ||
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
| * See the License for the specific language governing permissions and | ||
| * limitations under the License. | ||
| */ | ||
| package org.apache.hadoop.hdds.scm.node; | ||
|
|
||
| import com.google.common.base.Preconditions; | ||
| import org.apache.hadoop.hdds.protocol.DatanodeDetails; | ||
| import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.CommandQueueReportFromDatanode; | ||
| import org.apache.hadoop.hdds.server.events.EventHandler; | ||
| import org.apache.hadoop.hdds.server.events.EventPublisher; | ||
|
|
||
| /** | ||
| * Handles QueuedCommand Reports from datanode. | ||
| */ | ||
| public class CommandQueueReportHandler implements | ||
| EventHandler<CommandQueueReportFromDatanode> { | ||
|
|
||
| private final NodeManager nodeManager; | ||
|
|
||
| public CommandQueueReportHandler(NodeManager nodeManager) { | ||
| Preconditions.checkNotNull(nodeManager); | ||
| this.nodeManager = nodeManager; | ||
| } | ||
|
|
||
| @Override | ||
| public void onMessage(CommandQueueReportFromDatanode queueReportFromDatanode, | ||
| EventPublisher publisher) { | ||
| Preconditions.checkNotNull(queueReportFromDatanode); | ||
| DatanodeDetails dn = queueReportFromDatanode.getDatanodeDetails(); | ||
| Preconditions.checkNotNull(dn, "QueueReport is " | ||
| + "missing DatanodeDetails."); | ||
| nodeManager.processNodeCommandQueueReport(dn, | ||
| queueReportFromDatanode.getReport()); | ||
| } | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -22,16 +22,22 @@ | |
|
|
||
| import com.google.common.annotations.VisibleForTesting; | ||
| import org.apache.hadoop.hdds.protocol.DatanodeDetails; | ||
| import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; | ||
| import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto; | ||
| import org.apache.hadoop.hdds.protocol.proto | ||
| .StorageContainerDatanodeProtocolProtos.LayoutVersionProto; | ||
| import org.apache.hadoop.hdds.protocol.proto | ||
| .StorageContainerDatanodeProtocolProtos.StorageReportProto; | ||
| import org.apache.hadoop.hdds.protocol.proto | ||
| .StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; | ||
| import org.apache.hadoop.util.Time; | ||
| import org.slf4j.Logger; | ||
| import org.slf4j.LoggerFactory; | ||
|
|
||
| import java.util.Collections; | ||
| import java.util.HashMap; | ||
| import java.util.List; | ||
| import java.util.Map; | ||
| import java.util.concurrent.locks.ReadWriteLock; | ||
| import java.util.concurrent.locks.ReentrantReadWriteLock; | ||
|
|
||
|
|
@@ -41,6 +47,8 @@ | |
| */ | ||
| public class DatanodeInfo extends DatanodeDetails { | ||
|
|
||
| private static final Logger LOG = LoggerFactory.getLogger(DatanodeInfo.class); | ||
|
|
||
| private final ReadWriteLock lock; | ||
|
|
||
| private volatile long lastHeartbeatTime; | ||
|
|
@@ -49,6 +57,7 @@ public class DatanodeInfo extends DatanodeDetails { | |
| private List<StorageReportProto> storageReports; | ||
| private List<MetadataStorageReportProto> metadataStorageReports; | ||
| private LayoutVersionProto lastKnownLayoutVersion; | ||
| private Map<SCMCommandProto.Type, Integer> commandCounts; | ||
|
|
||
| private NodeStatus nodeStatus; | ||
|
|
||
|
|
@@ -69,6 +78,7 @@ public DatanodeInfo(DatanodeDetails datanodeDetails, NodeStatus nodeStatus, | |
| this.storageReports = Collections.emptyList(); | ||
| this.nodeStatus = nodeStatus; | ||
| this.metadataStorageReports = Collections.emptyList(); | ||
| this.commandCounts = new HashMap<>(); | ||
| } | ||
|
|
||
| /** | ||
|
|
@@ -272,6 +282,57 @@ public void setNodeStatus(NodeStatus newNodeStatus) { | |
| } | ||
| } | ||
|
|
||
| /** | ||
| * Set the current command counts for this datanode, as reported in the last | ||
| * heartbeat. | ||
| * @param cmds Proto message containing a list of command count pairs. | ||
| */ | ||
| public void setCommandCounts(CommandQueueReportProto cmds) { | ||
| try { | ||
| int count = cmds.getCommandCount(); | ||
| lock.writeLock().lock(); | ||
| for (int i = 0; i < count; i++) { | ||
| SCMCommandProto.Type command = cmds.getCommand(i); | ||
| if (command == SCMCommandProto.Type.unknownScmCommand) { | ||
| LOG.warn("Unknown SCM Command received from {} in the " | ||
| + "heartbeat. SCM and the DN may not be at the same version.", | ||
| this); | ||
| continue; | ||
| } | ||
| int cmdCount = cmds.getCount(i); | ||
| if (cmdCount < 0) { | ||
| LOG.warn("Command count of {} from {} should be greater than zero. " + | ||
| "Setting it to zero", cmdCount, this); | ||
| cmdCount = 0; | ||
| } | ||
| commandCounts.put(command, cmdCount); | ||
| } | ||
| } finally { | ||
| lock.writeLock().unlock(); | ||
| } | ||
| } | ||
|
|
||
| /** | ||
| * Retrieve the number of queued commands of the given type, as reported by | ||
| * the datanode at the last heartbeat. | ||
| * @param cmd The command for which to receive the queued command count | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. If it's -1, should we wait before assigning any tasks to this node, since we don't know its actual state?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. -1 means we have not received any data yet. In the case of an upgrade adding a new command (eg SCM upgraded with a new command, but some DNs not upgraded) those DNs will always show a -1 for the new command. I am not sure how we should handle this - possibly we need a fallback position in any code that uses these counts. If it is "-1" then we need to use some other way of limiting the commands sent. The upgrade scenario should be short lived, and then DNs should only have -1 until their first heartbeat. I just thought it was a good idea to include -1 as a different state than zero, so we can tell the difference between the two.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Got it. |
||
| * @return -1 if we have no information about the count, or an integer >= 0 | ||
| * indicating the command count at the last heartbeat. | ||
| */ | ||
| public int getCommandCount(SCMCommandProto.Type cmd) { | ||
| try { | ||
| lock.readLock().lock(); | ||
| Integer count = commandCounts.get(cmd); | ||
| if (count == null) { | ||
| return -1; | ||
| } else { | ||
| return count.intValue(); | ||
| } | ||
| } finally { | ||
| lock.readLock().unlock(); | ||
| } | ||
| } | ||
|
|
||
| @Override | ||
| public int hashCode() { | ||
| return super.hashCode(); | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -27,6 +27,8 @@ | |
| import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; | ||
| import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; | ||
| import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; | ||
| import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; | ||
| import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto; | ||
| import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto; | ||
| import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; | ||
| import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; | ||
|
|
@@ -621,6 +623,48 @@ public void processLayoutVersionReport(DatanodeDetails datanodeDetails, | |
| } | ||
| } | ||
|
|
||
| /** | ||
| * Process Command Queue Reports from the Datanode Heartbeat. | ||
| * | ||
| * @param datanodeDetails | ||
| * @param commandQueueReportProto | ||
| */ | ||
| @Override | ||
| public void processNodeCommandQueueReport(DatanodeDetails datanodeDetails, | ||
| CommandQueueReportProto commandQueueReportProto) { | ||
| LOG.debug("Processing Command Queue Report from [datanode={}]", | ||
| datanodeDetails.getHostName()); | ||
| if (LOG.isTraceEnabled()) { | ||
| LOG.trace("Command Queue Report is received from [datanode={}]: " + | ||
| "<json>{}</json>", datanodeDetails.getHostName(), | ||
| commandQueueReportProto.toString().replaceAll("\n", "\\\\n")); | ||
| } | ||
| try { | ||
| DatanodeInfo datanodeInfo = nodeStateManager.getNode(datanodeDetails); | ||
| if (commandQueueReportProto != null) { | ||
| datanodeInfo.setCommandCounts(commandQueueReportProto); | ||
| metrics.incNumNodeCommandQueueReportProcessed(); | ||
| } | ||
| } catch (NodeNotFoundException e) { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Is this metric a "report failed"? or just unknown node report? I am not sure about the definition of this metric here
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. These metrics are copying what is already there for other commands, eg see
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. OK. Yeah, the name is a bit misleading. |
||
| metrics.incNumNodeCommandQueueReportProcessingFailed(); | ||
| LOG.warn("Got Command Queue Report from unregistered datanode {}", | ||
| datanodeDetails); | ||
| } | ||
| } | ||
|
|
||
| /** | ||
| * Get the number of commands of the given type queued on the datanode at the | ||
| * last heartbeat. If the Datanode has not reported information for the given | ||
| * command type, -1 will be returned. | ||
| * @param cmdType | ||
| * @return The queued count or -1 if no data has been received from the DN. | ||
| */ | ||
| public int getNodeQueuedCommandCount(DatanodeDetails datanodeDetails, | ||
| SCMCommandProto.Type cmdType) throws NodeNotFoundException { | ||
| DatanodeInfo datanodeInfo = nodeStateManager.getNode(datanodeDetails); | ||
| return datanodeInfo.getCommandCount(cmdType); | ||
| } | ||
|
|
||
| /** | ||
| * Returns the aggregated node stats. | ||
| * | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@sodonnel I am wondering what happens to this DatanodeInfo when it expire due to lack of HB from that node? is this object around or destroyed. I am trying to figure out that particular code part. I have not found so far. Please point me where we remove this nodeInfo object when node expires. Thanks
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
NodeStateManager.checkNodesHealth is what notices the lost heartbeats and triggers events based on that.
The DeadNodeHandler is triggered when the node goes dead (there is also a StaleNodeHandler), and clears out its pipelines etc. Perhaps we should reset the command counts when this happens, or perhaps it is valid to leave them as the last known value. The datanodeInfo object is not removed AFAIK, as it holds the DN service state (in_service, decommissioning, healthy, stale, dead etc). If the DN comes back, it will be reset by the heartbeat processing. If it never comes back, the datanodedetails and datanodeinfo stick around in SCM until it is restarted.
I am not sure if the command counts remaining is a big issue, as we should avoid scheduling commands on dead (and maybe stale) nodes anyway. Eg before scheduling a command for a node, need to check it is HEALTHY, as otherwise the commands will be queued in SCM and never taken by a DN. If something in SCM keeps scheduling commands for dead nodes, it will slowly fill up the SCM memory on the command queue.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yeah, on thinking about it a bit, I think it's OK to leave the counts as well (anyway, we will not assign tasks to a dead DN). When the node rejoins, we should receive a new HB and the counts should get refreshed.
The above question was just for my own clarity about what happens to the DN object. Thanks for the details.