Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -284,7 +284,7 @@ private void addQueuedCommandCounts(
reportProto.addCommand(entry.getKey())
.addCount(entry.getValue());
}
requestBuilder.setQueuedCommandReport(reportProto.build());
requestBuilder.setCommandQueueReport(reportProto.build());
}

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -256,8 +256,8 @@ public void testheartbeatWithAllReports() throws Exception {
Assert.assertTrue(heartbeat.hasContainerReport());
Assert.assertTrue(heartbeat.getCommandStatusReportsCount() != 0);
Assert.assertTrue(heartbeat.hasContainerActions());
Assert.assertTrue(heartbeat.hasQueuedCommandReport());
CommandQueueReportProto queueCount = heartbeat.getQueuedCommandReport();
Assert.assertTrue(heartbeat.hasCommandQueueReport());
CommandQueueReportProto queueCount = heartbeat.getCommandQueueReport();
Assert.assertEquals(queueCount.getCommandCount(), commands.size());
Assert.assertEquals(queueCount.getCountCount(), commands.size());
for (int i = 0; i < commands.size(); i++) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ message SCMHeartbeatRequestProto {
optional PipelineActionsProto pipelineActions = 7;
optional PipelineReportsProto pipelineReports = 8;
optional LayoutVersionProto dataNodeLayoutVersion = 9;
optional CommandQueueReportProto queuedCommandReport = 10;
optional CommandQueueReportProto commandQueueReport = 10;
}

message CommandQueueReportProto {
Expand Down Expand Up @@ -309,6 +309,7 @@ message PipelineAction {
*/
message SCMCommandProto {
enum Type {
unknownScmCommand = 0;
reregisterCommand = 1;
deleteBlocksCommand = 2;
closeContainerCommand = 3;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.CommandQueueReportFromDatanode;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer.NodeRegistrationContainerReport;
import org.apache.hadoop.hdds.server.events.Event;
import org.apache.hadoop.hdds.server.events.TypedEvent;
Expand All @@ -49,6 +50,15 @@ public final class SCMEvents {
public static final TypedEvent<NodeReportFromDatanode> NODE_REPORT =
new TypedEvent<>(NodeReportFromDatanode.class, "Node_Report");

/**
* Queued Command counts are sent out by Datanodes. This report is received by
* SCMDatanodeHeartbeatDispatcher and the COMMAND_QUEUE_REPORT Event is
* generated.
*/
public static final TypedEvent<CommandQueueReportFromDatanode>
COMMAND_QUEUE_REPORT = new TypedEvent<>(
CommandQueueReportFromDatanode.class, "Command_Queue_Report");

/**
* Event generated on DataNode registration.
*/
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.scm.node;

import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.CommandQueueReportFromDatanode;
import org.apache.hadoop.hdds.server.events.EventHandler;
import org.apache.hadoop.hdds.server.events.EventPublisher;

/**
 * Handles QueuedCommand Reports from datanode.
 */
public class CommandQueueReportHandler implements
    EventHandler<CommandQueueReportFromDatanode> {

  private final NodeManager nodeManager;

  /**
   * Create a handler backed by the given NodeManager.
   * @param nodeManager manager that stores per-node queued command counts.
   */
  public CommandQueueReportHandler(NodeManager nodeManager) {
    Preconditions.checkNotNull(nodeManager);
    this.nodeManager = nodeManager;
  }

  /**
   * Forwards the queued command counts carried in a datanode heartbeat to
   * the NodeManager.
   * @param report event payload carrying the datanode and its report.
   * @param publisher unused here; part of the EventHandler contract.
   */
  @Override
  public void onMessage(CommandQueueReportFromDatanode report,
      EventPublisher publisher) {
    Preconditions.checkNotNull(report);
    final DatanodeDetails details = report.getDatanodeDetails();
    Preconditions.checkNotNull(details,
        "QueueReport is missing DatanodeDetails.");
    nodeManager.processNodeCommandQueueReport(details, report.getReport());
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -22,16 +22,22 @@

import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

Expand All @@ -41,6 +47,8 @@
*/
public class DatanodeInfo extends DatanodeDetails {

private static final Logger LOG = LoggerFactory.getLogger(DatanodeInfo.class);

private final ReadWriteLock lock;

private volatile long lastHeartbeatTime;
Expand All @@ -49,6 +57,7 @@ public class DatanodeInfo extends DatanodeDetails {
private List<StorageReportProto> storageReports;
private List<MetadataStorageReportProto> metadataStorageReports;
private LayoutVersionProto lastKnownLayoutVersion;
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@sodonnel I am wondering what happens to this DatanodeInfo when it expires due to lack of HB from that node? Is this object kept around or destroyed? I am trying to figure out that particular part of the code, but I have not found it so far. Please point me to where we remove this nodeInfo object when the node expires. Thanks

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

NodeStateManager.checkNodesHealth is what notices the lost heartbeats and triggers events based on that.

The DeadNodeHandler is triggered when the node goes dead (there is also a StaleNodeHandler), and clears out its pipelines etc. Perhaps we should reset the command counts when this happens, or perhaps it is valid to leave them as the last known value. The datanodeInfo object is not removed AFAIK, as it holds the DN service state (in_service, decommissioning, healthy, stale, dead etc). If the DN comes back, it will be reset by the heartbeat processing. If it never comes back, the datanodedetails and datanodeinfo stick around in SCM until it is restarted.

I am not sure if the command counts remaining is a big issue, as we should avoid scheduling commands on dead (and maybe stale) nodes anyway. Eg before scheduling a command for a node, need to check it is HEALTHY, as otherwise the commands will be queued in SCM and never taken by a DN. If something in SCM keeps scheduling commands for dead nodes, it will slowly fill up the SCM memory on the command queue.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, on thinking about it a bit, I think it's OK to leave the counts as well (anyway we will not assign tasks to a dead DN). When the node rejoins, we should receive a new HB and the counts should get refreshed.
The question above was just for my own clarity on what happens to the DN object. Thanks for the details.

private Map<SCMCommandProto.Type, Integer> commandCounts;

private NodeStatus nodeStatus;

Expand All @@ -69,6 +78,7 @@ public DatanodeInfo(DatanodeDetails datanodeDetails, NodeStatus nodeStatus,
this.storageReports = Collections.emptyList();
this.nodeStatus = nodeStatus;
this.metadataStorageReports = Collections.emptyList();
this.commandCounts = new HashMap<>();
}

/**
Expand Down Expand Up @@ -272,6 +282,57 @@ public void setNodeStatus(NodeStatus newNodeStatus) {
}
}

/**
 * Set the current command counts for this datanode, as reported in the last
 * heartbeat.
 * @param cmds Proto message containing a list of command count pairs.
 */
public void setCommandCounts(CommandQueueReportProto cmds) {
  int count = cmds.getCommandCount();
  // Acquire the lock BEFORE entering try: if the lock() call itself (or any
  // statement preceding it inside a try) failed, the finally block would
  // attempt to unlock a lock that was never held and throw
  // IllegalMonitorStateException, masking the original exception.
  lock.writeLock().lock();
  try {
    for (int i = 0; i < count; i++) {
      SCMCommandProto.Type command = cmds.getCommand(i);
      if (command == SCMCommandProto.Type.unknownScmCommand) {
        // A command type added in a newer release deserializes as
        // unknownScmCommand on an older peer; skip rather than store it.
        LOG.warn("Unknown SCM Command received from {} in the "
            + "heartbeat. SCM and the DN may not be at the same version.",
            this);
        continue;
      }
      int cmdCount = cmds.getCount(i);
      if (cmdCount < 0) {
        LOG.warn("Command count of {} from {} should be greater than zero. " +
            "Setting it to zero", cmdCount, this);
        cmdCount = 0;
      }
      commandCounts.put(command, cmdCount);
    }
  } finally {
    lock.writeLock().unlock();
  }
}

/**
* Retrieve the number of queued commands of the given type, as reported by
* the datanode at the last heartbeat.
* @param cmd The command for which to receive the queued command count
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If it's -1, we should wait to assign any tasks to this node, as we don't know the actual state?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

-1 means we have not received any data yet. In the case of an upgrade adding a new command (eg SCM upgraded with a new command, but some DNs not upgraded) those DNs will always show a -1 for the new command.

I am not sure how we should handle this - possibly we need a fallback position in any code that uses these counts. If it is "-1" then we need to use some other way of limiting the commands sent. The upgrade scenario should be short lived, and then DNs should only have -1 until their first heartbeat.

I just thought it was a good idea to include -1 as a different state than zero, so we can tell the difference between the two.

Copy link
Contributor

@umamaheswararao umamaheswararao Apr 29, 2022

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Got it.

* @return -1 if we have no information about the count, or an integer >= 0
* indicating the command count at the last heartbeat.
*/
// See the javadoc above: returns -1 when no count has been reported for
// this command type, otherwise the count from the last heartbeat (>= 0).
public int getCommandCount(SCMCommandProto.Type cmd) {
  // Lock before try so the finally-unlock only runs if the lock was
  // actually acquired; locking inside the try risks an
  // IllegalMonitorStateException from finally masking the real failure.
  lock.readLock().lock();
  try {
    // A null entry means this command type has never been reported;
    // callers distinguish that from an explicit zero via -1.
    Integer count = commandCounts.get(cmd);
    return count == null ? -1 : count;
  } finally {
    lock.readLock().unlock();
  }
}

@Override
public int hashCode() {
return super.hashCode();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,11 @@
import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto;

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.scm.container.ContainerID;
Expand Down Expand Up @@ -296,6 +298,24 @@ void processNodeReport(DatanodeDetails datanodeDetails,
void processLayoutVersionReport(DatanodeDetails datanodeDetails,
LayoutVersionProto layoutReport);

/**
* Process the Command Queue Report sent from datanodes as part of the
* heartbeat message.
* @param datanodeDetails
* @param commandReport
*/
void processNodeCommandQueueReport(DatanodeDetails datanodeDetails,
CommandQueueReportProto commandReport);

/**
* Get the number of commands of the given type queued on the datanode at the
* last heartbeat. If the Datanode has not reported information for the given
* command type, -1 will be returned.
* @param cmdType
* @return The queued count or -1 if no data has been received from the DN.
*/
int getNodeQueuedCommandCount(DatanodeDetails datanodeDetails,
SCMCommandProto.Type cmdType) throws NodeNotFoundException;

/**
* Get list of SCMCommands in the Command Queue for a particular Datanode.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
Expand Down Expand Up @@ -621,6 +623,48 @@ public void processLayoutVersionReport(DatanodeDetails datanodeDetails,
}
}

/**
* Process Command Queue Reports from the Datanode Heartbeat.
*
* @param datanodeDetails
* @param commandQueueReportProto
*/
@Override
public void processNodeCommandQueueReport(DatanodeDetails datanodeDetails,
CommandQueueReportProto commandQueueReportProto) {
LOG.debug("Processing Command Queue Report from [datanode={}]",
datanodeDetails.getHostName());
if (LOG.isTraceEnabled()) {
LOG.trace("Command Queue Report is received from [datanode={}]: " +
"<json>{}</json>", datanodeDetails.getHostName(),
commandQueueReportProto.toString().replaceAll("\n", "\\\\n"));
}
try {
DatanodeInfo datanodeInfo = nodeStateManager.getNode(datanodeDetails);
if (commandQueueReportProto != null) {
datanodeInfo.setCommandCounts(commandQueueReportProto);
metrics.incNumNodeCommandQueueReportProcessed();
}
} catch (NodeNotFoundException e) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this metric a "report failed"? or just unknown node report? I am not sure about the definition of this metric here

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

These metrics are copying what is already there for other commands, eg see processNodeReport() and processHeartbeat() - I basically copied this methods structure from there to keep it consistent. In both those cases, the metric is "failedProcessing" but the only failure handled is nodeNotFound, so the name is a bit misleading.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

OK. Yeah, the name is a bit misleading.

metrics.incNumNodeCommandQueueReportProcessingFailed();
LOG.warn("Got Command Queue Report from unregistered datanode {}",
datanodeDetails);
}
}

/**
 * Get the number of commands of the given type queued on the datanode at the
 * last heartbeat. If the Datanode has not reported information for the given
 * command type, -1 will be returned.
 * @param datanodeDetails The datanode for which to retrieve the count.
 * @param cmdType The command type for which to retrieve the queued count.
 * @return The queued count or -1 if no data has been received from the DN.
 * @throws NodeNotFoundException if the datanode is not registered with SCM.
 */
@Override
public int getNodeQueuedCommandCount(DatanodeDetails datanodeDetails,
    SCMCommandProto.Type cmdType) throws NodeNotFoundException {
  DatanodeInfo datanodeInfo = nodeStateManager.getNode(datanodeDetails);
  return datanodeInfo.getCommandCount(cmdType);
}

/**
* Returns the aggregated node stats.
*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,8 @@ public final class SCMNodeMetrics implements MetricsSource {
private @Metric MutableCounterLong numHBProcessingFailed;
private @Metric MutableCounterLong numNodeReportProcessed;
private @Metric MutableCounterLong numNodeReportProcessingFailed;
private @Metric MutableCounterLong numNodeCommandQueueReportProcessed;
private @Metric MutableCounterLong numNodeCommandQueueReportProcessingFailed;
private @Metric String textMetric;

private final MetricsRegistry registry;
Expand Down Expand Up @@ -111,6 +113,20 @@ void incNumNodeReportProcessingFailed() {
numNodeReportProcessingFailed.incr();
}

/**
 * Increments number of Command Queue reports processed. Invoked once per
 * heartbeat whose command queue report was successfully applied to a
 * registered datanode.
 */
void incNumNodeCommandQueueReportProcessed() {
  numNodeCommandQueueReportProcessed.incr();
}

/**
 * Increments number of Command Queue reports where processing failed —
 * currently this means the report arrived from a datanode that is not
 * registered with this SCM.
 */
void incNumNodeCommandQueueReportProcessingFailed() {
  numNodeCommandQueueReportProcessingFailed.incr();
}

/**
* Get aggregated counter and gauge metrics.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@

import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.CRLStatusReport;
import org.apache.hadoop.hdds.protocol.proto
Expand Down Expand Up @@ -58,6 +59,7 @@
import static org.apache.hadoop.hdds.scm.events.SCMEvents.CMD_STATUS_REPORT;
import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_ACTIONS;
import static org.apache.hadoop.hdds.scm.events.SCMEvents.PIPELINE_REPORT;
import static org.apache.hadoop.hdds.scm.events.SCMEvents.COMMAND_QUEUE_REPORT;
import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.INITIAL_VERSION;
import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto;

Expand Down Expand Up @@ -132,6 +134,13 @@ public List<SCMCommand> dispatch(SCMHeartbeatRequestProto heartbeat) {
heartbeat.getNodeReport()));
}

if (heartbeat.hasCommandQueueReport()) {
LOG.debug("Dispatching Queued Command Report");
eventPublisher.fireEvent(COMMAND_QUEUE_REPORT,
new CommandQueueReportFromDatanode(datanodeDetails,
heartbeat.getCommandQueueReport()));
}

if (heartbeat.hasContainerReport()) {
LOG.debug("Dispatching Container Report.");
eventPublisher.fireEvent(
Expand Down Expand Up @@ -233,6 +242,17 @@ public NodeReportFromDatanode(DatanodeDetails datanodeDetails,
}
}

/**
 * Command Queue Report with origin. Event payload pairing a
 * CommandQueueReportProto with the DatanodeDetails of the datanode whose
 * heartbeat carried it.
 */
public static class CommandQueueReportFromDatanode
    extends ReportFromDatanode<CommandQueueReportProto> {
  /**
   * @param datanodeDetails the datanode the report originated from.
   * @param report the queued command counts reported in the heartbeat.
   */
  public CommandQueueReportFromDatanode(DatanodeDetails datanodeDetails,
      CommandQueueReportProto report) {
    super(datanodeDetails, report);
  }
}

/**
* Layout report event payload with origin.
*/
Expand Down
Loading