Dev: ui_node: redirect node delete to cluster remove
It will also adjust the related cluster configuration and clean up leftovers
(e.g. stopping the cluster services) on the removed node.

Closes ClusterLabs#891.
nicholasyang2022 committed Dec 15, 2022
1 parent 84e6ea1 commit 66c8884
Showing 4 changed files with 93 additions and 85 deletions.
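For orientation, here is a minimal sketch (not part of the commit) of the delegation this change introduces: `crm node delete <node>` now hands the work to `crm cluster remove`, forwarding the global force option as `-F`. The function name and the example node name are illustrative only.

import subprocess

def delete_node(node, force=False):
    """Sketch: delegate node deletion to `crm cluster remove`."""
    cmd = ["crm", "cluster", "remove", "-c", node]
    if force:
        # -F lets the removal proceed even when the node is still
        # listed as active, mirroring the global force option.
        cmd = ["crm", "cluster", "remove", "-F", "-c", node]
    return subprocess.call(cmd) == 0

# e.g. delete_node("alice") returns True once "alice" has been removed.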
13 changes: 6 additions & 7 deletions crmsh/bootstrap.py
@@ -39,7 +39,7 @@
from . import qdevice
from . import parallax
from . import log

from .ui_node import NodeMgmt

logger = log.setup_logger(__name__)
logger_utils = log.LoggerUtils(logger)
@@ -2006,9 +2006,8 @@ def remove_node_from_cluster():

# remove the node from the cluster (was: crm node delete $HOSTNAME)
logger.info("Removing the node {}".format(node))
rc, _, err = invoke("crm node delete {}".format(node))
if not rc:
utils.fatal("Failed to remove {}: {}".format(node, err))
if not NodeMgmt.call_delnode(node):
utils.fatal("Failed to remove {}.".format(node))

if not invokerc("sed -i /{}/d {}".format(node, CSYNC2_CFG)):
utils.fatal("Removing the node {} from {} failed".format(node, CSYNC2_CFG))
@@ -2310,7 +2309,7 @@ def bootstrap_remove(context):
if _context.cluster_node == utils.this_node():
if not force_flag:
utils.fatal("Removing self requires --force")
remove_self()
remove_self(force_flag)
elif _context.cluster_node in xmlutil.listnodes():
remove_node_from_cluster()
else:
@@ -2319,14 +2318,14 @@
bootstrap_finished()


def remove_self():
def remove_self(force_flag=False):
me = _context.cluster_node
yes_to_all = _context.yes_to_all
nodes = xmlutil.listnodes(include_remote_nodes=False)
othernode = next((x for x in nodes if x != me), None)
if othernode is not None:
# remove from other node
cmd = "crm cluster remove{} -c {}".format(" -y" if yes_to_all else "", me)
cmd = "crm{} cluster remove{} -c {}".format(" -F" if force_flag else "", " -y" if yes_to_all else "", me)
rc = utils.ext_cmd_nosudo("ssh{} {} {} '{}'".format("" if yes_to_all else " -t", SSH_OPTION, othernode, cmd))
if rc != 0:
utils.fatal("Failed to remove this node from {}".format(othernode))
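As a side note on the `remove_self()` change above: the force flag is forwarded to the peer node by injecting `-F` into the `crm cluster remove` command line it runs over ssh. A small sketch of that assembly (the helper name is hypothetical):

def build_remove_cmd(me, force_flag=False, yes_to_all=False):
    """Sketch: assemble the command a peer node runs to remove this node."""
    return "crm{} cluster remove{} -c {}".format(
        " -F" if force_flag else "",
        " -y" if yes_to_all else "",
        me,
    )

# build_remove_cmd("alice", force_flag=True) -> "crm -F cluster remove -c alice"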
55 changes: 1 addition & 54 deletions crmsh/ui_cluster.py
@@ -13,6 +13,7 @@
from . import corosync
from . import qdevice
from .cibconfig import cib_factory
from .ui_node import parse_option_for_nodes
from . import constants


@@ -37,60 +38,6 @@ def parse_options(parser, args):
return options, args


def parse_option_for_nodes(context, *args):
"""
Parse option for nodes
Return a node list
"""
action_type = context.get_command_name()
action_target = "node" if action_type in ["standby", "online"] else "cluster service"
action = "{} {}".format(action_type, action_target)
usage_template = """
Specify node(s) on which to {action}.
If no nodes are specified, {action} on the local node.
If --all is specified, {action} on all nodes."""
addtion_usage = ""
if action_type == "standby":
usage_template += """
\n\nAdditionally, you may specify a lifetime for the standby---if set to
"reboot", the node will be back online once it reboots. "forever" will
keep the node in standby after reboot. The life time defaults to
"forever"."""
addtion_usage = " [lifetime]"

parser = ArgParser(description=usage_template.format(action=action),
usage="{} [--all | <node>... ]{}".format(action_type, addtion_usage),
add_help=False,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-h", "--help", action="store_true", dest="help", help="Show this help message")
parser.add_argument("--all", help="To {} on all nodes".format(action), action="store_true", dest="all")

options, args = parse_options(parser, args)
if options is None or args is None:
raise utils.TerminateSubCommand
if options.all and args:
context.fatal_error("Should either use --all or specific node(s)")

# return local node
if not options.all and not args:
return [utils.this_node()]
member_list = utils.list_cluster_nodes()
if not member_list:
context.fatal_error("Cannot get the node list from cluster")
for node in args:
if node not in member_list:
context.fatal_error("Node \"{}\" is not a cluster node".format(node))

node_list = member_list if options.all else args
for node in node_list:
try:
utils.ping_node(node)
except ValueError as err:
logger.warning(str(err))
node_list.remove(node)
return node_list


def _remove_completer(args):
try:
n = utils.list_cluster_nodes()
95 changes: 76 additions & 19 deletions crmsh/ui_node.py
@@ -3,6 +3,9 @@
# See COPYING for license information.

import re
import subprocess
from argparse import ArgumentParser, RawDescriptionHelpFormatter

from . import config
from . import command
from . import completers as compl
@@ -14,7 +17,6 @@
from . import term
from .cibconfig import cib_factory
from .ui_resource import rm_meta_attribute
from .ui_cluster import parse_option_for_nodes
from . import log


@@ -203,6 +205,63 @@ def print_node(uname, ident, node_type, other, inst_attr, offline):
print(term.render("\t%s" % (s)))


def parse_option_for_nodes(context, *args):
"""
Parse option for nodes
Return a node list
"""
action_type = context.get_command_name()
action_target = "node" if action_type in ["standby", "online"] else "cluster service"
action = "{} {}".format(action_type, action_target)
usage_template = """
Specify node(s) on which to {action}.
If no nodes are specified, {action} on the local node.
If --all is specified, {action} on all nodes."""
addtion_usage = ""
if action_type == "standby":
usage_template += """
\n\nAdditionally, you may specify a lifetime for the standby---if set to
"reboot", the node will be back online once it reboots. "forever" will
keep the node in standby after reboot. The life time defaults to
"forever"."""
addtion_usage = " [lifetime]"

parser = ArgumentParser(description=usage_template.format(action=action),
usage="{} [--all | <node>... ]{}".format(action_type, addtion_usage),
add_help=False,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-h", "--help", action="store_true", dest="help", help="Show this help message")
parser.add_argument("--all", help="To {} on all nodes".format(action), action="store_true", dest="all")

options, args = parser.parse_known_args(args)
if options.help:
parser.print_help()
raise utils.TerminateSubCommand
if options is None or args is None:
raise utils.TerminateSubCommand
if options.all and args:
context.fatal_error("Should either use --all or specific node(s)")

# return local node
if not options.all and not args:
return [utils.this_node()]
member_list = utils.list_cluster_nodes()
if not member_list:
context.fatal_error("Cannot get the node list from cluster")
for node in args:
if node not in member_list:
context.fatal_error("Node \"{}\" is not a cluster node".format(node))

node_list = member_list if options.all else args
for node in node_list:
try:
utils.ping_node(node)
except ValueError as err:
logger.warning(str(err))
node_list.remove(node)
return node_list


class NodeMgmt(command.UI):
'''
Nodes management class
@@ -432,19 +491,20 @@ def do_clearstate(self, context, node=None):
return utils.ext_cmd(self.node_clear_state % ("-M -c", node, node)) == 0 and \
utils.ext_cmd(self.node_clear_state % ("-R", node, node)) == 0

def _call_delnode(self, node):
@classmethod
def call_delnode(cls, node):
"Remove node (how depends on cluster stack)"
rc = True
ec, s = utils.get_stdout("%s -p" % self.crm_node)
ec, s = utils.get_stdout("%s -p" % cls.crm_node)
if not s:
logger.error('%s -p could not list any nodes (rc=%d)', self.crm_node, ec)
logger.error('%s -p could not list any nodes (rc=%d)', cls.crm_node, ec)
rc = False
else:
partition_l = s.split()
if node in partition_l:
logger.error("according to %s, node %s is still active", self.crm_node, node)
logger.error("according to %s, node %s is still active", cls.crm_node, node)
rc = False
cmd = "%s --force -R %s" % (self.crm_node, node)
cmd = "%s --force -R %s" % (cls.crm_node, node)
if not rc:
if config.core.force:
logger.info('proceeding with node %s removal', node)
@@ -458,24 +518,21 @@ def _call_delnode(self, node):
if rc != 0:
logger.error('"%s" failed, rc=%d, %s', cmd, rc, err)
return False
if utils.ext_cmd(cls.node_delete % node) != 0 or \
utils.ext_cmd(cls.node_delete_status % node) != 0:
logger.error("%s removed from membership, but not from CIB!", node)
return False
return True

@command.completers(compl.nodes)
def do_delete(self, context, node):
'usage: delete <node>'
if not utils.is_name_sane(node):
return False
if not xmlutil.is_our_node(node):
logger.error("node %s not found in the CIB", node)
return False
if not self._call_delnode(node):
return False
if utils.ext_cmd(self.node_delete % node) != 0 or \
utils.ext_cmd(self.node_delete_status % node) != 0:
logger.error("%s removed from membership, but not from CIB!", node)
return False
logger.info("node %s deleted", node)
return True
logger.warning('`crm node delete` is deprecated and will very likely be dropped in the near future. It is automatically replaced by `crm cluster remove -c {}`.'.format(node))
if config.core.force:
rc = subprocess.call(['crm', 'cluster', 'remove', '-F', '-c', node])
else:
rc = subprocess.call(['crm', 'cluster', 'remove', '-c', node])
return rc == 0

@command.wait
@command.completers(compl.nodes, compl.choice(['set', 'delete', 'show']), _find_attr)
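Since `call_delnode` is now a classmethod, callers such as `bootstrap.remove_node_from_cluster()` can use it without instantiating `NodeMgmt`. A minimal usage sketch, assuming crmsh is importable; "alice" is a placeholder node name:

from crmsh.ui_node import NodeMgmt

# Remove the node from membership and the CIB; False indicates failure.
if not NodeMgmt.call_delnode("alice"):
    raise SystemExit("Failed to remove alice.")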
15 changes: 10 additions & 5 deletions doc/crm.8.adoc
@@ -2077,13 +2077,10 @@
clearstate <node>
...............

[[cmdhelp_node_delete,delete node]]
[[cmdhelp_node_delete,delete node (deprecated)]]
==== `delete`

Delete a node. This command will remove the node from the CIB
and, in case the cluster stack is running, use the appropriate
program (`crm_node` or `hb_delnode`) to remove the node from the
membership.
Remove a node from the cluster.

If the node is still listed as active and a member of our
partition we refuse to remove it. With the global force option
@@ -2094,7 +2091,15 @@ Usage:
delete <node>
...............

.Deprecation note
*****
This command is deprecated in favor of `crm cluster remove [-F] -c <node>`,
which also adjusts the related cluster configuration and cleans up leftovers
(e.g. stopping the cluster services) on the removed node.
*****

[[cmdhelp_node_fence,fence node]]

==== `fence`

Make CRM fence a node. This functionality depends on stonith
