Skip to content

Commit

Permalink
web UI: add support for SBD
Browse files Browse the repository at this point in the history
  • Loading branch information
ondrejmular committed Jun 9, 2016
1 parent 188ca95 commit c45a2e7
Show file tree
Hide file tree
Showing 13 changed files with 761 additions and 37 deletions.
3 changes: 2 additions & 1 deletion pcs/cli/common/lib_wrapper.py
Expand Up @@ -136,7 +136,8 @@ def load_module(env, middleware_factory, name):
"enable_sbd": sbd.enable_sbd,
"disable_sbd": sbd.disable_sbd,
"get_cluster_sbd_status": sbd.get_cluster_sbd_status,
"get_cluster_sbd_config": sbd.get_cluster_sbd_config
"get_cluster_sbd_config": sbd.get_cluster_sbd_config,
"get_local_sbd_config": sbd.get_local_sbd_config,
}
)

Expand Down
5 changes: 5 additions & 0 deletions pcs/lib/commands/sbd.py
Expand Up @@ -341,6 +341,11 @@ def get_sbd_config(node):
return config_list


def get_local_sbd_config(lib_env):
    """
    Read the SBD configuration of the local node and return it as a dict.

    lib_env -- LibraryEnvironment
    Raises LibraryError on CMAN clusters, where SBD is not supported.
    """
    __ensure_not_cman(lib_env)
    raw_config = sbd.get_local_sbd_config()
    return environment_file_to_dict(raw_config)


def _get_cluster_nodes(lib_env):
return lib_env.get_corosync_conf().get_nodes()

Expand Down
6 changes: 6 additions & 0 deletions pcs/stonith.py
Expand Up @@ -455,6 +455,8 @@ def sbd_cmd(lib, argv, modifiers):
sbd_status(lib, argv, modifiers)
elif cmd == "config":
sbd_config(lib, argv, modifiers)
elif cmd == "local_config_in_json":
local_sbd_config(lib, argv, modifiers)
else:
raise CmdLineInputError()
except CmdLineInputError as e:
Expand Down Expand Up @@ -558,3 +560,7 @@ def sbd_config(lib, argv, modifiers):
node=config["node"].label,
watchdog=watchdog
))


def local_sbd_config(lib, argv, modifiers):
    """Print the local node's SBD configuration as a JSON document."""
    config = lib.sbd.get_local_sbd_config()
    print(json.dumps(config))
74 changes: 73 additions & 1 deletion pcs/test/test_lib_commands_sbd.py
Expand Up @@ -17,7 +17,11 @@
from pcs.test.tools.custom_mock import MockLibraryReportProcessor

from pcs.common import report_codes
from pcs.lib.errors import ReportItemSeverity as Severities
from pcs.lib.errors import (
ReportItemSeverity as Severities,
LibraryError,
ReportItem,
)
from pcs.lib.env import LibraryEnvironment
from pcs.lib.node import (
NodeAddresses,
Expand Down Expand Up @@ -610,3 +614,71 @@ def test_cman_cluster(self, mock_sbd_cfg, mock_get_nodes):
{}
)
)


@mock.patch("pcs.lib.sbd.get_local_sbd_config")
class GetLocalSbdConfigTest(TestCase):
    # get_local_sbd_config is patched at class level, so every test method
    # receives the mock as its last positional argument (config_mock).

    def setUp(self):
        self.env = mock.MagicMock(spec_set=LibraryEnvironment)

    def test_success(self, config_mock):
        # A well-formed config file is parsed into a key -> value dict;
        # quoting of values is preserved verbatim.
        self.env.is_cman_cluster = False
        config_mock.return_value = """
# This file has been generated by pcs.
SBD_OPTS="-n node1"
SBD_WATCHDOG_DEV=/dev/watchdog
SBD_WATCHDOG_TIMEOUT=0
"""

        expected = {
            "SBD_OPTS": '"-n node1"',
            "SBD_WATCHDOG_DEV": "/dev/watchdog",
            "SBD_WATCHDOG_TIMEOUT": "0",
        }
        self.assertEqual(expected, cmd_sbd.get_local_sbd_config(self.env))
        self.assertEqual(1, config_mock.call_count)

    def test_cman_cluster(self, config_mock):
        # On CMAN clusters the command must fail before even reading
        # the config file.
        self.env.is_cman_cluster = True
        assert_raise_library_error(
            lambda: cmd_sbd.get_local_sbd_config(self.env),
            (
                Severities.ERROR,
                report_codes.CMAN_UNSUPPORTED_COMMAND,
                {}
            )
        )
        self.assertEqual(0, config_mock.call_count)

    def test_file_error(self, config_mock):
        # Errors raised while reading the config file propagate unchanged.
        self.env.is_cman_cluster = False
        config_mock.side_effect = LibraryError(ReportItem.error(
            report_codes.UNABLE_TO_GET_SBD_CONFIG,
            "message"
        ))
        assert_raise_library_error(
            lambda: cmd_sbd.get_local_sbd_config(self.env),
            (
                Severities.ERROR,
                report_codes.UNABLE_TO_GET_SBD_CONFIG,
                {}
            )
        )

    def test_parse_error(self, config_mock):
        # Content that is not in KEY=value form is reported as a parse error.
        self.env.is_cman_cluster = False
        config_mock.return_value = """
# This file has been generated by pcs.
invalid_file
"""
        assert_raise_library_error(
            lambda: cmd_sbd.get_local_sbd_config(self.env),
            (
                Severities.ERROR,
                report_codes.PARSE_ERROR_SBD_CONFIG,
                {}
            )
        )

43 changes: 35 additions & 8 deletions pcsd/cluster_entity.rb
Expand Up @@ -1000,7 +1000,7 @@ def initialize(op_element)
class Node < JSONable
attr_accessor :id, :error_list, :warning_list, :status, :quorum, :uptime,
:name, :corosync, :pacemaker, :cman, :corosync_enabled,
:pacemaker_enabled, :pcsd_enabled
:pacemaker_enabled, :pcsd_enabled, :services, :sbd_config

def initialize
@id = nil
Expand All @@ -1010,22 +1010,49 @@ def initialize
@quorum = nil
@uptime = 'unknown'
@name = nil
@services = {
:pacemaker => {
:running => nil,
:enabled => nil
},
:corosync => {
:running => nil,
:enabled => nil
},
:pcsd => {
:running => nil,
:enabled => nil
},
:sbd => {
:installed => nil,
:running => nil,
:enabled => nil
}
}
@corosync = false
@pacemaker = false
@cman = false
@corosync_enabled = false
@pacemaker_enabled = false
@pcsd_enabled = false
@sbd_config = nil
end

def self.load_current_node(crm_dom=nil)
node = ClusterEntity::Node.new
node.corosync = corosync_running?
node.corosync_enabled = corosync_enabled?
node.pacemaker = pacemaker_running?
node.pacemaker_enabled = pacemaker_enabled?
node.cman = cman_running?
node.pcsd_enabled = pcsd_enabled?
node.services.each do |service, info|
info[:running] = is_service_running?(service.to_s)
info[:enabled] = is_service_enabled?(service.to_s)
end
if ISSYSTEMCTL
node.services[:sbd][:installed] = is_service_installed?('sbd')
end
node.corosync = node.services[:corosync][:running]
node.corosync_enabled = node.services[:corosync][:enabled]
node.pacemaker = node.services[:pacemaker][:running]
node.pacemaker_enabled = node.services[:pacemaker][:enabled]
node.cman = is_service_running?('cman')
node.pcsd_enabled = node.services[:pcsd][:enabled]

node_online = (node.corosync and node.pacemaker)
node.status = node_online ? 'online' : 'offline'
Expand All @@ -1044,7 +1071,7 @@ def self.load_current_node(crm_dom=nil)
else
node.status = 'offline'
end

node.sbd_config = get_parsed_local_sbd_config()
return node
end
end
Expand Down
79 changes: 55 additions & 24 deletions pcsd/pcs.rb
Expand Up @@ -408,9 +408,12 @@ def send_request(auth_user, node, request, post=false, data={}, remote=true, raw
end
end

def add_node(auth_user, new_nodename, all=false, auto_start=true)
def add_node(auth_user, new_nodename, all=false, auto_start=true, watchdog=nil)
if all
command = [PCS, "cluster", "node", "add", new_nodename]
if watchdog and not watchdog.strip.empty?
command << "--watchdog=#{watchdog.strip}"
end
if auto_start
command << '--start'
command << '--enable'
Expand Down Expand Up @@ -821,14 +824,6 @@ def disable_cluster(auth_user)
return true
end

def corosync_running?()
is_service_running?('corosync')
end

def corosync_enabled?()
is_service_enabled?('corosync')
end

def get_corosync_version()
begin
stdout, stderror, retval = run_cmd(
Expand All @@ -850,10 +845,6 @@ def pacemaker_running?()
is_service_running?('pacemaker')
end

def pacemaker_enabled?()
is_service_enabled?('pacemaker')
end

def get_pacemaker_version()
begin
stdout, stderror, retval = run_cmd(
Expand All @@ -871,10 +862,6 @@ def get_pacemaker_version()
return nil
end

def cman_running?()
is_service_running?('cman')
end

def get_cman_version()
begin
stdout, stderror, retval = run_cmd(
Expand Down Expand Up @@ -914,10 +901,6 @@ def pcsd_restart()
}
end

def pcsd_enabled?()
is_service_enabled?('pcsd')
end

def get_pcsd_version()
  # Version of pcsd itself as an array of integers, e.g. "0.9.151"
  # becomes [0, 9, 151].
  PCS_VERSION.split('.').map(&:to_i)
end
Expand Down Expand Up @@ -1398,6 +1381,7 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
:status => 'unknown',
:node_list => [],
:resource_list => [],
:available_features => [],
}

threads = []
Expand Down Expand Up @@ -1426,6 +1410,7 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
}
begin
parsed_response = JSON.parse(response, {:symbolize_names => true})
parsed_response[:available_features] ||= []
if parsed_response[:noresponse]
node_map[node][:node] = {}
node_map[node][:node].update(node_status_unknown)
Expand Down Expand Up @@ -1510,6 +1495,26 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
}
end
status.delete(:node)
sbd_enabled = []
sbd_running = []
node_map.each { |_, cluster_status|
# create set of available features on all nodes
# it is intersection of available features from all nodes
if cluster_status[:node][:status] != 'unknown'
status[:available_features] &= cluster_status[:available_features]
end
if (
cluster_status[:node][:services] and
cluster_status[:node][:services][:sbd]
)
if cluster_status[:node][:services][:sbd][:enabled]
sbd_enabled << cluster_status[:node][:name]
end
if cluster_status[:node][:services][:sbd][:running]
sbd_running << cluster_status[:node][:name]
end
end
}

if status[:quorate]
fence_count = 0
Expand All @@ -1518,9 +1523,9 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
fence_count += 1
end
}
if fence_count == 0
if fence_count == 0 and sbd_enabled.empty?
status[:warning_list] << {
:message => 'No fence devices configured in the cluster',
:message => 'No fencing configured in the cluster',
}
end

Expand All @@ -1530,6 +1535,18 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
:message => 'Stonith is not enabled',
}
end
if sbd_enabled.length == node_map.length and sbd_running.empty?
status[:warning_list] << {
:message => 'SBD is enabled but not running. Restart of cluster is' +
' required.'
}
end
if sbd_running.length == node_map.length and sbd_enabled.empty?
status[:warning_list] << {
:message => 'SBD is disabled but it is still running. Restart of' +
' cluster is required.'
}
end
end

if not_authorized_nodes.length > 0
Expand Down Expand Up @@ -1612,7 +1629,8 @@ def get_node_status(auth_user, cib_dom)
:fence_levels => get_fence_levels(auth_user, cib_dom),
:node_attr => node_attrs_to_v2(get_node_attributes(auth_user, cib_dom)),
:nodes_utilization => get_nodes_utilization(cib_dom),
:known_nodes => []
:known_nodes => [],
:available_features => ['sbd']
}

nodes = get_nodes_status()
Expand Down Expand Up @@ -1890,3 +1908,16 @@ def set_cluster_prop_force(auth_user, prop, val)
end
return (retcode == 0)
end

def get_parsed_local_sbd_config()
  # Ask pcs for the local SBD configuration serialized as JSON and parse it.
  # Returns the parsed config on success, nil when the pcs command fails or
  # its output is not valid JSON.
  command = [PCS, 'stonith', 'sbd', 'local_config_in_json']
  output, _stderr, exit_code = run_cmd(PCSAuth.getSuperuserAuth(), *command)
  return nil unless exit_code == 0
  begin
    JSON.parse(output.join(' '))
  rescue JSON::ParserError
    nil
  end
end

0 comments on commit c45a2e7

Please sign in to comment.