42 changes: 40 additions & 2 deletions crmsh/bootstrap.py
@@ -36,6 +36,7 @@
 from . import userdir
 from .constants import QDEVICE_HELP_INFO, STONITH_TIMEOUT_DEFAULT,\
     REJOIN_COUNT, REJOIN_INTERVAL, PCMK_DELAY_MAX, CSYNC2_SERVICE, WAIT_TIMEOUT_MS_DEFAULT
+from . import cluster_fs
 from . import qdevice
 from . import parallax
 from . import log
@@ -70,7 +71,7 @@
     "/etc/samba/smb.conf", SYSCONFIG_NFS, SYSCONFIG_PCMK, SBDManager.SYSCONFIG_SBD, PCMK_REMOTE_AUTH, watchdog.Watchdog.WATCHDOG_CFG,
     PROFILES_FILE, CRM_CFG, SBDManager.SBD_SYSTEMD_DELAY_START_DIR)

-INIT_STAGES_EXTERNAL = ("ssh", "csync2", "corosync", "sbd", "cluster", "admin", "qdevice")
+INIT_STAGES_EXTERNAL = ("ssh", "csync2", "corosync", "sbd", "cluster", "ocfs2", "gfs2", "admin", "qdevice")
 INIT_STAGES_INTERNAL = ("csync2_remote", "qnetd_remote")
 INIT_STAGES_ALL = INIT_STAGES_EXTERNAL + INIT_STAGES_INTERNAL
 JOIN_STAGES_EXTERNAL = ("ssh", "csync2", "ssh_merge", "cluster")
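The two new stage names plug into the same name-based dispatch that serves the existing stages. A minimal sketch of that lookup, assuming the `init_` prefix convention used by the handlers added below; crmsh's actual driver in `bootstrap_init()` may differ in signature and error handling:

```python
# Minimal sketch: resolve a stage name such as "ocfs2" or "gfs2" to its
# init_* handler by name (hedged; the real dispatch may differ).
def run_init_stage(stage: str) -> None:
    if stage not in INIT_STAGES_ALL:
        raise ValueError(f"Invalid stage: {stage}")
    globals()["init_" + stage]()  # e.g. init_ocfs2() or init_gfs2()
```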
@@ -111,6 +112,10 @@
         self.qdevice_heuristics = None
         self.qdevice_heuristics_mode = None
         self.qdevice_rm_flag = None
+        self.ocfs2_devices = []
+        self.gfs2_devices = []
+        self.use_cluster_lvm2 = None
+        self.mount_point = None
         self.cluster_node = None
         self.force = None
         self.arbitrator = None
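The four new `Context` fields carry the cluster-filesystem settings from the command line into the bootstrap run. A hedged illustration of how they might line up with the init options: `-o/--ocfs2-device`, `-C/--use-cluster-lvm2`, and `-m/--mount-point` are existing crmsh init options, while a `-g/--gfs2-device` counterpart for GFS2 is an assumption based on the OCFS2 naming.

```python
# Hedged illustration only; the real wiring happens in crmsh's option parsing.
ctx = Context()
ctx.ocfs2_devices = ["/dev/disk/by-id/scsi-example-part1"]  # -o/--ocfs2-device
ctx.gfs2_devices = []                                       # assumed -g/--gfs2-device
ctx.use_cluster_lvm2 = True                                 # -C/--use-cluster-lvm2
ctx.mount_point = "/srv/clusterfs"                          # -m/--mount-point
```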
@@ -269,7 +274,7 @@
         if self.type == "init":
             if self.stage not in INIT_STAGES_ALL:
                 utils.fatal(f"Invalid stage: {self.stage} (available stages: {', '.join(INIT_STAGES_EXTERNAL)})")
-            if self.stage in ("admin", "qdevice") and not self.cluster_is_running:
+            if self.stage in ("admin", "qdevice", "ocfs2", "gfs2") and not self.cluster_is_running:
                 utils.fatal(f"Cluster is inactive, can't run '{self.stage}' stage")
             if self.stage in ("corosync", "cluster") and self.cluster_is_running:
                 utils.fatal(f"Cluster is active, can't run '{self.stage}' stage")
@@ -288,6 +293,8 @@
         """
         if self.qdevice_inst:
             self.qdevice_inst.valid_qdevice_options()
+        if self.ocfs2_devices or self.gfs2_devices or self.stage in ("ocfs2", "gfs2"):
+            cluster_fs.ClusterFSManager.pre_verify(self)
         if not self.skip_csync2 and self.type == "init":
             self.skip_csync2 = utils.get_boolean(os.getenv("SKIP_CSYNC2_SYNC"))
         if self.skip_csync2 and self.stage:
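`pre_verify()` runs during option validation, before any stage executes, so device and option conflicts surface early. A hypothetical sketch of the kind of checks it might perform; the real implementation lives in `crmsh/cluster_fs.py` and may differ:

```python
# Hypothetical sketch only; the actual checks belong to cluster_fs.ClusterFSManager.
@staticmethod
def pre_verify(ctx):
    if ctx.ocfs2_devices and ctx.gfs2_devices:
        utils.fatal("OCFS2 and GFS2 cannot be configured in the same run")
    if ctx.stage in ("ocfs2", "gfs2") and not (ctx.ocfs2_devices or ctx.gfs2_devices):
        utils.fatal(f"No device specified for the '{ctx.stage}' stage")
```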
@@ -1402,6 +1409,26 @@
     _context.sbd_manager.init_and_deploy_sbd()


+def init_ocfs2():
+    """
+    OCFS2 configuration process
+    """
+    if not _context.ocfs2_devices:
+        return
+    ocfs2_manager = cluster_fs.ClusterFSManager(_context)
+    ocfs2_manager.init()
+
+
+def init_gfs2():
+    """
+    GFS2 configuration process
+    """
+    if not _context.gfs2_devices:
+        return
+    gfs2_manager = cluster_fs.ClusterFSManager(_context)
+    gfs2_manager.init()
+
+
 def init_cluster():
     """
     Initial cluster configuration.
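Both new stage handlers are deliberate no-ops when no device was configured, so the init sequence can call them unconditionally; they differ only in which device list gates them. A hypothetical consolidation, shown purely to highlight the shared shape (the patch keeps two named functions so the stage dispatcher can find each by name):

```python
# Hypothetical consolidation, not what the patch does: each handler
# reduces to "do nothing unless devices were configured, else delegate".
def _init_cluster_fs(devices):
    if not devices:
        return  # no device options were given, so the stage is a no-op
    cluster_fs.ClusterFSManager(_context).init()
```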
@@ -2187,6 +2214,8 @@
         init_cluster()
         init_admin()
         init_qdevice()
+        init_ocfs2()
+        init_gfs2()
     except lock.ClaimLockError as err:
         utils.fatal(err)

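The two new calls are appended after `init_qdevice()`, so cluster-filesystem setup only ever runs against an active cluster, matching the stage validation above. A hedged summary of the tail of the init pipeline (earlier steps such as ssh, csync2, corosync, and sbd are elided):

```python
# Hedged summary of the tail of bootstrap's init path after this change.
for step in (init_cluster, init_admin, init_qdevice, init_ocfs2, init_gfs2):
    step()  # init_ocfs2/init_gfs2 return immediately without devices
```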
@@ -2284,6 +2313,7 @@
         join_csync2(cluster_node, remote_user)
         join_ssh_merge(cluster_node, remote_user)
         probe_partitions()
+        join_cluster_fs(cluster_node, remote_user)
         join_cluster(cluster_node, remote_user)
     except (lock.SSHError, lock.ClaimLockError) as err:
         utils.fatal(err)
@@ -2295,6 +2325,14 @@
     logger.info("Done (log saved to %s on %s)", log.CRMSH_LOG_FILE, utils.this_node())


+def join_cluster_fs(peer_host, peer_user):
+    """
+    If the init node configured an OCFS2/GFS2 device, verify that device on the join node
+    """
+    inst = cluster_fs.ClusterFSManager(_context)
+    inst.join(peer_host)
+
+
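On the joining side the manager only needs to confirm that it can see the same shared device(s) the init node configured, which is why `join_cluster_fs()` runs right after partition probing and before `join_cluster()`. A hypothetical sketch of what `join()` might do; the helper names here are invented for illustration, and the real logic lives in `crmsh/cluster_fs.py`:

```python
# Hypothetical sketch; fetch_configured_devices() and is_block_device()
# are invented stand-ins for whatever cluster_fs actually uses.
def join(self, peer_host):
    for dev in fetch_configured_devices(peer_host):
        if not is_block_device(dev):
            utils.fatal(f"{dev} is configured on {peer_host} "
                        f"but not visible on {utils.this_node()}")
```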
 def remove_qdevice() -> None:
     """
     Remove qdevice service and configuration from cluster