From 26f54ba7bd9e0418cfca5c76752b0381c932fe4f Mon Sep 17 00:00:00 2001 From: mulhern Date: Tue, 2 Jun 2015 15:44:06 -0400 Subject: [PATCH 1/6] Add BTRFSValueError error and use in btrfs related code (#1202877) Related: rhbz#1019685 Related: rhbz#1202877 (cherry picked from commit d11015c37f20fbcc25ec28cb48b50542d8e28141) From BTRFS related methods that have ValueError-like exceptions raise BTRFSValueError instead of ValueError. Note that this will not change the behavior of code that calls these method and catches a ValueError since BTRFSValueError extends ValueError. It can change the behavior of calling code where a BTRFSError is expected; since BTRFSValueError is also a subtype of BTRFSError the formerly ValueError exception will be caught where previously it would not. There are no instances like this in anaconda. There are two in blivet, in devices.py, and in both cases, this change seems to be an improvement. Signed-off-by: mulhern --- blivet/devicelibs/btrfs.py | 20 ++++++++++---------- blivet/devices/btrfs.py | 14 +++++++------- blivet/errors.py | 3 +++ 3 files changed, 20 insertions(+), 17 deletions(-) diff --git a/blivet/devicelibs/btrfs.py b/blivet/devicelibs/btrfs.py index eeba83b0b..12612eb5a 100644 --- a/blivet/devicelibs/btrfs.py +++ b/blivet/devicelibs/btrfs.py @@ -25,7 +25,7 @@ from . import raid from .. import util -from ..errors import BTRFSError +from ..errors import BTRFSError, BTRFSValueError import logging log = logging.getLogger("blivet") @@ -64,9 +64,9 @@ def create_volume(devices, label=None, data=None, metadata=None): recognizes the string. 
""" if not devices: - raise ValueError("no devices specified") + raise BTRFSValueError("no devices specified") elif any([not os.path.exists(d) for d in devices]): - raise ValueError("one or more specified devices not present") + raise BTRFSValueError("one or more specified devices not present") args = [] if data: @@ -90,20 +90,20 @@ def create_volume(devices, label=None, data=None, metadata=None): def add(mountpoint, device): if not os.path.ismount(mountpoint): - raise ValueError("volume not mounted") + raise BTRFSValueError("volume not mounted") return btrfs(["device", "add", device, mountpoint]) def remove(mountpoint, device): if not os.path.ismount(mountpoint): - raise ValueError("volume not mounted") + raise BTRFSValueError("volume not mounted") return btrfs(["device", "delete", device, mountpoint]) def create_subvolume(mountpoint, name): """Create a subvolume named name below mountpoint mountpoint.""" if not os.path.ismount(mountpoint): - raise ValueError("volume not mounted") + raise BTRFSValueError("volume not mounted") path = os.path.normpath("%s/%s" % (mountpoint, name)) args = ["subvol", "create", path] @@ -111,7 +111,7 @@ def create_subvolume(mountpoint, name): def delete_subvolume(mountpoint, name): if not os.path.ismount(mountpoint): - raise ValueError("volume not mounted") + raise BTRFSValueError("volume not mounted") path = os.path.normpath("%s/%s" % (mountpoint, name)) args = ["subvol", "delete", path] @@ -123,7 +123,7 @@ def delete_subvolume(mountpoint, name): # get a list of subvolumes from a mounted btrfs filesystem def list_subvolumes(mountpoint, snapshots_only=False): if not os.path.ismount(mountpoint): - raise ValueError("volume not mounted") + raise BTRFSValueError("volume not mounted") args = ["subvol", "list", "-p", mountpoint] if snapshots_only: @@ -139,7 +139,7 @@ def list_subvolumes(mountpoint, snapshots_only=False): def get_default_subvolume(mountpoint): if not os.path.ismount(mountpoint): - raise ValueError("volume not mounted") + 
raise BTRFSValueError("volume not mounted") args = ["subvol", "get-default", mountpoint] buf = btrfs(args, capture=True) @@ -170,7 +170,7 @@ def create_snapshot(source, dest, ro=False): :keyword bool ro: whether to create a read-only snapshot """ if not os.path.ismount(source): - raise ValueError("source is not a mounted subvolume") + raise BTRFSValueError("source is not a mounted subvolume") args = ["subvol", "snapshot"] if ro: diff --git a/blivet/devices/btrfs.py b/blivet/devices/btrfs.py index 23874ae10..bd9dd07fa 100644 --- a/blivet/devices/btrfs.py +++ b/blivet/devices/btrfs.py @@ -49,7 +49,7 @@ def __init__(self, *args, **kwargs): args = ("btrfs.%d" % self.id,) if kwargs.get("parents") is None: - raise ValueError("BTRFSDevice must have at least one parent") + raise errors.BTRFSValueError("BTRFSDevice must have at least one parent") self.req_size = kwargs.pop("size", None) super(BTRFSDevice, self).__init__(*args, **kwargs) @@ -263,13 +263,13 @@ def _removeParent(self, member): def _addSubVolume(self, vol): if vol.name in [v.name for v in self.subvolumes]: - raise ValueError("subvolume %s already exists" % vol.name) + raise errors.BTRFSValueError("subvolume %s already exists" % vol.name) self.subvolumes.append(vol) def _removeSubVolume(self, name): if name not in [v.name for v in self.subvolumes]: - raise ValueError("cannot remove non-existent subvolume %s" % name) + raise errors.BTRFSValueError("cannot remove non-existent subvolume %s" % name) names = [v.name for v in self.subvolumes] self.subvolumes.pop(names.index(name)) @@ -566,13 +566,13 @@ def __init__(self, *args, **kwargs): source = kwargs.pop("source", None) if not kwargs.get("exists") and not source: # it is possible to remove a source subvol and keep snapshots of it - raise ValueError("non-existent btrfs snapshots must have a source") + raise errors.BTRFSValueError("non-existent btrfs snapshots must have a source") if source and not isinstance(source, BTRFSDevice): - raise ValueError("btrfs snapshot 
source must be a btrfs subvolume") + raise errors.BTRFSValueError("btrfs snapshot source must be a btrfs subvolume") if source and not source.exists: - raise ValueError("btrfs snapshot source must already exist") + raise errors.BTRFSValueError("btrfs snapshot source must already exist") self.source = source """ the snapshot's source subvolume """ @@ -584,7 +584,7 @@ def __init__(self, *args, **kwargs): if source and getattr(source, "volume", source) != self.volume: self.volume._removeSubVolume(self.name) self.parents = [] - raise ValueError("btrfs snapshot and source must be in the same volume") + raise errors.BTRFSValueError("btrfs snapshot and source must be in the same volume") def _create(self): log_method_call(self, self.name, status=self.status) diff --git a/blivet/errors.py b/blivet/errors.py index e48b64972..51fd1f01e 100644 --- a/blivet/errors.py +++ b/blivet/errors.py @@ -146,6 +146,9 @@ class LoopError(StorageError): class BTRFSError(StorageError): pass +class BTRFSValueError(BTRFSError, ValueError): + pass + # DeviceTree class DeviceTreeError(StorageError): pass From 44dcff23eeac9ef428fa1f577fe0fe5f6cca20c9 Mon Sep 17 00:00:00 2001 From: mulhern Date: Tue, 2 Jun 2015 17:20:01 -0400 Subject: [PATCH 2/6] Check if device has enough members when setting RAID level (#1202877) Related: rhbz#1019685 Related: rhbz#1202877 (cherry picked from commit 08d1d1042adcd9dda535e4681edb2bf10b7e6fad) RAID levels have minimum member requirements. Level setters now raise a BTRFSValueError if device does not have enough members to meet the minimum requirement. However, if the device exists its members may not be known yet, so the check is omitted. Also, RaidError is rethrown as BTRFSValueError in level setter if the level is not valid for its use. Previously the RaidError was just propagated. Also, self._dataLevel and self._metaDataLevel are explicitly assigned to in __init__ to avoid the need to disable pylint attribute-defined-outside-init warning in setter function. 
The assignment is all on one line, because that is its sole purpose, and with the comment to explain it it still takes up two lines. Signed-off-by: mulhern --- blivet/devices/btrfs.py | 60 +++++++++++++++++++++++++++++++++++------ 1 file changed, 52 insertions(+), 8 deletions(-) diff --git a/blivet/devices/btrfs.py b/blivet/devices/btrfs.py index bd9dd07fa..746898107 100644 --- a/blivet/devices/btrfs.py +++ b/blivet/devices/btrfs.py @@ -161,6 +161,9 @@ def __init__(self, *args, **kwargs): super(BTRFSVolumeDevice, self).__init__(*args, **kwargs) + # avoid attribute-defined-outside-init pylint warning + self._dataLevel = self._metaDataLevel = None + # assign after constructor to avoid AttributeErrors in setter functions self.dataLevel = dataLevel self.metaDataLevel = metaDataLevel @@ -180,6 +183,49 @@ def __init__(self, *args, **kwargs): self._defaultSubVolumeID = None + def _validateRaidLevel(self, level): + """ Returns an error message if the RAID level is invalid for this + device, otherwise None. + + :param level: a RAID level + :type level: :class:`~.devicelibs.raid.RAIDLevel` + :returns: an error message if the RAID level is invalid, else None + :rtype: str or NoneType + """ + if not self.exists and len(self.parents) < level.min_members: + return "RAID level %s requires that device have at least %d members, but device has only %d members." % (level, level.min_members, len(self.parents)) + return None + + def _setLevel(self, value, data): + """ Sets a valid level for this device and level type. + + :param object value: value for this RAID level + :param bool data: True if for data, False if for metadata + + :returns: a valid level for value, if any, else None + :rtype: :class:`~.devicelibs.raid.RAIDLevel` or NoneType + + :raises: :class:`~.errors.BTRFSValueError` if value represents + an invalid level. 
+ """ + level = None + if value: + try: + levels = btrfs.RAID_levels if data else btrfs.metadata_levels + level = levels.raidLevel(value) + except errors.RaidError: + data_type_str = "data" if data else "metadata" + raise errors.BTRFSValueError("%s is an invalid value for %s RAID level." % (value, data_type_str)) + + error_msg = self._validateRaidLevel(level) + if error_msg: + raise errors.BTRFSValueError(error_msg) + + if data: + self._dataLevel = level + else: + self._metaDataLevel = level + @property def dataLevel(self): """ Return the RAID level for data. @@ -193,12 +239,11 @@ def dataLevel(self): def dataLevel(self, value): """ Set the RAID level for data. - :param value: new raid level - :param type: a valid raid level descriptor + :param object value: new raid level :returns: None + :raises: :class:`~.errors.BTRFSValueError` """ - # pylint: disable=attribute-defined-outside-init - self._dataLevel = btrfs.RAID_levels.raidLevel(value) if value else None + self._setLevel(value, True) @property def metaDataLevel(self): @@ -213,12 +258,11 @@ def metaDataLevel(self): def metaDataLevel(self, value): """ Set the RAID level for metadata. - :param value: new raid level - :param type: a valid raid level descriptor + :param object value: new raid level :returns: None + :raises: :class:`~.errors.BTRFSValueError` """ - # pylint: disable=attribute-defined-outside-init - self._metaDataLevel = btrfs.metadata_levels.raidLevel(value) if value else None + self._setLevel(value, False) @property def formatImmutable(self): From d29e0d4fcb443e0922726e8deb198227ca51fb3e Mon Sep 17 00:00:00 2001 From: mulhern Date: Wed, 3 Dec 2014 13:27:52 -0500 Subject: [PATCH 3/6] Change allow_degraded_mdraid flag to allow_imperfect_devices (#1202877) Related: rhbz#1090009 Related: rhbz#1202877 (cherry picked from commit 0eb434ba53d5c122205e56f7bad322780c3b65f8) The only change is a name change. 
The plan is to use this flag for situations where it is better to use the imperfect device than to deactivate or remove it or otherwise get it out of sight. Generally, when installing, imperfect devices are best avoided, because you don't really want to install onto them. But when in installer rescue mode and many other situations, it is better to expose an imperfect but still usable device. Signed-off-by: mulhern --- blivet/devicetree.py | 6 +++--- blivet/flags.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/blivet/devicetree.py b/blivet/devicetree.py index 760bd3f19..9870348ae 100644 --- a/blivet/devicetree.py +++ b/blivet/devicetree.py @@ -873,7 +873,7 @@ def addUdevMDDevice(self, info): return # try to get the device again now that we've got all the slaves - device = self.getDeviceByName(name, incomplete=flags.allow_degraded_mdraid) + device = self.getDeviceByName(name, incomplete=flags.allow_imperfect_devices) if device is None: try: @@ -881,7 +881,7 @@ def addUdevMDDevice(self, info): except KeyError: log.warning("failed to obtain uuid for mdraid device") else: - device = self.getDeviceByUuid(uuid, incomplete=flags.allow_degraded_mdraid) + device = self.getDeviceByUuid(uuid, incomplete=flags.allow_imperfect_devices) # if we get here, we found all of the slave devices and # something must be wrong -- if all of the slaves are in @@ -1150,7 +1150,7 @@ def addUdevDevice(self, info): if device is None and udev.device_is_md(info): device = self.getDeviceByName( udev.device_get_md_name(info), - incomplete=flags.allow_degraded_mdraid) + incomplete=flags.allow_imperfect_devices) if device and not isinstance(device, MDRaidArrayDevice): # make sure any device we found is an md device device = None diff --git a/blivet/flags.py b/blivet/flags.py index e293794e1..459bd6929 100644 --- a/blivet/flags.py +++ b/blivet/flags.py @@ -63,7 +63,7 @@ def __init__(self): self.boot_cmdline = {} self.update_from_boot_cmdline() - self.allow_degraded_mdraid = 
True + self.allow_imperfect_devices = True def get_boot_cmdline(self): buf = open("/proc/cmdline").read().strip() @@ -101,7 +101,7 @@ def update_from_anaconda_flags(self, anaconda_flags): self.gpt = anaconda_flags.gpt self.multipath_friendly_names = anaconda_flags.mpathFriendlyNames - self.allow_degraded_mdraid = anaconda_flags.rescue_mode + self.allow_imperfect_devices = anaconda_flags.rescue_mode self.ibft = anaconda_flags.ibft self.dmraid = anaconda_flags.dmraid From 98b3451b514595546529da931aeceb1f5f9ffacb Mon Sep 17 00:00:00 2001 From: mulhern Date: Thu, 11 Dec 2014 15:28:48 -0500 Subject: [PATCH 4/6] Add mdrun method to just start, not assemble, an array. (#1202877) Related: rhbz#1090009 Related: rhbz#1202877 (cherry-picked from commit 56c623ce96a849c62e4cbab1f2dfa6ff62324424) Signed-off-by: mulhern --- blivet/devicelibs/mdraid.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/blivet/devicelibs/mdraid.py b/blivet/devicelibs/mdraid.py index e9a8bcbb9..61978702a 100644 --- a/blivet/devicelibs/mdraid.py +++ b/blivet/devicelibs/mdraid.py @@ -248,6 +248,20 @@ def mddeactivate(device): except MDRaidError as msg: raise MDRaidError("mddeactivate failed for %s: %s" % (device, msg)) +def mdrun(device): + """Start a possibly degraded array. + + :param str device: the device to be started + + :raises :class:`~.errors.MDRaidError`: on failure + """ + args = ["--run", device] + + try: + mdadm(args) + except MDRaidError as msg: + raise MDRaidError("mdrun failed for %s: %s" % (device, msg)) + def process_UUIDS(info, UUID_keys): """ Extract and convert expected UUIDs to canonical form. Reassign canonicalized UUIDs to corresponding keys. 
From a7a4afdbfbda9ba4abc2f4bf3ebc17062beb5c2d Mon Sep 17 00:00:00 2001 From: mulhern Date: Thu, 11 Dec 2014 16:14:13 -0500 Subject: [PATCH 5/6] Add a method that looks at DEVNAME (#1202877) Related: rhbz#1090009 Related: rhbz#1202877 (cherry-picked from commit bfb93d682e7270fdf73b5fd8b3c80c9ea699d3f2) Signed-off-by: mulhern --- blivet/udev.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/blivet/udev.py b/blivet/udev.py index d84d88ebf..e6294dd6d 100644 --- a/blivet/udev.py +++ b/blivet/udev.py @@ -324,6 +324,9 @@ def device_get_major(info): def device_get_minor(info): return int(info["MINOR"]) +def device_get_devname(info): + return info.get('DEVNAME') + def device_get_md_level(info): """ Returns the RAID level of the array of which this device is a member. From e633c16d62eddace92fcd476f4456f53aff7101a Mon Sep 17 00:00:00 2001 From: mulhern Date: Thu, 11 Dec 2014 16:29:32 -0500 Subject: [PATCH 6/6] If allowing degraded array, attempt to start it (#1202877) Resolves: rhbz#1090009 Related: rhbz#1202877 (cherry picked from commit ddb2e1756600d918df9df10384deb3364a9bbc3b) If we can't get the name for the array, that's probably because the array is in a state such that udevadm doesn't have the info. Try to run the array and get the info again. Signed-off-by: mulhern --- blivet/devicetree.py | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) diff --git a/blivet/devicetree.py b/blivet/devicetree.py index 9870348ae..dbcbfd83a 100644 --- a/blivet/devicetree.py +++ b/blivet/devicetree.py @@ -1148,11 +1148,31 @@ def addUdevDevice(self, info): log.info("scanning %s (%s)...", name, sysfs_path) device = self.getDeviceByName(name) if device is None and udev.device_is_md(info): - device = self.getDeviceByName( - udev.device_get_md_name(info), - incomplete=flags.allow_imperfect_devices) + + # If the md name is None, then some udev info is missing. Likely, + # this is because the array is degraded, and mdadm has deactivated + # it. 
Try to activate it and re-get the udev info. + if flags.allow_imperfect_devices and udev.device_get_md_name(info) is None: + devname = udev.device_get_devname(info) + if devname: + try: + mdraid.mdrun(devname) + except MDRaidError as e: + log.warning("Failed to start possibly degraded md array: %s", e) + else: + udev.settle() + info = udev.get_device(sysfs_path) + else: + log.warning("Failed to get devname for possibly degraded md array.") + + md_name = udev.device_get_md_name(info) + if md_name is None: + log.warning("No name for possibly degraded md array.") + else: + device = self.getDeviceByName(md_name, incomplete=flags.allow_imperfect_devices) + if device and not isinstance(device, MDRaidArrayDevice): - # make sure any device we found is an md device + log.warning("Found device %s, but it turns out not be an md array device after all.", device.name) device = None if device and device.isDisk and \