Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
Merge branch '2.1-devel' into 2.1-release
  • Loading branch information
vojtechtrefny committed Jul 10, 2017
2 parents 8127140 + 0fe46b3 commit 56a1278
Show file tree
Hide file tree
Showing 14 changed files with 183 additions and 123 deletions.
2 changes: 1 addition & 1 deletion CONTRIBUTING
Expand Up @@ -6,7 +6,7 @@ Contributor's Guide

Where the Code Lives
---------------------
The upstream git repo is on `GitHub <https://github.com/rhinstaller/blivet>`_.
The upstream git repo is on `GitHub <https://github.com/storaged-project/blivet>`_.

Branching Model
----------------
Expand Down
2 changes: 1 addition & 1 deletion blivet/deviceaction.py
Expand Up @@ -548,7 +548,7 @@ def __init__(self, device, fmt=None):
raise ValueError("specified format already exists")

if not self._format.formattable:
raise ValueError("resource to create this format %s is unavailable" % fmt)
raise ValueError("resource to create this format %s is unavailable" % self._format.type)

def apply(self):
""" apply changes related to the action to the device(s) """
Expand Down
141 changes: 77 additions & 64 deletions blivet/devicefactory.py
Expand Up @@ -248,6 +248,11 @@ class DeviceFactory(object):
- change the container member device type of a defined device
"""

# This is a bit tricky, the child factory creates devices on top of which
# this (parent) factory builds its devices. E.g. partitions (as PVs) for a
# VG. And such "underlying" devices are called "parent" devices in other
# places in Blivet. So a child factory actually creates parent devices.
child_factory_class = None
child_factory_fstype = None
size_set_class = TotalSizeSet
Expand Down Expand Up @@ -328,16 +333,16 @@ def __init__(self, storage, size, disks, fstype=None, mountpoint=None,

self.container_size = container_size

self.container = None
self.device = device
self.container = None # container for the device
self.device = device # the leaf device this factory modifies or creates (also known as "factory device")

if not self.fstype:
self.fstype = self.storage.get_fstype(mountpoint=self.mountpoint)
if fstype == "swap":
self.mountpoint = None

self.child_factory = None
self.parent_factory = None
self.child_factory = None # factory creating the parent devices (e.g. PVs for a VG)
self.parent_factory = None # factory this factory is a child factory for (e.g. the one creating an LV in a VG)

if min_luks_entropy is None:
self.min_luks_entropy = luks_data.min_entropy
Expand Down Expand Up @@ -438,15 +443,15 @@ def _get_total_space(self):
return size

def _get_device_space(self):
""" The total disk space required for this device. """
""" The total disk space required for the factory device. """
return self.size

def _get_device_size(self):
""" Return the factory device size including container limitations. """
return self.size

def _set_device_size(self):
""" Set the size of a defined factory device. """
""" Set the size of the factory device. """
pass

#
Expand Down Expand Up @@ -1192,6 +1197,10 @@ def __init__(self, *args, **kwargs):
if self.container_raid_level:
self.child_factory_class = MDFactory

@property
def vg(self):
    """ The volume group this factory uses; alias for ``self.container``. """
    return self.container

#
# methods related to device size and disk space requirements
#
Expand All @@ -1215,25 +1224,39 @@ def _handle_no_size(self):
else:
super(LVMFactory, self)._handle_no_size()

@property
def _pe_size(self):
    """ Physical extent size of our VG, or the LVM default when there is no VG yet. """
    # getattr(None, ..., default) falls back to the default, so this covers
    # both the "have a VG" and "no VG yet" cases in one expression.
    return getattr(self.vg, "pe_size", lvm.LVM_PE_SIZE)

def _get_device_space(self):
# XXX: should respect the real extent size
return blockdev.lvm.get_lv_physical_size(self.size, lvm.LVM_PE_SIZE)
""" The total disk space required for the factory device (LV). """
return blockdev.lvm.get_lv_physical_size(self.size, self._pe_size)

def _get_device_size(self):
""" Return the factory device size including container limitations. """
size = self.size
free = self.container.free_space
free = self.vg.free_space
if self.device:
# self.raw_device == self.device.raw_device
# (i.e. self.device or the backing device in case self.device is encrypted)
free += self.raw_device.size

if free < size:
log.info("adjusting size from %s to %s so it fits "
"in container %s", size, free, self.container.name)
"in container %s", size, free, self.vg.name)
size = free

return size

def _set_device_size(self):
""" Set the size of the factory device. """
size = self._get_device_size()

# self.raw_device == self.device.raw_device
# (i.e. self.device or the backing device in case self.device is encrypted)
if self.device and size != self.raw_device.size:
log.info("adjusting device size from %s to %s",
self.raw_device.size, size)
Expand All @@ -1243,52 +1266,46 @@ def _set_device_size(self):
def _get_total_space(self):
""" Total disk space requirement for this device and its container. """
space = Size(0)
if self.container and self.container.exists:
if self.vg and self.vg.exists:
return space

if self.container_size == SIZE_POLICY_AUTO:
# automatic container size management
if self.container:
space += sum([p.size for p in self.container.parents])
space -= self.container.free_space
if self.vg:
space += sum(p.size for p in self.vg.parents)
space -= self.vg.free_space
# we need to account for the LVM metadata being placed somewhere
space += self.container.lvm_metadata_space
space += self.vg.lvm_metadata_space
else:
# we need to account for the LVM metadata being placed on each disk
# (and thus taking up to one extent from each disk)
space += len(self.disks) * self._pe_size

space += self._get_device_space()
log.debug("size bumped to %s to include new device space", space)
if self.device:
space -= blockdev.lvm.round_size_to_pe(self.device.size, self._pe_size)
log.debug("size cut to %s to omit old device space", space)

# we need to account for the LVM metadata being placed on each disk
# (and thus taking up to one extent from each disk)
space += len(self.disks) * lvm.LVM_PE_SIZE
elif self.container_size == SIZE_POLICY_MAX:
# grow the container as large as possible
if self.container:
space += sum(p.size for p in self.container.parents)
log.debug("size bumped to %s to include container parents", space)
if self.vg:
space += sum(p.size for p in self.vg.parents)
log.debug("size bumped to %s to include VG parents", space)

space += self._get_free_disk_space()
log.debug("size bumped to %s to include free disk space", space)
else:
# container_size is a request for a fixed size for the container
# XXX: should respect the real extent size
space += blockdev.lvm.get_lv_physical_size(self.container_size, lvm.LVM_PE_SIZE)
space += blockdev.lvm.get_lv_physical_size(self.container_size, self._pe_size)

# we need to account for the LVM metadata being placed on each disk
# (and thus taking up to one extent from each disk)
space += len(self.disks) * lvm.LVM_PE_SIZE

# this does not apply if a specific container size was requested
if self.container_size in [SIZE_POLICY_AUTO, SIZE_POLICY_MAX]:
space += self._get_device_space()
log.debug("size bumped to %s to include new device space", space)
if self.device and self.container_size == SIZE_POLICY_AUTO:
# The member count here uses the container's current member set
# since that's the basis for the current device's disk space
# usage.
# XXX: should respect the real extent size
space -= blockdev.lvm.round_size_to_pe(self.device.size, lvm.LVM_PE_SIZE)
log.debug("size cut to %s to omit old device space", space)
space += len(self.disks) * self._pe_size

if self.container_encrypted:
# Add space for LUKS metadata, each parent will be encrypted
space += lvm.LVM_PE_SIZE * len(self.disks)
space += get_format("luks").min_size * len(self.disks)

return space

Expand All @@ -1304,10 +1321,10 @@ def _get_new_container(self, *args, **kwargs):

def _check_container_size(self):
""" Raise an exception if the container cannot hold its devices. """
if not self.container:
if not self.vg:
return

free_space = self.container.free_space + getattr(self.device, "size", 0)
free_space = self.vg.free_space + getattr(self.device, "size", 0)
if free_space < 0:
raise DeviceFactoryError("container changes impossible due to "
"the devices it already contains")
Expand All @@ -1321,9 +1338,9 @@ def _get_child_factory_kwargs(self):
if self.container_raid_level:
# md pv
kwargs["raid_level"] = self.container_raid_level
if self.container and self.container.parents:
kwargs["device"] = self.container.parents[0]
kwargs["name"] = self.container.parents[0].name
if self.vg and self.vg.parents:
kwargs["device"] = self.vg.parents[0]
kwargs["name"] = self.vg.parents[0].name
else:
kwargs["name"] = self.storage.suggest_device_name(prefix="pv")

Expand All @@ -1336,30 +1353,30 @@ def _get_new_device(self, *args, **kwargs):
def _set_name(self):
if not self.device_name:
self.device_name = self.storage.suggest_device_name(
parent=self.container,
parent=self.vg,
swap=(self.fstype == "swap"),
mountpoint=self.mountpoint)

lvname = "%s-%s" % (self.container.name, self.device_name)
lvname = "%s-%s" % (self.vg.name, self.device_name)
safe_new_name = self.storage.safe_device_name(lvname)
if self.device.name != safe_new_name:
if safe_new_name in self.storage.names:
log.error("not renaming '%s' to in-use name '%s'",
self.device.name, safe_new_name)
return

if not safe_new_name.startswith(self.container.name):
if not safe_new_name.startswith(self.vg.name):
log.error("device rename failure (%s)", safe_new_name)
return

# strip off the vg name before setting
safe_new_name = safe_new_name[len(self.container.name) + 1:]
safe_new_name = safe_new_name[len(self.vg.name) + 1:]
log.debug("renaming device '%s' to '%s'",
self.device.name, safe_new_name)
self.device.name = safe_new_name

def _configure(self):
self._set_container()
self._set_container() # just sets self.container based on the specs
if self.container and not self.container.exists:
# If there's already a VG associated with this LV that doesn't have
# MD PVs we need to remove the partition PVs.
Expand Down Expand Up @@ -1438,7 +1455,7 @@ def __init__(self, *args, **kwargs):
# methods related to device size and disk space requirements
#
def _get_device_size(self):
# calculate device size based on space in the pool
""" Calculate device size based on space in the pool. """
pool_size = self.pool.size
log.debug("pool size is %s", pool_size)
free = pool_size - self.pool.used_space
Expand All @@ -1453,22 +1470,17 @@ def _get_device_size(self):

return size

@property
def _pesize(self):
    """ The extent size of our vg or the default if we have no vg. """
    if self.container is not None:
        return self.container.pe_size
    return lvm.LVM_PE_SIZE

def _get_total_space(self):
""" Calculate and return the total disk space needed for the vg.
Our container will still be None if we are going to create it.
Our container (VG) will still be None if we are going to create it.
"""

# unset the thpool_reserve here so that the previously reserved space is
# considered free space (and thus swallowed) -> we will set it and
# calculate an updated reserve below
if self.container:
self.container.thpool_reserve = None
if self.vg:
self.vg.thpool_reserve = None

size = super(LVMThinPFactory, self)._get_total_space()
# this does not apply if a specific container size was requested
Expand All @@ -1479,8 +1491,8 @@ def _get_total_space(self):
size -= self.pool.free_space
log.debug("size cut to %s to omit pool free space", size)

if self.container:
self.container.thpool_reserve = AUTOPART_THPOOL_RESERVE
if self.vg:
self.vg.thpool_reserve = AUTOPART_THPOOL_RESERVE

# black maths (to make sure there's AUTOPART_THPOOL_RESERVE.percent reserve)
size_with_reserve = size * (1 / (1 - (AUTOPART_THPOOL_RESERVE.percent / 100)))
Expand All @@ -1499,7 +1511,7 @@ def pool_list(self):
return self.storage.thinpools

def get_pool(self):
if not self.container:
if not self.vg:
return None

if self.device:
Expand All @@ -1510,7 +1522,7 @@ def get_pool(self):
# create a new pool to allocate new devices from? Probably not, since
# that would prevent users from setting up custom pools on tty2.
pool = None
pools = [p for p in self.pool_list if p.vg == self.container]
pools = [p for p in self.pool_list if p.vg == self.vg]
pools.sort(key=lambda p: p.free_space, reverse=True)
if pools:
new_pools = [p for p in pools if not p.exists]
Expand Down Expand Up @@ -1551,8 +1563,8 @@ def _get_pool_size(self):
size -= self.device.pool_space_used # don't count our device

# round to nearest extent. free rounds down, size rounds up.
free = self.container.align(free + self.container.free_space)
size = self.container.align(size, roundup=True)
free = self.vg.align(free + self.vg.free_space)
size = self.vg.align(size, roundup=True)

if free < size:
size = free
Expand All @@ -1573,12 +1585,12 @@ def _create_pool(self):
if self.size == Size(0):
return

self.container.thpool_reserve = AUTOPART_THPOOL_RESERVE
self.vg.thpool_reserve = AUTOPART_THPOOL_RESERVE
size = self._get_pool_size()
if size == Size(0):
raise DeviceFactoryError("not enough free space for thin pool")

self.pool = self._get_new_pool(size=size, parents=[self.container])
self.pool = self._get_new_pool(size=size, parents=[self.vg])
self.storage.create_device(self.pool)

# reconfigure the pool here in case its presence in the VG has caused
Expand All @@ -1589,6 +1601,7 @@ def _create_pool(self):
# methods to configure the factory's container (both vg and pool)
#
def _set_container(self):
""" Set this factory's container (VG) device. """
super(LVMThinPFactory, self)._set_container()
self.pool = self.get_pool()
if self.pool:
Expand Down
2 changes: 2 additions & 0 deletions blivet/devices/luks.py
Expand Up @@ -24,6 +24,7 @@

from ..storage_log import log_method_call
from ..size import Size
from ..tasks import availability

import logging
log = logging.getLogger("blivet")
Expand All @@ -38,6 +39,7 @@ class LUKSDevice(DMCryptDevice):
_type = "luks/dm-crypt"
_resizable = True
_packages = ["cryptsetup"]
_external_dependencies = [availability.BLOCKDEV_CRYPTO_PLUGIN]

def __init__(self, name, fmt=None, size=None, uuid=None,
exists=False, sysfs_path='', parents=None):
Expand Down
3 changes: 3 additions & 0 deletions blivet/devices/lvm.py
Expand Up @@ -1534,6 +1534,9 @@ def autoset_md_size(self):
100)) # snapshots
log.debug("Recommended metadata size: %s", self._metadata_size)

self._metadata_size = self.vg.align(self._metadata_size, roundup=True)
log.debug("Rounded metadata size to extents: %s", self._metadata_size)

log.debug("Adjusting size from %s to %s", self.size, self.size - self._metadata_size)
self.size = self.size - self._metadata_size

Expand Down

0 comments on commit 56a1278

Please sign in to comment.