forked from storaged-project/blivet
-
Notifications
You must be signed in to change notification settings - Fork 0
/
lvm.py
1912 lines (1562 loc) · 69.1 KB
/
lvm.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# devices/lvm.py
#
# Copyright (C) 2009-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <dlehman@redhat.com>
#
from decimal import Decimal
from six import add_metaclass
import abc
import copy
import pprint
import re
import os
import time
import gi
gi.require_version("BlockDev", "1.0")
from gi.repository import BlockDev as blockdev
# device backend modules
from ..devicelibs import lvm
from .. import errors
from .. import util
from ..storage_log import log_method_call
from .. import udev
from ..size import Size, KiB, MiB, ROUND_UP, ROUND_DOWN
from ..tasks import availability
import logging
log = logging.getLogger("blivet")
from .lib import LINUX_SECTOR_SIZE, ParentList
from .device import Device
from .storage import StorageDevice
from .container import ContainerDevice
from .dm import DMDevice
from .md import MDRaidArrayDevice
from .cache import Cache, CacheStats, CacheRequest
# registry of internal-LV device classes; populated by the class definitions below
_INTERNAL_LV_CLASSES = []


def get_internal_lv_class(lv_attr):
    """ Map an LV's lv_attr string to the matching internal-LV device class.

        :param lv_attr: the LV's attribute string as reported by LVM
        :type lv_attr: str
        :returns: the matching class or None if no class matches
    """
    if lv_attr[0] == "C":
        # cache pools and internal data LV of cache pools need a more complicated check
        if lv_attr[6] == "C":
            # target type == cache -> cache pool
            return LVMCachePoolLogicalVolumeDevice
        return LVMDataLogicalVolumeDevice

    # otherwise the first attr letter alone identifies the class
    return next((cls for cls in _INTERNAL_LV_CLASSES
                 if lv_attr[0] in cls.attr_letters), None)
class LVMVolumeGroupDevice(ContainerDevice):
    """ An LVM Volume Group """
    _type = "lvmvg"
    _packages = ["lvm2"]
    # the format expected on member PVs and the attribute carrying its VG UUID
    _formatClassName = property(lambda s: "lvmpv")
    _formatUUIDAttr = property(lambda s: "vgUuid")
    _formatImmutable = True

    @staticmethod
    def get_supported_pe_sizes():
        """ Return the list of physical extent sizes supported by LVM. """
        return [Size(pe_size) for pe_size in blockdev.lvm.get_supported_pe_sizes()]

    def __init__(self, name, parents=None, size=None, free=None,
                 peSize=None, peCount=None, peFree=None, pvCount=None,
                 uuid=None, exists=False, sysfsPath=''):
        """
            :param name: the device name (generally a device node's basename)
            :type name: str
            :keyword exists: does this device exist?
            :type exists: bool
            :keyword parents: a list of parent devices
            :type parents: list of :class:`StorageDevice`
            :keyword sysfsPath: sysfs device path
            :type sysfsPath: str
            :keyword peSize: physical extent size
            :type peSize: :class:`~.size.Size`

            For existing VG's only:

            :keyword size: the VG's size
            :type size: :class:`~.size.Size`
            :keyword free: amount of free space in the VG
            :type free: :class:`~.size.Size`
            :keyword peFree: number of free extents
            :type peFree: int
            :keyword peCount: total number of extents
            :type peCount: int
            :keyword pvCount: number of PVs in this VG
            :type pvCount: int
            :keyword uuid: the VG UUID
            :type uuid: str
        """
        # These attributes are used by _addParent, so they must be initialized
        # prior to instantiating the superclass.
        self._lvs = []
        self.hasDuplicate = False
        self._complete = False  # have we found all of this VG's PVs?
        self.pvCount = util.numeric_type(pvCount)
        if exists and not pvCount:
            # no PV count reported -- assume we already have every PV
            self._complete = True

        super(LVMVolumeGroupDevice, self).__init__(name, parents=parents,
                                                   uuid=uuid, size=size,
                                                   exists=exists,
                                                   sysfsPath=sysfsPath)

        self.free = util.numeric_type(free)
        self.peSize = util.numeric_type(peSize)
        self.peCount = util.numeric_type(peCount)
        self.peFree = util.numeric_type(peFree)
        # reserved free space, expressed either as a percentage of the VG size
        # or as a fixed amount (see reservedSpace below)
        self.reserved_percent = 0
        self.reserved_space = Size(0)

        # TODO: validate peSize if given
        if not self.peSize:
            self.peSize = lvm.LVM_PE_SIZE

        if not self.exists:
            self.pvCount = len(self.parents)

        # >0 is fixed
        self.size_policy = self.size

    def __repr__(self):
        s = super(LVMVolumeGroupDevice, self).__repr__()
        s += (" free = %(free)s PE Size = %(peSize)s PE Count = %(peCount)s\n"
              " PE Free = %(peFree)s PV Count = %(pvCount)s\n"
              " modified = %(modified)s"
              " extents = %(extents)s free space = %(freeSpace)s\n"
              " free extents = %(freeExtents)s"
              " reserved percent = %(rpct)s reserved space = %(res)s\n"
              " PVs = %(pvs)s\n"
              " LVs = %(lvs)s" %
              {"free": self.free, "peSize": self.peSize, "peCount": self.peCount,
               "peFree": self.peFree, "pvCount": self.pvCount,
               "modified": self.isModified,
               "extents": self.extents, "freeSpace": self.freeSpace,
               "freeExtents": self.freeExtents,
               "rpct": self.reserved_percent, "res": self.reserved_space,
               "pvs": pprint.pformat([str(p) for p in self.pvs]),
               "lvs": pprint.pformat([str(l) for l in self.lvs])})
        return s

    @property
    def dict(self):
        """ Dictionary representation of this VG (for serialization/logging). """
        d = super(LVMVolumeGroupDevice, self).dict
        d.update({"free": self.free, "peSize": self.peSize,
                  "peCount": self.peCount, "peFree": self.peFree,
                  "pvCount": self.pvCount, "extents": self.extents,
                  "freeSpace": self.freeSpace,
                  "freeExtents": self.freeExtents,
                  "reserved_percent": self.reserved_percent,
                  "reserved_space": self.reserved_space,
                  "lvNames": [lv.name for lv in self.lvs]})
        return d

    @property
    def mapName(self):
        """ This device's device-mapper map name """
        # Thank you lvm for this lovely hack.
        return self.name.replace("-","--")

    @property
    def path(self):
        """ Device node representing this device. """
        return "%s/%s" % (self._devDir, self.mapName)

    def updateSysfsPath(self):
        """ Update this device's sysfs path. """
        # a VG has no device node of its own, hence the empty sysfs path
        log_method_call(self, self.name, status=self.status)
        if not self.exists:
            raise errors.DeviceError("device has not been created", self.name)

        self.sysfsPath = ''

    @property
    def status(self):
        """ The device's status (True means active). """
        if not self.exists:
            return False

        # certainly if any of this VG's LVs are active then so are we
        for lv in self.lvs:
            if lv.status:
                return True

        # if any of our PVs are not active then we cannot be
        for pv in self.pvs:
            if not pv.status:
                return False

        # if we are missing some of our PVs we cannot be active
        if not self.complete:
            return False

        return True

    def _preSetup(self, orig=False):
        # refuse to activate a VG that is missing member PVs
        if self.exists and not self.complete:
            raise errors.DeviceError("cannot activate VG with missing PV(s)", self.name)
        return StorageDevice._preSetup(self, orig=orig)

    def _teardown(self, recursive=None):
        """ Close, or tear down, a device. """
        log_method_call(self, self.name, status=self.status,
                        controllable=self.controllable)
        blockdev.lvm.vgdeactivate(self.name)

    def _create(self):
        """ Create the device. """
        log_method_call(self, self.name, status=self.status)
        pv_list = [pv.path for pv in self.parents]
        blockdev.lvm.vgcreate(self.name, pv_list, self.peSize)

    def _postCreate(self):
        # a freshly created VG necessarily has all of its PVs
        self._complete = True
        super(LVMVolumeGroupDevice, self)._postCreate()

    def _preDestroy(self):
        StorageDevice._preDestroy(self)
        # set up the pvs since lvm needs access to them to do the vgremove
        self.setupParents(orig=True)

    def _destroy(self):
        """ Destroy the device. """
        log_method_call(self, self.name, status=self.status)
        if not self.complete:
            for pv in self.pvs:
                # Remove the PVs from the ignore filter so we can wipe them.
                lvm.lvm_cc_removeFilterRejectRegexp(pv.name)

            # Don't run vgremove or vgreduce since there may be another VG with
            # the same name that we want to keep/use.
            return

        blockdev.lvm.vgreduce(self.name, None)
        blockdev.lvm.vgdeactivate(self.name)
        blockdev.lvm.vgremove(self.name)

    def _remove(self, member):
        """ Remove a member PV, migrating its data off with pvmove first. """
        # remember which LVs were active so we can restore inactive state after
        status = []
        for lv in self.lvs:
            status.append(lv.status)
            if lv.exists:
                lv.setup()

        blockdev.lvm.pvmove(member.path)
        blockdev.lvm.vgreduce(self.name, member.path)

        for (lv, status) in zip(self.lvs, status):
            if lv.status and not status:
                lv.teardown()

    def _add(self, member):
        """ Extend this VG with a new member PV. """
        blockdev.lvm.vgextend(self.name, member.path)

    def _addLogVol(self, lv):
        """ Add an LV to this VG. """
        if lv in self._lvs:
            raise ValueError("lv is already part of this vg")

        # verify we have the space, then add it
        # do not verify for growing vg (because of ks)
        # FIXME: add a "isthin" property and/or "ispool"?
        if not lv.exists and not self.growable and \
           not isinstance(lv, LVMThinLogicalVolumeDevice) and \
           lv.size > self.freeSpace:
            raise errors.DeviceError("new lv is too large to fit in free space", self.name)

        log.debug("Adding %s/%s to %s", lv.name, lv.size, self.name)
        self._lvs.append(lv)

        # snapshot accounting
        origin = getattr(lv, "origin", None)
        if origin:
            origin.snapshots.append(lv)

    def _removeLogVol(self, lv):
        """ Remove an LV from this VG. """
        if lv not in self.lvs:
            raise ValueError("specified lv is not part of this vg")

        self._lvs.remove(lv)

        # snapshot accounting
        origin = getattr(lv, "origin", None)
        if origin:
            origin.snapshots.remove(lv)

    def _addParent(self, member):
        super(LVMVolumeGroupDevice, self)._addParent(member)

        # mark the VG complete once the last expected PV has been attached
        if (self.exists and member.format.exists and
            len(self.parents) + 1 == self.pvCount):
            self._complete = True

    def _removeParent(self, member):
        # XXX It would be nice to raise an exception if removing this member
        #     would not leave enough space, but the devicefactory relies on it
        #     being possible to _temporarily_ overcommit the VG.
        #
        #     Maybe removeMember could be a wrapper with the checks and the
        #     devicefactory could call the _ versions to bypass the checks.
        super(LVMVolumeGroupDevice, self)._removeParent(member)

    # We can't rely on lvm to tell us about our size, free space, &c
    # since we could have modifications queued, unless the VG and all of
    # its PVs already exist.
    @property
    def isModified(self):
        """ Return True if the VG has changes queued that LVM is unaware of. """
        modified = True
        if self.exists and not [d for d in self.pvs if not d.exists]:
            modified = False

        return modified

    @property
    def reservedSpace(self):
        """ Reserved space in this VG """
        # percentage takes precedence over a fixed reserved amount
        reserved = Size(0)
        if self.reserved_percent > 0:
            reserved = self.reserved_percent * Decimal('0.01') * self.size
        elif self.reserved_space > Size(0):
            reserved = self.reserved_space

        # reserve space for the pmspare LV LVM creates behind our back
        reserved += self.pmSpareSize

        return self.align(reserved, roundup=True)

    @property
    def lvm_metadata_size(self):
        """The amount of the space reserved for LVM metadata in this VG's PVs"""
        # NOTE: we either specify data alignment in a PV or the default is used
        #       which is both handled by pv.format.peStart, but LVM takes into
        #       account also the underlying block device which means that e.g.
        #       for an MD RAID device, it tries to align everything also to chunk
        #       size and alignment offset of such device which may result in up
        #       to a twice as big non-data area
        # TODO: move this to either LVMPhysicalVolume's peStart property once
        #       formats know about their devices or to a new LVMPhysicalVolumeDevice
        #       class once it exists
        diff = Size(0)
        for pv in self.pvs:
            if isinstance(pv, MDRaidArrayDevice):
                diff += self.align(pv.size) - self.align(pv.size - 2 * pv.format.peStart)
            else:
                diff += self.align(pv.size) - self.align(pv.size - pv.format.peStart)

        return diff

    @property
    def size(self):
        """ The size of this VG """
        # TODO: just ask lvm if isModified returns False
        # sum up the sizes of the PVs, subtract the unusable (meta data) space
        size = sum(pv.size for pv in self.pvs)
        size -= self.lvm_metadata_size

        return size

    @property
    def extents(self):
        """ Number of extents in this VG """
        # TODO: just ask lvm if isModified returns False
        return int(self.size / self.peSize)

    @property
    def freeSpace(self):
        """ The amount of free space in this VG. """
        # TODO: just ask lvm if isModified returns False

        # get the number of disks used by PVs on RAID (if any)
        raid_disks = 0
        for pv in self.pvs:
            if isinstance(pv, MDRaidArrayDevice):
                raid_disks = max([raid_disks, len(pv.disks)])

        # total the sizes of any LVs
        log.debug("%s size is %s", self.name, self.size)
        used = sum((lv.vgSpaceUsed for lv in self.lvs), Size(0))
        used += self.reservedSpace
        free = self.size - used
        log.debug("vg %s has %s free", self.name, free)
        return free

    @property
    def freeExtents(self):
        """ The number of free extents in this VG. """
        # TODO: just ask lvm if isModified returns False
        return int(self.freeSpace / self.peSize)

    def align(self, size, roundup=False):
        """ Align a size to a multiple of physical extent size. """
        size = util.numeric_type(size)
        return size.roundToNearest(self.peSize, rounding=ROUND_UP if roundup else ROUND_DOWN)

    @property
    def pvs(self):
        """ A list of this VG's PVs """
        return self.parents[:]

    @property
    def lvs(self):
        """ A list of this VG's LVs """
        return self._lvs[:]

    @property
    def thinpools(self):
        """ A list of this VG's thin pool LVs. """
        return [l for l in self._lvs if isinstance(l, LVMThinPoolDevice)]

    @property
    def thinlvs(self):
        """ A list of this VG's thin LVs. """
        return [l for l in self._lvs if isinstance(l, LVMThinLogicalVolumeDevice)]

    @property
    def cachedLVs(self):
        """ A list of this VG's LVs that have a cache attached. """
        return [l for l in self._lvs if l.cached]

    @property
    def pmSpareSize(self):
        """Size of the pmspare LV LVM creates in every VG that contains some metadata
        (even internal) LV. The size of such LV is equal to the size of the
        biggest metadata LV in the VG.

        """
        # TODO: report correctly/better for existing VGs
        return max([lv.metaDataSize for lv in self.lvs] + [Size(0)])

    @property
    def complete(self):
        """Check if the vg has all its pvs in the system
        Return True if complete.
        """
        # vgs with duplicate names are overcomplete, which is not what we want
        if self.hasDuplicate:
            return False

        return self._complete or not self.exists

    @property
    def direct(self):
        """ Is this device directly accessible? """
        return False

    def populateKSData(self, data):
        """ Fill in kickstart data for this VG. """
        super(LVMVolumeGroupDevice, self).populateKSData(data)
        data.vgname = self.name
        data.physvols = ["pv.%d" % p.id for p in self.parents]
        data.preexist = self.exists
        if not self.exists:
            data.pesize = self.peSize.convertTo(KiB)

        # reserved percent/space

    @classmethod
    def isNameValid(cls, name):
        """ Return True if *name* is a valid LVM volume group name. """
        # No . or ..
        if name == '.' or name == '..':
            return False

        # Check that all characters are in the allowed set and that the name
        # does not start with a -
        if not re.match('^[a-zA-Z0-9+_.][a-zA-Z0-9+_.-]*$', name):
            return False

        # According to the LVM developers, vgname + lvname is limited to 126 characters
        # minus the number of hyphens, and possibly minus up to another 8 characters
        # in some unspecified set of situations. Instead of figuring all of that out,
        # no one gets a vg or lv name longer than, let's say, 55.
        if len(name) > 55:
            return False

        return True
class LVMLogicalVolumeDevice(DMDevice):
    """ An LVM Logical Volume """
    _type = "lvmlv"
    _resizable = True
    _packages = ["lvm2"]
    _containerClass = LVMVolumeGroupDevice
    _external_dependencies = [availability.BLOCKDEV_LVM_PLUGIN]

    def __init__(self, name, parents=None, size=None, uuid=None, segType=None,
                 fmt=None, exists=False, sysfsPath='', grow=None, maxsize=None,
                 percent=None, cacheRequest=None):
        """
            :param name: the device name (generally a device node's basename)
            :type name: str
            :keyword exists: does this device exist?
            :type exists: bool
            :keyword size: the device's size
            :type size: :class:`~.size.Size`
            :keyword parents: a list of parent devices
            :type parents: list of :class:`StorageDevice`
            :keyword fmt: this device's formatting
            :type fmt: :class:`~.formats.DeviceFormat` or a subclass of it
            :keyword sysfsPath: sysfs device path
            :type sysfsPath: str
            :keyword uuid: the device UUID
            :type uuid: str

            For existing LVs only:

            :keyword segType: segment type (eg: "linear", "raid1")
            :type segType: str

            For non-existent LVs only:

            :keyword grow: whether to grow this LV
            :type grow: bool
            :keyword maxsize: maximum size for growable LV
            :type maxsize: :class:`~.size.Size`
            :keyword percent: percent of VG space to take
            :type percent: int
            :keyword cacheRequest: parameters of requested cache (if any)
            :type cacheRequest: :class:`~.devices.lvm.LVMCacheRequest`
        """
        # When this device's format is set in the superclass constructor it will
        # try to access self.snapshots.
        self.snapshots = []
        DMDevice.__init__(self, name, size=size, fmt=fmt,
                          sysfsPath=sysfsPath, parents=parents,
                          exists=exists)

        self.uuid = uuid
        self.segType = segType or "linear"

        # growth-request attributes; only meaningful for non-existent LVs
        self.req_grow = None
        self.req_max_size = Size(0)
        self.req_size = Size(0)
        self.req_percent = 0

        if not self.exists:
            self.req_grow = grow
            self.req_max_size = Size(util.numeric_type(maxsize))
            # XXX should we enforce that req_size be pe-aligned?
            self.req_size = self._size
            self.req_percent = util.numeric_type(percent)

        # check that we got parents as expected and add this device to them
        self._check_parents()
        self._add_to_parents()

        self._metaDataSize = Size(0)
        self._internal_lvs = []

        self._cache = None
        if cacheRequest and not self.exists:
            self._cache = LVMCache(self, size=cacheRequest.size, exists=False,
                                   fast_pvs=cacheRequest.fast_devs, mode=cacheRequest.mode)

    def _check_parents(self):
        """Check that this device has parents as expected"""
        if isinstance(self.parents, (list, ParentList)):
            if len(self.parents) != 1:
                raise ValueError("constructor requires a single %s instance" % self._containerClass.__name__)

            container = self.parents[0]
        else:
            container = self.parents

        if not isinstance(container, self._containerClass):
            raise ValueError("constructor requires a %s instance" % self._containerClass.__name__)

    def _add_to_parents(self):
        """Add this device to its parents"""
        # a normal LV has only exactly parent -- the VG it belongs to
        self._parents[0]._addLogVol(self)

    @property
    def copies(self):
        """ Number of data copies (mirror images) in this LV; 1 if unmirrored. """
        image_lvs = [int_lv for int_lv in self._internal_lvs if isinstance(int_lv, LVMImageLogicalVolumeDevice)]
        return len(image_lvs) or 1

    @property
    def logSize(self):
        """ Total size of this LV's internal mirror-log LVs. """
        log_lvs = (int_lv for int_lv in self._internal_lvs if isinstance(int_lv, LVMLogLogicalVolumeDevice))
        return Size(sum(lv.size for lv in log_lvs))

    @property
    def metaDataSize(self):
        """ Size of this LV's metadata (explicit, cache, or internal metadata LVs). """
        if self._metaDataSize:
            return self._metaDataSize
        elif self.cached:
            return self.cache.md_size

        md_lvs = (int_lv for int_lv in self._internal_lvs if isinstance(int_lv, LVMMetadataLogicalVolumeDevice))
        return Size(sum(lv.size for lv in md_lvs))

    def __repr__(self):
        s = DMDevice.__repr__(self)
        s += (" VG device = %(vgdev)r\n"
              " segment type = %(type)s percent = %(percent)s\n"
              " mirror copies = %(copies)d"
              " VG space used = %(vgspace)s" %
              {"vgdev": self.vg, "percent": self.req_percent,
               "copies": self.copies, "type": self.segType,
               "vgspace": self.vgSpaceUsed })
        return s

    @property
    def dict(self):
        """ Dictionary representation of this LV (for serialization/logging). """
        d = super(LVMLogicalVolumeDevice, self).dict
        if self.exists:
            d.update({"copies": self.copies,
                      "vgspace": self.vgSpaceUsed})
        else:
            d.update({"percent": self.req_percent})

        return d

    @property
    def mirrored(self):
        """ True if this LV has more than one data copy. """
        return self.copies > 1

    def _setSize(self, size):
        """ Set this LV's size, aligning it to the VG's extent size.

            :raises ValueError: if *size* is not a Size or the VG lacks space
        """
        if not isinstance(size, Size):
            raise ValueError("new size must of type Size")

        size = self.vg.align(size)
        log.debug("trying to set lv %s size to %s", self.name, size)
        # Don't refuse to set size if we think there's not enough space in the
        # VG for an existing LV, since it's existence proves there is enough
        # space for it. A similar reasoning applies to shrinking the LV.
        if not self.exists and \
           not isinstance(self, LVMThinLogicalVolumeDevice) and \
           size > self.size and size > self.vg.freeSpace + self.vgSpaceUsed:
            log.error("failed to set size: %s short", size - (self.vg.freeSpace + self.vgSpaceUsed))
            raise ValueError("not enough free space in volume group")

        super(LVMLogicalVolumeDevice, self)._setSize(size)

    size = property(StorageDevice._getSize, _setSize)

    @property
    def maxSize(self):
        """ The maximum size this lv can be. """
        max_lv = self.size + self.vg.freeSpace
        max_format = self.format.maxSize
        return min(max_lv, max_format) if max_format else max_lv

    @property
    def vgSpaceUsed(self):
        """ Space occupied by this LV, not including snapshots. """
        if self.cached:
            cache_size = self.cache.size
        else:
            cache_size = Size(0)

        return (self.vg.align(self.size, roundup=True) * self.copies
                + self.logSize + self.metaDataSize + cache_size)

    def _setFormat(self, fmt):
        # keep non-existent snapshots' formats in sync with the origin's
        super(LVMLogicalVolumeDevice, self)._setFormat(fmt)
        for snapshot in (s for s in self.snapshots if not s.exists):
            snapshot._updateFormatFromOrigin()

    @property
    def vg(self):
        """ This Logical Volume's Volume Group. """
        return self.parents[0]

    @property
    def container(self):
        return self.vg

    @property
    def mapName(self):
        """ This device's device-mapper map name """
        # Thank you lvm for this lovely hack.
        return "%s-%s" % (self.vg.mapName, self._name.replace("-","--"))

    @property
    def path(self):
        """ Device node representing this device. """
        return "%s/%s" % (self._devDir, self.mapName)

    def getDMNode(self):
        """ Return the dm-X (eg: dm-0) device node for this device. """
        log_method_call(self, self.name, status=self.status)
        if not self.exists:
            raise errors.DeviceError("device has not been created", self.name)

        return blockdev.dm.node_from_name(self.mapName)

    def _getName(self):
        """ This device's name. """
        # full name is "vgname-lvname"
        return "%s-%s" % (self.vg.name, self._name)

    @property
    def lvname(self):
        """ The LV's name (not including VG name). """
        return self._name

    @property
    def complete(self):
        """ Test if vg exits and if it has all pvs. """
        return self.vg.complete

    def setupParents(self, orig=False):
        # parent is a vg, which has no formatting (or device for that matter)
        Device.setupParents(self, orig=orig)

    def _preSetup(self, orig=False):
        # If the lvmetad socket exists and any PV is inactive before we call
        # setupParents (via _preSetup, below), we should wait for auto-
        # activation before trying to manually activate this LV.
        auto_activate = (lvm.lvmetad_socket_exists() and
                         any(not pv.status for pv in self.vg.pvs))
        if not super(LVMLogicalVolumeDevice, self)._preSetup(orig=orig):
            return False

        if auto_activate:
            log.debug("waiting for lvm auto-activation of %s", self.name)
            # Wait for auto-activation for up to 30 seconds. If this LV hasn't
            # been activated when the timeout is reached, there may be some
            # lvm.conf content preventing auto-activation of this LV, so we
            # have to do it ourselves.
            # The timeout value of 30 seconds was suggested by prajnoha. He
            # noted that udev uses the same value, for whatever that's worth.
            timeout = 30  # seconds
            start = time.time()
            while time.time() - start < timeout:
                if self.status:
                    # already active -- don't try to activate it manually
                    log.debug("%s has been auto-activated", self.name)
                    return False
                else:
                    log.debug("%s not active yet; sleeping...", self.name)
                    time.sleep(0.5)

            log.debug("lvm auto-activation timeout reached for %s", self.name)

        return True

    def _setup(self, orig=False):
        """ Open, or set up, a device. """
        log_method_call(self, self.name, orig=orig, status=self.status,
                        controllable=self.controllable)
        blockdev.lvm.lvactivate(self.vg.name, self._name)

    def _teardown(self, recursive=None):
        """ Close, or tear down, a device. """
        log_method_call(self, self.name, status=self.status,
                        controllable=self.controllable)
        blockdev.lvm.lvdeactivate(self.vg.name, self._name)

    def _postTeardown(self, recursive=False):
        try:
            # It's likely that teardown of a VG will fail due to other
            # LVs being active (filesystems mounted, &c), so don't let
            # it bring everything down.
            StorageDevice._postTeardown(self, recursive=recursive)
        except errors.StorageError:
            if recursive:
                log.debug("vg %s teardown failed; continuing", self.vg.name)
            else:
                raise

    def _preCreate(self):
        # shrink the LV if it would not fit in the VG's actually usable space
        super(LVMLogicalVolumeDevice, self)._preCreate()

        try:
            vg_info = blockdev.lvm.vginfo(self.vg.name)
        except blockdev.LVMError as lvmerr:
            log.error("Failed to get free space for the %s VG: %s", self.vg.name, lvmerr)
            # nothing more can be done, we don't know the VG's free space
            return

        extent_size = Size(vg_info.extent_size)
        extents_free = vg_info.free_count
        can_use = extent_size * extents_free

        if self.size > can_use:
            msg = ("%s LV's size (%s) exceeds the VG's usable free space (%s),"
                   "shrinking the LV") % (self.name, self.size, can_use)
            log.warning(msg)
            self.size = can_use

    def _create(self):
        """ Create the device. """
        log_method_call(self, self.name, status=self.status)
        # should we use --zero for safety's sake?
        if not self.cache:
            # just a plain LV
            blockdev.lvm.lvcreate(self.vg.name, self._name, self.size)
        else:
            mode = blockdev.lvm.cache_get_mode_from_str(self.cache.mode)

            # prepare the list of fast PV devices
            fast_pvs = []
            for pv_name in (pv.name for pv in self.cache.fast_pvs):
                # make sure we have the full device paths
                if not pv_name.startswith("/dev/"):
                    fast_pvs.append("/dev/%s" % pv_name)
                else:
                    fast_pvs.append(pv_name)

            # get the list of all fast PV devices used in the VG so that we can
            # consider the rest to be slow PVs and generate a list of them
            all_fast_pvs_names = set()
            for lv in self.vg.lvs:
                if lv.cached and lv.cache.fast_pvs:
                    all_fast_pvs_names |= set(pv.name for pv in lv.cache.fast_pvs)
            slow_pvs = [pv.path for pv in self.vg.pvs if pv.name not in all_fast_pvs_names]

            # VG name, LV name, data size, cache size, metadata size, mode, flags, slow PVs, fast PVs
            # XXX: we need to pass slow_pvs+fast_pvs as slow PVs because parts
            # of the fast PVs may be required for allocation of the LV (it may
            # span over the slow PVs and parts of fast PVs)
            blockdev.lvm.cache_create_cached_lv(self.vg.name, self._name, self.size, self.cache.size, self.cache.md_size,
                                                mode, 0, slow_pvs+fast_pvs, fast_pvs)

    def _preDestroy(self):
        StorageDevice._preDestroy(self)
        # set up the vg's pvs so lvm can remove the lv
        self.vg.setupParents(orig=True)

    def _destroy(self):
        """ Destroy the device. """
        log_method_call(self, self.name, status=self.status)
        blockdev.lvm.lvremove(self.vg.name, self._name)

    def resize(self):
        """ Resize this LV to its currently set size. """
        log_method_call(self, self.name, status=self.status)

        # Setup VG parents (in case they are dmraid partitions for example)
        self.vg.setupParents(orig=True)

        # formats must be torn down before lvresize can run
        if self.originalFormat.exists:
            self.originalFormat.teardown()
        if self.format.exists:
            self.format.teardown()

        udev.settle()
        blockdev.lvm.lvresize(self.vg.name, self._name, self.size)

    @property
    def isleaf(self):
        # Thin snapshots do not need to be removed prior to removal of the
        # origin, but the old snapshots do.
        non_thin_snapshots = any(s for s in self.snapshots
                                 if not isinstance(s, LVMThinSnapShotDevice))
        return (super(LVMLogicalVolumeDevice, self).isleaf and
                not non_thin_snapshots)

    @property
    def direct(self):
        """ Is this device directly accessible? """
        # an LV can contain a direct filesystem if it is a leaf device or if
        # its only dependent devices are snapshots
        return super(LVMLogicalVolumeDevice, self).isleaf

    def dracutSetupArgs(self):
        """ Return the dracut cmdline argument(s) needed to activate this LV. """
        # Note no mapName usage here, this is a lvm cmdline name, which
        # is different (ofcourse)
        return set(["rd.lvm.lv=%s/%s" % (self.vg.name, self._name)])

    def checkSize(self):
        """ Check to make sure the size of the device is allowed by the
            format used.

            Returns:
            0  - ok
            1  - Too large
            -1 - Too small
        """
        if self.format.maxSize and self.size > self.format.maxSize:
            return 1
        elif (self.format.minSize and
              (not self.req_grow and
               self.size < self.format.minSize) or
              (self.req_grow and self.req_max_size and
               self.req_max_size < self.format.minSize)):
            return -1

        return 0

    def removeHook(self, modparent=True):
        """ Detach this LV from its VG when it is removed from the devicetree. """
        if modparent:
            self.vg._removeLogVol(self)

        super(LVMLogicalVolumeDevice, self).removeHook(modparent=modparent)

    def addHook(self, new=True):
        """ (Re-)attach this LV to its VG when it is added to the devicetree. """
        super(LVMLogicalVolumeDevice, self).addHook(new=new)
        if new:
            return

        if self not in self.vg.lvs:
            self.vg._addLogVol(self)

    def populateKSData(self, data):
        """ Fill in kickstart data for this LV. """
        super(LVMLogicalVolumeDevice, self).populateKSData(data)
        data.vgname = self.vg.name
        data.name = self.lvname
        data.preexist = self.exists
        data.resize = (self.exists and self.targetSize and
                       self.targetSize != self.currentSize)
        if not self.exists:
            data.grow = self.req_grow
            if self.req_grow:
                data.size = self.req_size.convertTo(MiB)
                data.maxSizeMB = self.req_max_size.convertTo(MiB)
            else:
                data.size = self.size.convertTo(MiB)

            data.percent = self.req_percent
        elif data.resize:
            data.size = self.targetSize.convertTo(MiB)

    @classmethod
    def isNameValid(cls, name):
        """ Return True if *name* is a valid LVM logical volume name. """
        # Check that the LV name is valid

        # Start with the checks shared with volume groups
        if not LVMVolumeGroupDevice.isNameValid(name):
            return False

        # And now the ridiculous ones
        # These strings are taken from apply_lvname_restrictions in lib/misc/lvm-string.c
        reserved_prefixes = set(['pvmove', 'snapshot'])
        reserved_substrings = set(['_cdata', '_cmeta', '_mimage', '_mlog', '_pmspare', '_rimage',
                                   '_rmeta', '_tdata', '_tmeta', '_vorigin'])

        for prefix in reserved_prefixes:
            if name.startswith(prefix):
                return False

        for substring in reserved_substrings:
            if substring in name:
                return False

        return True

    def addInternalLV(self, int_lv):
        """ Register an internal LV as belonging to this LV. """
        if int_lv not in self._internal_lvs:
            self._internal_lvs.append(int_lv)

    def removeInternalLV(self, int_lv):
        """ Unregister an internal LV from this LV.

            :raises ValueError: if *int_lv* does not belong to this LV
        """
        if int_lv in self._internal_lvs:
            self._internal_lvs.remove(int_lv)
        else:
            msg = "the specified internal LV '%s' doesn't belong to this LV ('%s')" % (int_lv.lv_name,
                                                                                       self.name)
            raise ValueError(msg)

    @property
    def cached(self):
        """ True if this LV has a cache attached. """
        return bool(self.cache)

    @property
    def cache(self):
        """ This LV's cache (an :class:`LVMCache`) or None. """
        if self.exists and not self._cache:
            # check if we have a cache pool internal LV
            pool = None
            for lv in self._internal_lvs:
                if isinstance(lv, LVMCachePoolLogicalVolumeDevice):
                    pool = lv

            if pool is not None:
                self._cache = LVMCache(self, size=pool.size, exists=True)

        return self._cache

    def attach_cache(self, cache_pool_lv):
        """ Attach an existing cache pool LV to this LV. """
        blockdev.lvm.cache_attach(self.vg.name, self.lvname, cache_pool_lv.lvname)
        self._cache = LVMCache(self, size=cache_pool_lv.size, exists=True)
@add_metaclass(abc.ABCMeta)
class LVMInternalLogicalVolumeDevice(LVMLogicalVolumeDevice):
"""Abstract base class for internal LVs