/
driver.py
11496 lines (9983 loc) · 513 KB
/
driver.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, and Parallels.
"""
import binascii
import collections
from collections import deque
import contextlib
import copy
import errno
import functools
import glob
import grp
import itertools
import operator
import os
import pwd
import random
import shutil
import sys
import tempfile
import threading
import time
import typing as ty
import uuid
from castellan import key_manager
from copy import deepcopy
import eventlet
from eventlet import greenthread
from eventlet import tpool
from lxml import etree
from os_brick import encryptors
from os_brick.encryptors import luks as luks_encryptor
from os_brick import exception as brick_exception
from os_brick.initiator import connector
import os_resource_classes as orc
import os_traits as ot
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_serialization import base64
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import fileutils
from oslo_utils import importutils
from oslo_utils import netutils as oslo_netutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
from nova.api.metadata import base as instance_metadata
from nova.api.metadata import password
from nova import block_device
from nova.compute import power_state
from nova.compute import provider_tree
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
import nova.conf
from nova.console import serial as serial_console
from nova.console import type as ctype
from nova import context as nova_context
from nova import crypto
from nova.db import constants as db_const
from nova import exception
from nova.i18n import _
from nova.image import glance
from nova.network import model as network_model
from nova import objects
from nova.objects import diagnostics as diagnostics_obj
from nova.objects import fields
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
import nova.privsep.libvirt
import nova.privsep.path
import nova.privsep.utils
from nova.storage import rbd_utils
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt.disk import api as disk_api
from nova.virt.disk.vfs import guestfs
from nova.virt import driver
from nova.virt import event as virtevent
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import designer
from nova.virt.libvirt import event as libvirtevent
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import instancejobtracker
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt.storage import dmcrypt
from nova.virt.libvirt.storage import lvm
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import vif as libvirt_vif
from nova.virt.libvirt.volume import fs
from nova.virt.libvirt.volume import mount
from nova.virt.libvirt.volume import remotefs
from nova.virt.libvirt.volume import volume
from nova.virt import netutils
from nova.volume import cinder
# Handle to the 'libvirt' python module. It is imported lazily in
# LibvirtDriver.__init__ via importutils so tests can substitute a fake.
libvirt: ty.Any = None

# Module-level one-shot flag; presumably guards a single UEFI-related log
# message emitted elsewhere in this file — confirm at the usage site.
uefi_logged = False

LOG = logging.getLogger(__name__)

CONF = nova.conf.CONF

# Candidate UEFI firmware image paths, keyed by guest architecture.
DEFAULT_UEFI_LOADER_PATH = {
    "x86_64": ['/usr/share/OVMF/OVMF_CODE.fd',
               '/usr/share/OVMF/OVMF_CODE.secboot.fd',
               '/usr/share/qemu/ovmf-x86_64-code.bin'],
    "aarch64": ['/usr/share/AAVMF/AAVMF_CODE.fd',
                '/usr/share/qemu/aavmf-aarch64-code.bin']
}

# Upper bound on the amount of console log data handled (100 KiB).
MAX_CONSOLE_BYTES = 100 * units.Ki

# Cache modes accepted in the [libvirt]disk_cachemodes config option; see
# LibvirtDriver.__init__ where this list is used for validation.
VALID_DISK_CACHEMODES = [
    "default", "none", "writethrough", "writeback", "directsync", "unsafe",
]

# The libvirt driver will prefix any disable reason codes with this string.
DISABLE_PREFIX = 'AUTO: '
# Disable reason for the service which was enabled or disabled without reason
DISABLE_REASON_UNDEFINED = None

# Guest config console string
CONSOLE = "console=tty0 console=ttyS0 console=hvc0"

# Bundle of per-guest NUMA settings produced by the guest config code.
GuestNumaConfig = collections.namedtuple(
    'GuestNumaConfig', ['cpuset', 'cputune', 'numaconfig', 'numatune'])
class InjectionInfo(collections.namedtuple(
        'InjectionInfo', ['network_info', 'files', 'admin_pass'])):
    """File/network/password data to inject into a guest image.

    The repr deliberately masks the admin password so that the value can
    never leak into log output.
    """
    __slots__ = ()

    def __repr__(self):
        return (f'InjectionInfo(network_info={self.network_info!r}, '
                f'files={self.files!r}, admin_pass=<SANITIZED>)')
# NOTE(lyarwood): Dict of volume drivers supported by the libvirt driver,
# keyed by the connection_info['driver_volume_type'] returned by Cinder for
# each volume type it supports
# TODO(lyarwood): Add host configurables to allow this list to be changed.
# Allowing native iSCSI to be reintroduced etc.
VOLUME_DRIVERS = {
    'iscsi': 'nova.virt.libvirt.volume.iscsi.LibvirtISCSIVolumeDriver',
    'iser': 'nova.virt.libvirt.volume.iser.LibvirtISERVolumeDriver',
    'local': 'nova.virt.libvirt.volume.volume.LibvirtVolumeDriver',
    'fake': 'nova.virt.libvirt.volume.volume.LibvirtFakeVolumeDriver',
    'rbd': 'nova.virt.libvirt.volume.net.LibvirtNetVolumeDriver',
    'nfs': 'nova.virt.libvirt.volume.nfs.LibvirtNFSVolumeDriver',
    'smbfs': 'nova.virt.libvirt.volume.smbfs.LibvirtSMBFSVolumeDriver',
    'fibre_channel': 'nova.virt.libvirt.volume.fibrechannel.LibvirtFibreChannelVolumeDriver',  # noqa:E501
    'gpfs': 'nova.virt.libvirt.volume.gpfs.LibvirtGPFSVolumeDriver',
    'quobyte': 'nova.virt.libvirt.volume.quobyte.LibvirtQuobyteVolumeDriver',
    'scaleio': 'nova.virt.libvirt.volume.scaleio.LibvirtScaleIOVolumeDriver',
    'vzstorage': 'nova.virt.libvirt.volume.vzstorage.LibvirtVZStorageVolumeDriver',  # noqa:E501
    'storpool': 'nova.virt.libvirt.volume.storpool.LibvirtStorPoolVolumeDriver',  # noqa:E501
    'nvmeof': 'nova.virt.libvirt.volume.nvme.LibvirtNVMEVolumeDriver',
}
def patch_tpool_proxy():
    """eventlet.tpool.Proxy doesn't work with old-style class in __str__()
    or __repr__() calls. See bug #962840 for details.

    We perform a monkey patch to replace those two instance methods.
    """
    def _proxy_str(self):
        return str(self._obj)

    def _proxy_repr(self):
        return repr(self._obj)

    tpool.Proxy.__str__ = _proxy_str
    tpool.Proxy.__repr__ = _proxy_repr


patch_tpool_proxy()
# For information about when MIN_{LIBVIRT,QEMU}_VERSION and
# NEXT_MIN_{LIBVIRT,QEMU}_VERSION can be changed, consult the following:
#
#   doc/source/reference/libvirt-distro-support-matrix.rst
#
# DO NOT FORGET to update this document when touching any versions below!
MIN_LIBVIRT_VERSION = (6, 0, 0)
MIN_QEMU_VERSION = (4, 2, 0)
NEXT_MIN_LIBVIRT_VERSION = (7, 0, 0)
NEXT_MIN_QEMU_VERSION = (5, 2, 0)

# Virtuozzo driver support
MIN_VIRTUOZZO_VERSION = (7, 0, 0)

# Names of the types that do not get compressed during migration
NO_COMPRESSION_TYPES = ('qcow2',)

# number of serial console limit
QEMU_MAX_SERIAL_PORTS = 4
# Qemu supports 4 serial consoles, we remove 1 because of the PTY one defined
ALLOWED_QEMU_SERIAL_PORTS = QEMU_MAX_SERIAL_PORTS - 1

# Name used to serialize vGPU resource accounting — presumably passed to a
# synchronization decorator elsewhere in this file; confirm at usage site.
VGPU_RESOURCE_SEMAPHORE = 'vgpu_resources'

# Prefix of the libvirt VIR_PERF_PARAM_* constants for perf event support.
LIBVIRT_PERF_EVENT_PREFIX = 'VIR_PERF_PARAM_'
class AsyncDeviceEventsHandler:
    """A synchronization point between libvirt events and clients waiting for
    such events.

    It provides an interface for the clients to wait for one or more libvirt
    event types. It implements event delivery by expecting the libvirt driver
    to forward libvirt specific events to notify_waiters()

    It handles multiple clients for the same instance, device and event
    type and delivers the event to each client.
    """

    class Waiter:
        """A single client's registration for a set of device event types."""

        def __init__(
            self,
            instance_uuid: str,
            device_name: str,
            event_types: ty.Set[ty.Type['libvirtevent.DeviceEvent']]
        ):
            self.instance_uuid = instance_uuid
            self.device_name = device_name
            self.event_types = event_types
            self.threading_event = threading.Event()
            # Filled in by notify_waiters() when a matching event arrives.
            self.result: ty.Optional['libvirtevent.DeviceEvent'] = None

        def matches(self, event: 'libvirtevent.DeviceEvent') -> bool:
            """Returns true if the event is one of the expected event types
            for the given instance and device.
            """
            return (
                self.instance_uuid == event.uuid and
                self.device_name == event.dev and
                isinstance(event, tuple(self.event_types)))

        def __repr__(self) -> str:
            return (
                "AsyncDeviceEventsHandler.Waiter("
                f"instance_uuid={self.instance_uuid}, "
                f"device_name={self.device_name}, "
                f"event_types={self.event_types})")

    def __init__(self):
        self._lock = threading.Lock()
        # Ongoing device operations in libvirt where we wait for the events
        # about success or failure.
        self._waiters: ty.Set[AsyncDeviceEventsHandler.Waiter] = set()

    def create_waiter(
        self,
        instance_uuid: str,
        device_name: str,
        event_types: ty.Set[ty.Type['libvirtevent.DeviceEvent']]
    ) -> 'AsyncDeviceEventsHandler.Waiter':
        """Returns an opaque token the caller can use in wait() to
        wait for the libvirt event

        :param instance_uuid: The UUID of the instance.
        :param device_name: The device name alias used by libvirt for this
            device.
        :param event_type: A set of classes derived from DeviceEvent
            specifying which event types the caller waits for. Specifying more
            than one event type means waiting for either of the events to be
            received.
        :returns: an opaque token to be used with wait_for_event().
        """
        waiter = AsyncDeviceEventsHandler.Waiter(
            instance_uuid, device_name, event_types)
        with self._lock:
            self._waiters.add(waiter)
        return waiter

    def delete_waiter(self, token: 'AsyncDeviceEventsHandler.Waiter'):
        """Deletes the waiter

        :param token: the opaque token returned by create_waiter() to be
            deleted
        """
        with self._lock:
            self._waiters.remove(token)

    def wait(
        self, token: 'AsyncDeviceEventsHandler.Waiter', timeout: float,
    ) -> ty.Optional['libvirtevent.DeviceEvent']:
        """Blocks waiting for the libvirt event represented by the opaque
        token

        :param token: A token created by calling create_waiter()
        :param timeout: Maximum number of seconds this call blocks waiting for
            the event to be received
        :returns: The received libvirt event, or None in case of timeout
        """
        token.threading_event.wait(timeout)
        with self._lock:
            # NOTE: discard() instead of remove(): cleanup_waiters() may have
            # already dropped this waiter from the set while unblocking us, in
            # which case remove() would raise KeyError here.
            self._waiters.discard(token)
        return token.result

    def notify_waiters(self, event: 'libvirtevent.DeviceEvent') -> bool:
        """Unblocks the client waiting for this event.

        :param event: the libvirt event that is received
        :returns: True if there was a client waiting and False otherwise.
        """
        dispatched = False
        with self._lock:
            for waiter in self._waiters:
                if waiter.matches(event):
                    waiter.result = event
                    waiter.threading_event.set()
                    dispatched = True
        return dispatched

    def cleanup_waiters(self, instance_uuid: str) -> None:
        """Deletes all waiters and unblocks all clients related to the
        specific instance.

        :param instance_uuid: The instance UUID for which the cleanup is
            requested
        """
        with self._lock:
            instance_waiters = set()
            for waiter in self._waiters:
                if waiter.instance_uuid == instance_uuid:
                    # unblock any waiting thread
                    waiter.threading_event.set()
                    instance_waiters.add(waiter)
            self._waiters -= instance_waiters

        if instance_waiters:
            LOG.debug(
                'Cleaned up device related libvirt event waiters: %s',
                instance_waiters)
class LibvirtDriver(driver.ComputeDriver):
    def __init__(self, virtapi, read_only=False):
        """Build the driver: capability flags, libvirt handle, host wrapper
        and the various helper objects used by the rest of this class.
        """
        # NOTE(aspiers) Some of these are dynamic, so putting
        # capabilities on the instance rather than on the class.
        # This prevents the risk of one test setting a capability
        # which bleeds over into other tests.

        # LVM and RBD require raw images. If we are not configured to
        # force convert images into raw format, then we _require_ raw
        # images only.
        raw_only = ('rbd', 'lvm')
        requires_raw_image = (CONF.libvirt.images_type in raw_only and
                              not CONF.force_raw_images)
        requires_ploop_image = CONF.libvirt.virt_type == 'parallels'

        self.capabilities = {
            "has_imagecache": True,
            "supports_evacuate": True,
            "supports_migrate_to_same_host": False,
            "supports_attach_interface": True,
            "supports_device_tagging": True,
            "supports_tagged_attach_interface": True,
            "supports_tagged_attach_volume": True,
            "supports_extend_volume": True,
            "supports_multiattach": True,
            "supports_trusted_certs": True,
            # Supported image types
            "supports_image_type_aki": True,
            "supports_image_type_ari": True,
            "supports_image_type_ami": True,
            "supports_image_type_raw": True,
            "supports_image_type_iso": True,
            # NOTE(danms): Certain backends do not work with complex image
            # formats. If we are configured for those backends, then we
            # should not expose the corresponding support traits.
            "supports_image_type_qcow2": not requires_raw_image,
            "supports_image_type_ploop": requires_ploop_image,
            "supports_pcpus": True,
            "supports_accelerators": True,
            "supports_bfv_rescue": True,
            "supports_vtpm": CONF.libvirt.swtpm_enabled,
        }
        super(LibvirtDriver, self).__init__(virtapi)

        if not sys.platform.startswith('linux'):
            raise exception.InternalError(
                _('The libvirt driver only works on Linux'))

        # Import the real libvirt module lazily (and only once) so the rest
        # of the file can be imported on systems without libvirt installed.
        global libvirt
        if libvirt is None:
            libvirt = importutils.import_module('libvirt')
            libvirt_migrate.libvirt = libvirt

        self._host = host.Host(self._uri(), read_only,
                               lifecycle_event_handler=self.emit_event,
                               conn_event_handler=self._handle_conn_event)
        self._supported_perf_events = []

        self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver()

        # NOTE(lyarwood): Volume drivers are loaded on-demand
        self.volume_drivers: ty.Dict[str, volume.LibvirtBaseVolumeDriver] = {}

        self._disk_cachemode = None
        self.image_cache_manager = imagecache.ImageCacheManager()
        self.image_backend = imagebackend.Backend(CONF.use_cow_images)

        # Parse "disk_type=cache_mode" pairs from config, dropping (with a
        # warning) any entry whose cache mode is not a recognised QEMU mode.
        self.disk_cachemodes = {}

        for mode_str in CONF.libvirt.disk_cachemodes:
            disk_type, sep, cache_mode = mode_str.partition('=')
            if cache_mode not in VALID_DISK_CACHEMODES:
                LOG.warning('Invalid cachemode %(cache_mode)s specified '
                            'for disk type %(disk_type)s.',
                            {'cache_mode': cache_mode,
                             'disk_type': disk_type})
                continue
            self.disk_cachemodes[disk_type] = cache_mode

        self._volume_api = cinder.API()
        self._image_api = glance.API()

        # The default choice for the sysinfo_serial config option is "unique"
        # which does not have a special function since the value is just the
        # instance.uuid.
        sysinfo_serial_funcs = {
            'none': lambda: None,
            'hardware': self._get_host_sysinfo_serial_hardware,
            'os': self._get_host_sysinfo_serial_os,
            'auto': self._get_host_sysinfo_serial_auto,
        }

        self._sysinfo_serial_func = sysinfo_serial_funcs.get(
            CONF.libvirt.sysinfo_serial, lambda: None)

        self.job_tracker = instancejobtracker.InstanceJobTracker()
        self._remotefs = remotefs.RemoteFilesystem()

        self._live_migration_flags = self._block_migration_flags = 0
        self.active_migrations = {}

        # Compute reserved hugepages from conf file at the very
        # beginning to ensure any syntax error will be reported and
        # avoid any re-calculation when computing resources.
        self._reserved_hugepages = hardware.numa_get_reserved_huge_pages()

        # Copy of the compute service ProviderTree object that is updated
        # every time update_provider_tree() is called.
        # NOTE(sbauza): We only want a read-only cache, this attribute is not
        # intended to be updatable directly
        self.provider_tree: provider_tree.ProviderTree = None

        # driver traits will not change during the runtime of the agent
        # so calculate them once and save them
        self._static_traits = None

        # The CPU models in the configuration are case-insensitive, but the
        # CPU model in the libvirt is case-sensitive, therefore create a
        # mapping to map the lower case CPU model name to normal CPU model
        # name.
        self.cpu_models_mapping = {}
        self.cpu_model_flag_mapping = {}

        self._vpmems_by_name, self._vpmems_by_rc = self._discover_vpmems(
            vpmem_conf=CONF.libvirt.pmem_namespaces)

        # We default to not support vGPUs unless the configuration is set.
        self.pgpu_type_mapping = collections.defaultdict(str)
        self.supported_vgpu_types = self._get_supported_vgpu_types()
def _discover_vpmems(self, vpmem_conf=None):
"""Discover vpmems on host and configuration.
:param vpmem_conf: pmem namespaces configuration from CONF
:returns: a dict of vpmem keyed by name, and
a dict of vpmem list keyed by resource class
:raises: exception.InvalidConfiguration if Libvirt or QEMU version
does not meet requirement.
"""
if not vpmem_conf:
return {}, {}
# vpmem keyed by name {name: objects.LibvirtVPMEMDevice,...}
vpmems_by_name: ty.Dict[str, 'objects.LibvirtVPMEMDevice'] = {}
# vpmem list keyed by resource class
# {'RC_0': [objects.LibvirtVPMEMDevice, ...], 'RC_1': [...]}
vpmems_by_rc: ty.Dict[str, ty.List['objects.LibvirtVPMEMDevice']] = (
collections.defaultdict(list)
)
vpmems_host = self._get_vpmems_on_host()
for ns_conf in vpmem_conf:
try:
ns_label, ns_names = ns_conf.split(":", 1)
except ValueError:
reason = _("The configuration doesn't follow the format")
raise exception.PMEMNamespaceConfigInvalid(
reason=reason)
ns_names = ns_names.split("|")
for ns_name in ns_names:
if ns_name not in vpmems_host:
reason = _("The PMEM namespace %s isn't on host") % ns_name
raise exception.PMEMNamespaceConfigInvalid(
reason=reason)
if ns_name in vpmems_by_name:
reason = (_("Duplicated PMEM namespace %s configured") %
ns_name)
raise exception.PMEMNamespaceConfigInvalid(
reason=reason)
pmem_ns_updated = vpmems_host[ns_name]
pmem_ns_updated.label = ns_label
vpmems_by_name[ns_name] = pmem_ns_updated
rc = orc.normalize_name(
"PMEM_NAMESPACE_%s" % ns_label)
vpmems_by_rc[rc].append(pmem_ns_updated)
return vpmems_by_name, vpmems_by_rc
def _get_vpmems_on_host(self):
"""Get PMEM namespaces on host using ndctl utility."""
try:
output = nova.privsep.libvirt.get_pmem_namespaces()
except Exception as e:
reason = _("Get PMEM namespaces by ndctl utility, "
"please ensure ndctl is installed: %s") % e
raise exception.GetPMEMNamespacesFailed(reason=reason)
if not output:
return {}
namespaces = jsonutils.loads(output)
vpmems_host = {} # keyed by namespace name
for ns in namespaces:
# store namespace info parsed from ndctl utility return
if not ns.get('name'):
# The name is used to identify namespaces, it's optional
# config when creating namespace. If an namespace don't have
# name, it can not be used by Nova, we will skip it.
continue
vpmems_host[ns['name']] = objects.LibvirtVPMEMDevice(
name=ns['name'],
devpath= '/dev/' + ns['daxregion']['devices'][0]['chardev'],
size=ns['size'],
align=ns['daxregion']['align'])
return vpmems_host
@property
def disk_cachemode(self):
# It can be confusing to understand the QEMU cache mode
# behaviour, because each cache=$MODE is a convenient shorthand
# to toggle _three_ cache.* booleans. Consult the below table
# (quoting from the QEMU man page):
#
# | cache.writeback | cache.direct | cache.no-flush
# --------------------------------------------------------------
# writeback | on | off | off
# none | on | on | off
# writethrough | off | off | off
# directsync | off | on | off
# unsafe | on | off | on
#
# Where:
#
# - 'cache.writeback=off' means: QEMU adds an automatic fsync()
# after each write request.
#
# - 'cache.direct=on' means: Use Linux's O_DIRECT, i.e. bypass
# the kernel page cache. Caches in any other layer (disk
# cache, QEMU metadata caches, etc.) can still be present.
#
# - 'cache.no-flush=on' means: Ignore flush requests, i.e.
# never call fsync(), even if the guest explicitly requested
# it.
#
# Use cache mode "none" (cache.writeback=on, cache.direct=on,
# cache.no-flush=off) for consistent performance and
# migration correctness. Some filesystems don't support
# O_DIRECT, though. For those we fallback to the next
# reasonable option that is "writeback" (cache.writeback=on,
# cache.direct=off, cache.no-flush=off).
if self._disk_cachemode is None:
self._disk_cachemode = "none"
if not nova.privsep.utils.supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writeback"
return self._disk_cachemode
def _set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
# Shareable disks like for a multi-attach volume need to have the
# driver cache disabled.
if getattr(conf, 'shareable', False):
conf.driver_cache = 'none'
else:
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
# NOTE(acewit): If the [libvirt]disk_cachemodes is set as
# `block=writeback` or `block=writethrough` or `block=unsafe`,
# whose correponding Linux's IO semantic is not O_DIRECT in
# file nova.conf, then it will result in an attachment failure
# because of the libvirt bug
# (https://bugzilla.redhat.com/show_bug.cgi?id=1086704)
if ((getattr(conf, 'driver_io', None) == "native") and
conf.driver_cache not in [None, 'none', 'directsync']):
conf.driver_io = "threads"
LOG.warning("The guest disk driver io mode has fallen back "
"from 'native' to 'threads' because the "
"disk cache mode is set as %(cachemode)s, which does "
"not use O_DIRECT. See the following bug report "
"for more details: https://launchpad.net/bugs/1841363",
{'cachemode': conf.driver_cache})
def _do_quality_warnings(self):
"""Warn about potential configuration issues.
This will log a warning message for things such as untested driver or
host arch configurations in order to indicate potential issues to
administrators.
"""
if CONF.libvirt.virt_type not in ('qemu', 'kvm'):
LOG.warning(
"Support for the '%(type)s' libvirt backend has been "
"deprecated and will be removed in a future release.",
{'type': CONF.libvirt.virt_type},
)
caps = self._host.get_capabilities()
hostarch = caps.host.cpu.arch
if hostarch not in (
fields.Architecture.I686, fields.Architecture.X86_64,
):
LOG.warning(
'The libvirt driver is not tested on %(arch)s by the '
'OpenStack project and thus its quality can not be ensured. '
'For more information, see: https://docs.openstack.org/'
'nova/latest/user/support-matrix.html',
{'arch': hostarch},
)
    def _handle_conn_event(self, enabled, reason):
        # Registered as conn_event_handler with host.Host in __init__;
        # mirrors libvirtd connection up/down onto the service's
        # enabled/disabled state.
        LOG.info("Connection event '%(enabled)d' reason '%(reason)s'",
                 {'enabled': enabled, 'reason': reason})
        self._set_host_enabled(enabled, reason)
    def init_host(self, host):
        """Initialize the driver when the compute service starts.

        Initializes the libvirt connection, validates host and configuration
        (CPU sets, migration flags, perf events, file-backed memory, my_ip,
        minimum libvirt/QEMU/Virtuozzo versions, conflicting live-migration
        options, image backend constraints, CPU model compatibility, vTPM
        support) and registers instance machine types.

        :param host: hostname of this compute service (part of the
            ComputeDriver interface; not used directly here)
        """
        self._host.initialize()

        self._check_cpu_set_configuration()

        self._do_quality_warnings()

        self._parse_migration_flags()

        self._supported_perf_events = self._get_supported_perf_events()

        self._check_file_backed_memory_support()

        self._check_my_ip()

        if (CONF.libvirt.virt_type == 'lxc' and
                not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)):
            LOG.warning("Running libvirt-lxc without user namespaces is "
                        "dangerous. Containers spawned by Nova will be run "
                        "as the host's root user. It is highly suggested "
                        "that user namespaces be used in a public or "
                        "multi-tenant environment.")

        # Stop libguestfs using KVM unless we're also configured
        # to use this. This solves problem where people need to
        # stop Nova use of KVM because nested-virt is broken
        if CONF.libvirt.virt_type != "kvm":
            guestfs.force_tcg()

        if not self._host.has_min_version(MIN_LIBVIRT_VERSION):
            raise exception.InternalError(
                _('Nova requires libvirt version %s or greater.') %
                libvirt_utils.version_to_string(MIN_LIBVIRT_VERSION))

        if CONF.libvirt.virt_type in ("qemu", "kvm"):
            if not self._host.has_min_version(hv_ver=MIN_QEMU_VERSION):
                raise exception.InternalError(
                    _('Nova requires QEMU version %s or greater.') %
                    libvirt_utils.version_to_string(MIN_QEMU_VERSION))

        if CONF.libvirt.virt_type == 'parallels':
            if not self._host.has_min_version(hv_ver=MIN_VIRTUOZZO_VERSION):
                raise exception.InternalError(
                    _('Nova requires Virtuozzo version %s or greater.') %
                    libvirt_utils.version_to_string(MIN_VIRTUOZZO_VERSION))

        # Give the cloud admin a heads up if we are intending to
        # change the MIN_LIBVIRT_VERSION in the next release.
        if not self._host.has_min_version(NEXT_MIN_LIBVIRT_VERSION):
            LOG.warning('Running Nova with a libvirt version less than '
                        '%(version)s is deprecated. The required minimum '
                        'version of libvirt will be raised to %(version)s '
                        'in the next release.',
                        {'version': libvirt_utils.version_to_string(
                            NEXT_MIN_LIBVIRT_VERSION)})
        if (CONF.libvirt.virt_type in ("qemu", "kvm") and
                not self._host.has_min_version(hv_ver=NEXT_MIN_QEMU_VERSION)):
            LOG.warning('Running Nova with a QEMU version less than '
                        '%(version)s is deprecated. The required minimum '
                        'version of QEMU will be raised to %(version)s '
                        'in the next release.',
                        {'version': libvirt_utils.version_to_string(
                            NEXT_MIN_QEMU_VERSION)})

        # Allowing both "tunnelling via libvirtd" (which will be
        # deprecated once the MIN_{LIBVIRT,QEMU}_VERSION is sufficiently
        # new enough) and "native TLS" options at the same time is
        # nonsensical.
        if (CONF.libvirt.live_migration_tunnelled and
                CONF.libvirt.live_migration_with_native_tls):
            msg = _("Setting both 'live_migration_tunnelled' and "
                    "'live_migration_with_native_tls' at the same "
                    "time is invalid. If you have the relevant "
                    "libvirt and QEMU versions, and TLS configured "
                    "in your environment, pick "
                    "'live_migration_with_native_tls'.")
            raise exception.Invalid(msg)

        # Some imagebackends are only able to import raw disk images,
        # and will fail if given any other format. See the bug
        # https://bugs.launchpad.net/nova/+bug/1816686 for more details.
        if CONF.libvirt.images_type in ('rbd',):
            if not CONF.force_raw_images:
                msg = _("'[DEFAULT]/force_raw_images = False' is not "
                        "allowed with '[libvirt]/images_type = rbd'. "
                        "Please check the two configs and if you really "
                        "do want to use rbd as images_type, set "
                        "force_raw_images to True.")
                raise exception.InvalidConfiguration(msg)

        # TODO(sbauza): Remove this code once mediated devices are persisted
        # across reboots.
        self._recreate_assigned_mediated_devices()

        self._check_cpu_compatibility()
        self._check_vtpm_support()
        self._register_instance_machine_type()
def _register_instance_machine_type(self):
"""Register the machine type of instances on this host
For each instance found on this host by InstanceList.get_by_host ensure
a machine type is registered within the system metadata of the instance
"""
context = nova_context.get_admin_context()
hostname = self._host.get_hostname()
for instance in objects.InstanceList.get_by_host(context, hostname):
# NOTE(lyarwood): Skip if hw_machine_type is set already in the
# image_meta of the instance. Note that this value comes from the
# system metadata of the instance where it is stored under the
# image_hw_machine_type key.
if instance.image_meta.properties.get('hw_machine_type'):
continue
# Fetch and record the machine type from the config
hw_machine_type = libvirt_utils.get_machine_type(
instance.image_meta)
# NOTE(lyarwood): As above this updates
# image_meta.properties.hw_machine_type within the instance and
# will be returned the next time libvirt_utils.get_machine_type is
# called for the instance image meta.
instance.system_metadata['image_hw_machine_type'] = hw_machine_type
instance.save()
LOG.debug("Instance machine_type updated to %s", hw_machine_type,
instance=instance)
def _prepare_cpu_flag(self, flag):
# NOTE(kchamart) This helper method will be used while computing
# guest CPU compatibility. It will take into account a
# comma-separated list of CPU flags from
# `[libvirt]cpu_model_extra_flags`. If the CPU flag starts
# with '+', it is enabled for the guest; if it starts with '-',
# it is disabled. If neither '+' nor '-' is specified, the CPU
# flag is enabled.
if flag.startswith('-'):
flag = flag.lstrip('-')
policy_value = 'disable'
else:
flag = flag.lstrip('+')
policy_value = 'require'
cpu_feature = vconfig.LibvirtConfigGuestCPUFeature(
flag, policy=policy_value)
return cpu_feature
def _check_cpu_compatibility(self):
mode = CONF.libvirt.cpu_mode
models = CONF.libvirt.cpu_models
if (CONF.libvirt.virt_type not in ("kvm", "qemu") and
mode not in (None, 'none')):
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt.virt_type
raise exception.Invalid(msg)
if mode != "custom":
if not models:
return
msg = _("The cpu_models option is not required when "
"cpu_mode!=custom")
raise exception.Invalid(msg)
if not models:
msg = _("The cpu_models option is required when cpu_mode=custom")
raise exception.Invalid(msg)
cpu = vconfig.LibvirtConfigGuestCPU()
for model in models:
cpu.model = self._get_cpu_model_mapping(model)
try:
self._compare_cpu(cpu, self._get_cpu_info(), None)
except exception.InvalidCPUInfo as e:
msg = (_("Configured CPU model: %(model)s is not "
"compatible with host CPU. Please correct your "
"config and try again. %(e)s") % {
'model': model, 'e': e})
raise exception.InvalidCPUInfo(msg)
# Use guest CPU model to check the compatibility between guest CPU and
# configured extra_flags
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.model = self._host.get_capabilities().host.cpu.model
for flag in set(x.lower() for x in CONF.libvirt.cpu_model_extra_flags):
cpu_feature = self._prepare_cpu_flag(flag)
cpu.add_feature(cpu_feature)
try:
self._compare_cpu(cpu, self._get_cpu_info(), None)
except exception.InvalidCPUInfo as e:
msg = (_("Configured extra flag: %(flag)s it not correct, or "
"the host CPU does not support this flag. Please "
"correct the config and try again. %(e)s") % {
'flag': flag, 'e': e})
raise exception.InvalidCPUInfo(msg)
def _check_vtpm_support(self) -> None:
# TODO(efried): A key manager must be configured to create/retrieve
# secrets. Is there a way to check that one is set up correctly?
# CONF.key_manager.backend is optional :(
if not CONF.libvirt.swtpm_enabled:
return
if CONF.libvirt.virt_type not in ('qemu', 'kvm'):
msg = _(
"vTPM support requires '[libvirt] virt_type' of 'qemu' or "
"'kvm'; found '%s'.")
raise exception.InvalidConfiguration(msg % CONF.libvirt.virt_type)
# These executables need to be installed for libvirt to make use of
# emulated TPM.
# NOTE(stephenfin): This checks using the PATH of the user running
# nova-compute rather than the libvirtd service, meaning it's an
# imperfect check but the best we can do
if not any(shutil.which(cmd) for cmd in ('swtpm_setup', 'swtpm')):
msg = _(
"vTPM support is configured but the 'swtpm' and "
"'swtpm_setup' binaries could not be found on PATH.")
raise exception.InvalidConfiguration(msg)
# The user and group must be valid on this host for cold migration and
# resize to function.
try:
pwd.getpwnam(CONF.libvirt.swtpm_user)
except KeyError:
msg = _(
"The user configured in '[libvirt] swtpm_user' does not exist "
"on this host; expected '%s'.")
raise exception.InvalidConfiguration(msg % CONF.libvirt.swtpm_user)
try:
grp.getgrnam(CONF.libvirt.swtpm_group)
except KeyError:
msg = _(
"The group configured in '[libvirt] swtpm_group' does not "
"exist on this host; expected '%s'.")
raise exception.InvalidConfiguration(
msg % CONF.libvirt.swtpm_group)
LOG.debug('Enabling emulated TPM support')
@staticmethod
def _is_existing_mdev(uuid):
# FIXME(sbauza): Some kernel can have a uevent race meaning that the
# libvirt daemon won't know when a mediated device is created unless
# you restart that daemon. Until all kernels we support are not having
# that possible race, check the sysfs directly instead of asking the
# libvirt API.
# See https://bugzilla.redhat.com/show_bug.cgi?id=1376907 for ref.
return os.path.exists('/sys/bus/mdev/devices/{0}'.format(uuid))
def _recreate_assigned_mediated_devices(self):
"""Recreate assigned mdevs that could have disappeared if we reboot
the host.
"""
# NOTE(sbauza): This method just calls sysfs to recreate mediated
# devices by looking up existing guest XMLs and doesn't use
# the Placement API so it works with or without a vGPU reshape.
mdevs = self._get_all_assigned_mediated_devices()
for (mdev_uuid, instance_uuid) in mdevs.items():
if not self._is_existing_mdev(mdev_uuid):
dev_name = libvirt_utils.mdev_uuid2name(mdev_uuid)
dev_info = self._get_mediated_device_information(dev_name)
parent = dev_info['parent']
parent_type = self._get_vgpu_type_per_pgpu(parent)
if dev_info['type'] != parent_type:
# NOTE(sbauza): The mdev was created by using a different
# vGPU type. We can't recreate the mdev until the operator
# modifies the configuration.
parent = "{}:{}:{}.{}".format(*parent[4:].split('_'))
msg = ("The instance UUID %(inst)s uses a VGPU that "
"its parent pGPU %(parent)s no longer "
"supports as the instance vGPU type %(type)s "
"is not accepted for the pGPU. Please correct "
"the configuration accordingly." %
{'inst': instance_uuid,
'parent': parent,
'type': dev_info['type']})
raise exception.InvalidLibvirtGPUConfig(reason=msg)
self._create_new_mediated_device(parent, uuid=mdev_uuid)
def _check_file_backed_memory_support(self):
if not CONF.libvirt.file_backed_memory:
return
# file_backed_memory is only compatible with qemu/kvm virts
if CONF.libvirt.virt_type not in ("qemu", "kvm"):
raise exception.InternalError(
_('Running Nova with file_backed_memory and virt_type '
'%(type)s is not supported. file_backed_memory is only '
'supported with qemu and kvm types.') %
{'type': CONF.libvirt.virt_type})
# file-backed memory doesn't work with memory overcommit.
# Block service startup if file-backed memory is enabled and
# ram_allocation_ratio is not 1.0
if CONF.ram_allocation_ratio != 1.0:
raise exception.InternalError(
'Running Nova with file_backed_memory requires '
'ram_allocation_ratio configured to 1.0')
if CONF.reserved_host_memory_mb:
# this is a hard failure as placement won't allow total < reserved
if CONF.reserved_host_memory_mb >= CONF.libvirt.file_backed_memory:
msg = _(
"'[libvirt] file_backed_memory', which represents total "
"memory reported to placement, must be greater than "