From bae6152d4269bd0b3f5fd8ea4f0abe483a6d3061 Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Mon, 13 Nov 2023 15:21:31 -0800 Subject: [PATCH 1/6] [ironic] Partition & use cache for list_instance* list_instances and list_instance_uuids, as written in the Ironic driver, do not currently respect conductor_group paritioning. Given a nova compute is intended to limit it's scope of work to the conductor group it is configured to work with; this is a bug. Additionally, this should be a significant performance boost for a couple of reasons; firstly, instead of calling the Ironic API and getting all nodes, instead of the subset (when using conductor group), we're now properly getting the subset of nodes -- this is the optimized path in the Ironic DB and API code. Secondly, we're now using the driver's node cache to respond to these requests. Since list_instances and list_instance_uuids is used by periodic tasks, these operating with data that may be slightly stale should have minimal impact compared to the performance benefits. Closes-bug: #2043036 Change-Id: If31158e3269e5e06848c29294fdaa147beedb5a5 (cherry picked from commit fa3cf7d50cba921ea67eb161e6a199067ea62deb) (cherry picked from commit 555d7d0ad02d31476e2d751aa52be4087878f1a2) (cherry picked from commit 3226318b534847a5c553f2d94f9df19a631fd617) --- nova/tests/unit/virt/ironic/test_driver.py | 65 ++++++++----------- nova/virt/ironic/driver.py | 33 +++++++--- ...espect-partition-key-339ff653eaa00753.yaml | 5 ++ 3 files changed, 54 insertions(+), 49 deletions(-) create mode 100644 releasenotes/notes/ironic-list-instance-respect-partition-key-339ff653eaa00753.yaml diff --git a/nova/tests/unit/virt/ironic/test_driver.py b/nova/tests/unit/virt/ironic/test_driver.py index 52aa37ac134..ea40d73f599 100644 --- a/nova/tests/unit/virt/ironic/test_driver.py +++ b/nova/tests/unit/virt/ironic/test_driver.py @@ -582,71 +582,58 @@ def test__get_node_list_fail(self): @mock.patch.object(objects.Instance, 'get_by_uuid') def test_list_instances(self, mock_inst_by_uuid): - nodes = [] + nodes = {} instances = [] for i in range(2): uuid = uuidutils.generate_uuid() + node_uuid = uuidutils.generate_uuid() instances.append(fake_instance.fake_instance_obj(self.ctx, id=i, uuid=uuid)) - nodes.append(ironic_utils.get_test_node(instance_id=uuid, - fields=['instance_id'])) + nodes[node_uuid] = ironic_utils.get_test_node( + id=node_uuid, instance_id=uuid, fields=('instance_id',)) mock_inst_by_uuid.side_effect = instances - self.mock_conn.nodes.return_value = iter(nodes) + self.driver.node_cache = nodes response = self.driver.list_instances() - self.mock_conn.nodes.assert_called_with(associated=True, - fields=['instance_uuid']) expected_calls = [mock.call(mock.ANY, instances[0].uuid), mock.call(mock.ANY, instances[1].uuid)] mock_inst_by_uuid.assert_has_calls(expected_calls) self.assertEqual(['instance-00000000', 'instance-00000001'], sorted(response)) - # NOTE(dustinc) This test ensures we use instance_uuid not instance_id in - # 'fields' when calling ironic. 
+ @mock.patch.object(ironic_driver.IronicDriver, '_refresh_cache') @mock.patch.object(objects.Instance, 'get_by_uuid') - def test_list_instances_uses_instance_uuid(self, mock_inst_by_uuid): - self.driver.list_instances() - - self.mock_conn.nodes.assert_called_with(associated=True, - fields=['instance_uuid']) - - @mock.patch.object(objects.Instance, 'get_by_uuid') - def test_list_instances_fail(self, mock_inst_by_uuid): - self.mock_conn.nodes.side_effect = exception.NovaException + def test_list_instances_fail(self, mock_inst_by_uuid, mock_cache): + mock_cache.side_effect = exception.VirtDriverNotReady self.assertRaises(exception.VirtDriverNotReady, self.driver.list_instances) - self.mock_conn.nodes.assert_called_with(associated=True, - fields=['instance_uuid']) self.assertFalse(mock_inst_by_uuid.called) def test_list_instance_uuids(self): num_nodes = 2 - nodes = [] + nodes = {} for n in range(num_nodes): - nodes.append(ironic_utils.get_test_node( - instance_id=uuidutils.generate_uuid(), - fields=['instance_id'])) - self.mock_conn.nodes.return_value = iter(nodes) + node_uuid = uuidutils.generate_uuid() + instance_uuid = uuidutils.generate_uuid() + nodes[instance_uuid] = ironic_utils.get_test_node( + id=node_uuid, + instance_id=instance_uuid, + fields=('instance_id',)) + self.driver.node_cache = nodes + instance_uuids = self.driver.list_instance_uuids() + expected = nodes.keys() + + self.assertEqual(sorted(expected), sorted(instance_uuids)) + + @mock.patch.object(ironic_driver.IronicDriver, '_refresh_cache') + def test_list_instance_uuids_fail(self, mock_cache): + mock_cache.side_effect = exception.VirtDriverNotReady - uuids = self.driver.list_instance_uuids() - - self.mock_conn.nodes.assert_called_with(associated=True, - fields=['instance_uuid']) - expected = [n.instance_id for n in nodes] - self.assertEqual(sorted(expected), sorted(uuids)) - - # NOTE(dustinc) This test ensures we use instance_uuid not instance_id in - # 'fields' when calling ironic. - @mock.patch.object(objects.Instance, 'get_by_uuid') - def test_list_instance_uuids_uses_instance_uuid(self, mock_inst_by_uuid): - self.driver.list_instance_uuids() - - self.mock_conn.nodes.assert_called_with(associated=True, - fields=['instance_uuid']) + self.assertRaises(exception.VirtDriverNotReady, + self.driver.list_instance_uuids) @mock.patch.object(objects.InstanceList, 'get_uuids_by_host') @mock.patch.object(objects.ServiceList, 'get_all_computes_by_hv_type') diff --git a/nova/virt/ironic/driver.py b/nova/virt/ironic/driver.py index 117294ff897..9d49b96b0e1 100644 --- a/nova/virt/ironic/driver.py +++ b/nova/virt/ironic/driver.py @@ -647,13 +647,21 @@ def list_instances(self): :raises: VirtDriverNotReady """ - # NOTE(dustinc): The SDK returns an object with instance_id, - # but the Ironic API expects instance_uuid in query. + # NOTE(JayF): As of this writing, November 2023, this is only called + # one place; in compute/manager.py, and only if + # list_instance_uuids is not implemented. This means that + # this is effectively dead code in the Ironic driver. + if not self.node_cache: + # Empty cache, try to populate it. If we cannot populate it, this + # is OK. This information is only used to cleanup deleted nodes; + # if Ironic has no deleted nodes; we're good. 
+ self._refresh_cache() + context = nova_context.get_admin_context() - return [objects.Instance.get_by_uuid(context, i.instance_id).name - for i in self._get_node_list(return_generator=True, - associated=True, - fields=['instance_uuid'])] + + return [objects.Instance.get_by_uuid(context, node.instance_id).name + for node in self.node_cache.values() + if node.instance_id is not None] def list_instance_uuids(self): """Return the IDs of all the instances provisioned. @@ -662,10 +670,15 @@ def list_instance_uuids(self): :raises: VirtDriverNotReady """ - # NOTE(dustinc): The SDK returns an object with instance_id, - # but the Ironic API expects instance_uuid in query. - return [node.instance_id for node in self._get_node_list( - return_generator=True, associated=True, fields=['instance_uuid'])] + if not self.node_cache: + # Empty cache, try to populate it. If we cannot populate it, this + # is OK. This information is only used to cleanup deleted nodes; + # if Ironic has no deleted nodes; we're good. + self._refresh_cache() + + return [node.instance_id + for node in self.node_cache.values() + if node.instance_id is not None] def node_is_available(self, nodename): """Confirms a Nova hypervisor node exists in the Ironic inventory. diff --git a/releasenotes/notes/ironic-list-instance-respect-partition-key-339ff653eaa00753.yaml b/releasenotes/notes/ironic-list-instance-respect-partition-key-339ff653eaa00753.yaml new file mode 100644 index 00000000000..96f2a12b8ef --- /dev/null +++ b/releasenotes/notes/ironic-list-instance-respect-partition-key-339ff653eaa00753.yaml @@ -0,0 +1,5 @@ +fixes: + - Ironic virt driver now uses the node cache and respects partition keys, + such as conductor group, for list_instances and list_instance_uuids calls. + This fix will improve performance of the periodic queries which use these + driver methods and reduce API and DB load on the backing Ironic service. From da352edceb74dbd715268f94516503042b48cc90 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 17 Apr 2024 07:06:13 -0700 Subject: [PATCH 2/6] Check images with format_inspector for safety It has been asserted that we should not be calling qemu-img info on untrusted files. That means we need to know if they have a backing_file, data_file or other unsafe configuration *before* we use qemu-img to probe or convert them. This grafts glance's format_inspector module into nova/images so we can use it to check the file early for safety. The expectation is that this will be moved to oslo.utils (or something) later and thus we will just delete the file from nova and change our import when that happens. NOTE: This includes whitespace changes from the glance version of format_inspector.py because of autopep8 demands. 
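The new module is meant to be driven by asking for a per-format inspector,
letting it read the file, and calling safety_check() before qemu-img ever
sees the image. A minimal sketch of that call pattern, assuming a
hypothetical base-image path (only get_inspector(), from_file(),
safety_check() and ImageFormatError come from this patch):

    from nova.image import format_inspector

    # Hypothetical path to a downloaded/cached image, for illustration only.
    path = '/var/lib/nova/instances/_base/abc123'

    inspector_cls = format_inspector.get_inspector('qcow2')
    try:
        inspector = inspector_cls.from_file(path)
    except format_inspector.ImageFormatError:
        # The stream could not be parsed as qcow2 at all; treat as unusable.
        raise
    if not inspector.safety_check():
        # A backing file, external data file or unknown feature bit was
        # found in the header; refuse to hand the file to qemu-img.
        raise RuntimeError('image failed safety check')  # placeholder handling
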
Change-Id: Iaefbe41b4c4bf0cf95d8f621653fdf65062aaa59 Closes-Bug: #2059809 (cherry picked from commit 9cdce715945619fc851ab3f43c97fab4bae4e35a) (cherry picked from commit f07fa55fd86726eeafcd4c0c687bc49dd4df9f4c) (cherry picked from commit 0acf5ee7b5dfb6ff0f9a9745f5ad2a0ed2bf65bf) (cherry picked from commit 67e5376dd64407f5aaf1ea5f8c896e356064a2c9) --- nova/conf/workarounds.py | 10 + nova/image/format_inspector.py | 889 +++++++++++++++++++++ nova/tests/unit/virt/libvirt/test_utils.py | 48 +- nova/tests/unit/virt/test_images.py | 136 +++- nova/virt/images.py | 47 +- 5 files changed, 1121 insertions(+), 9 deletions(-) create mode 100644 nova/image/format_inspector.py diff --git a/nova/conf/workarounds.py b/nova/conf/workarounds.py index e485ae673a5..924e799b620 100644 --- a/nova/conf/workarounds.py +++ b/nova/conf/workarounds.py @@ -438,6 +438,16 @@ Howerver, if you don't use automatic cleaning, it can cause an extra delay before and Ironic node is available for building a new Nova instance. +"""), + cfg.BoolOpt( + 'disable_deep_image_inspection', + default=False, + help=""" +This disables the additional deep image inspection that the compute node does +when downloading from glance. This includes backing-file, data-file, and +known-features detection *before* passing the image to qemu-img. Generally, +this inspection should be enabled for maximum safety, but this workaround +option allows disabling it if there is a compatibility concern. """), ] diff --git a/nova/image/format_inspector.py b/nova/image/format_inspector.py new file mode 100644 index 00000000000..268c98b99cb --- /dev/null +++ b/nova/image/format_inspector.py @@ -0,0 +1,889 @@ +# Copyright 2020 Red Hat, Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +This is a python implementation of virtual disk format inspection routines +gathered from various public specification documents, as well as qemu disk +driver code. It attempts to store and parse the minimum amount of data +required, and in a streaming-friendly manner to collect metadata about +complex-format images. +""" + +import struct + +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) + + +def chunked_reader(fileobj, chunk_size=512): + while True: + chunk = fileobj.read(chunk_size) + if not chunk: + break + yield chunk + + +class CaptureRegion(object): + """Represents a region of a file we want to capture. + + A region of a file we want to capture requires a byte offset into + the file and a length. This is expected to be used by a data + processing loop, calling capture() with the most recently-read + chunk. This class handles the task of grabbing the desired region + of data across potentially multiple fractional and unaligned reads. 
+ + :param offset: Byte offset into the file starting the region + :param length: The length of the region + """ + + def __init__(self, offset, length): + self.offset = offset + self.length = length + self.data = b'' + + @property + def complete(self): + """Returns True when we have captured the desired data.""" + return self.length == len(self.data) + + def capture(self, chunk, current_position): + """Process a chunk of data. + + This should be called for each chunk in the read loop, at least + until complete returns True. + + :param chunk: A chunk of bytes in the file + :param current_position: The position of the file processed by the + read loop so far. Note that this will be + the position in the file *after* the chunk + being presented. + """ + read_start = current_position - len(chunk) + if (read_start <= self.offset <= current_position or + self.offset <= read_start <= (self.offset + self.length)): + if read_start < self.offset: + lead_gap = self.offset - read_start + else: + lead_gap = 0 + self.data += chunk[lead_gap:] + self.data = self.data[:self.length] + + +class ImageFormatError(Exception): + """An unrecoverable image format error that aborts the process.""" + pass + + +class TraceDisabled(object): + """A logger-like thing that swallows tracing when we do not want it.""" + + def debug(self, *a, **k): + pass + + info = debug + warning = debug + error = debug + + +class FileInspector(object): + """A stream-based disk image inspector. + + This base class works on raw images and is subclassed for more + complex types. It is to be presented with the file to be examined + one chunk at a time, during read processing and will only store + as much data as necessary to determine required attributes of + the file. + """ + + def __init__(self, tracing=False): + self._total_count = 0 + + # NOTE(danms): The logging in here is extremely verbose for a reason, + # but should never really be enabled at that level at runtime. To + # retain all that work and assist in future debug, we have a separate + # debug flag that can be passed from a manual tool to turn it on. + if tracing: + self._log = logging.getLogger(str(self)) + else: + self._log = TraceDisabled() + self._capture_regions = {} + + def _capture(self, chunk, only=None): + for name, region in self._capture_regions.items(): + if only and name not in only: + continue + if not region.complete: + region.capture(chunk, self._total_count) + + def eat_chunk(self, chunk): + """Call this to present chunks of the file to the inspector.""" + pre_regions = set(self._capture_regions.keys()) + + # Increment our position-in-file counter + self._total_count += len(chunk) + + # Run through the regions we know of to see if they want this + # data + self._capture(chunk) + + # Let the format do some post-read processing of the stream + self.post_process() + + # Check to see if the post-read processing added new regions + # which may require the current chunk. + new_regions = set(self._capture_regions.keys()) - pre_regions + if new_regions: + self._capture(chunk, only=new_regions) + + def post_process(self): + """Post-read hook to process what has been read so far. + + This will be called after each chunk is read and potentially captured + by the defined regions. If any regions are defined by this call, + those regions will be presented with the current chunk in case it + is within one of the new regions. 
+ """ + pass + + def region(self, name): + """Get a CaptureRegion by name.""" + return self._capture_regions[name] + + def new_region(self, name, region): + """Add a new CaptureRegion by name.""" + if self.has_region(name): + # This is a bug, we tried to add the same region twice + raise ImageFormatError('Inspector re-added region %s' % name) + self._capture_regions[name] = region + + def has_region(self, name): + """Returns True if named region has been defined.""" + return name in self._capture_regions + + @property + def format_match(self): + """Returns True if the file appears to be the expected format.""" + return True + + @property + def virtual_size(self): + """Returns the virtual size of the disk image, or zero if unknown.""" + return self._total_count + + @property + def actual_size(self): + """Returns the total size of the file, usually smaller than + virtual_size. NOTE: this will only be accurate if the entire + file is read and processed. + """ + return self._total_count + + @property + def complete(self): + """Returns True if we have all the information needed.""" + return all(r.complete for r in self._capture_regions.values()) + + def __str__(self): + """The string name of this file format.""" + return 'raw' + + @property + def context_info(self): + """Return info on amount of data held in memory for auditing. + + This is a dict of region:sizeinbytes items that the inspector + uses to examine the file. + """ + return {name: len(region.data) for name, region in + self._capture_regions.items()} + + @classmethod + def from_file(cls, filename): + """Read as much of a file as necessary to complete inspection. + + NOTE: Because we only read as much of the file as necessary, the + actual_size property will not reflect the size of the file, but the + amount of data we read before we satisfied the inspector. + + Raises ImageFormatError if we cannot parse the file. + """ + inspector = cls() + with open(filename, 'rb') as f: + for chunk in chunked_reader(f): + inspector.eat_chunk(chunk) + if inspector.complete: + # No need to eat any more data + break + if not inspector.complete or not inspector.format_match: + raise ImageFormatError('File is not in requested format') + return inspector + + def safety_check(self): + """Perform some checks to determine if this file is safe. + + Returns True if safe, False otherwise. It may raise ImageFormatError + if safety cannot be guaranteed because of parsing or other errors. + """ + return True + + +# The qcow2 format consists of a big-endian 72-byte header, of which +# only a small portion has information we care about: +# +# Dec Hex Name +# 0 0x00 Magic 4-bytes 'QFI\xfb' +# 4 0x04 Version (uint32_t, should always be 2 for modern files) +# . . . +# 8 0x08 Backing file offset (uint64_t) +# 24 0x18 Size in bytes (unint64_t) +# . . . +# 72 0x48 Incompatible features bitfield (6 bytes) +# +# https://gitlab.com/qemu-project/qemu/-/blob/master/docs/interop/qcow2.txt +class QcowInspector(FileInspector): + """QEMU QCOW2 Format + + This should only require about 32 bytes of the beginning of the file + to determine the virtual size, and 104 bytes to perform the safety check. 
+ """ + + BF_OFFSET = 0x08 + BF_OFFSET_LEN = 8 + I_FEATURES = 0x48 + I_FEATURES_LEN = 8 + I_FEATURES_DATAFILE_BIT = 3 + I_FEATURES_MAX_BIT = 4 + + def __init__(self, *a, **k): + super(QcowInspector, self).__init__(*a, **k) + self.new_region('header', CaptureRegion(0, 512)) + + def _qcow_header_data(self): + magic, version, bf_offset, bf_sz, cluster_bits, size = ( + struct.unpack('>4sIQIIQ', self.region('header').data[:32])) + return magic, size + + @property + def has_header(self): + return self.region('header').complete + + @property + def virtual_size(self): + if not self.region('header').complete: + return 0 + if not self.format_match: + return 0 + magic, size = self._qcow_header_data() + return size + + @property + def format_match(self): + if not self.region('header').complete: + return False + magic, size = self._qcow_header_data() + return magic == b'QFI\xFB' + + @property + def has_backing_file(self): + if not self.region('header').complete: + return None + if not self.format_match: + return False + bf_offset_bytes = self.region('header').data[ + self.BF_OFFSET:self.BF_OFFSET + self.BF_OFFSET_LEN] + # nonzero means "has a backing file" + bf_offset, = struct.unpack('>Q', bf_offset_bytes) + return bf_offset != 0 + + @property + def has_unknown_features(self): + if not self.region('header').complete: + return None + if not self.format_match: + return False + i_features = self.region('header').data[ + self.I_FEATURES:self.I_FEATURES + self.I_FEATURES_LEN] + + # This is the maximum byte number we should expect any bits to be set + max_byte = self.I_FEATURES_MAX_BIT // 8 + + # The flag bytes are in big-endian ordering, so if we process + # them in index-order, they're reversed + for i, byte_num in enumerate(reversed(range(self.I_FEATURES_LEN))): + if byte_num == max_byte: + # If we're in the max-allowed byte, allow any bits less than + # the maximum-known feature flag bit to be set + allow_mask = ((1 << self.I_FEATURES_MAX_BIT) - 1) + elif byte_num > max_byte: + # If we're above the byte with the maximum known feature flag + # bit, then we expect all zeroes + allow_mask = 0x0 + else: + # Any earlier-than-the-maximum byte can have any of the flag + # bits set + allow_mask = 0xFF + + if i_features[i] & ~allow_mask: + LOG.warning('Found unknown feature bit in byte %i: %s/%s', + byte_num, bin(i_features[byte_num] & ~allow_mask), + bin(allow_mask)) + return True + + return False + + @property + def has_data_file(self): + if not self.region('header').complete: + return None + if not self.format_match: + return False + i_features = self.region('header').data[ + self.I_FEATURES:self.I_FEATURES + self.I_FEATURES_LEN] + + # First byte of bitfield, which is i_features[7] + byte = self.I_FEATURES_LEN - 1 - self.I_FEATURES_DATAFILE_BIT // 8 + # Third bit of bitfield, which is 0x04 + bit = 1 << (self.I_FEATURES_DATAFILE_BIT - 1 % 8) + return bool(i_features[byte] & bit) + + def __str__(self): + return 'qcow2' + + def safety_check(self): + return (not self.has_backing_file and + not self.has_data_file and + not self.has_unknown_features) + + +# The VHD (or VPC as QEMU calls it) format consists of a big-endian +# 512-byte "footer" at the beginning of the file with various +# information, most of which does not matter to us: +# +# Dec Hex Name +# 0 0x00 Magic string (8-bytes, always 'conectix') +# 40 0x28 Disk size (uint64_t) +# +# https://github.com/qemu/qemu/blob/master/block/vpc.c +class VHDInspector(FileInspector): + """Connectix/MS VPC VHD Format + + This should only require about 512 bytes of the 
beginning of the file + to determine the virtual size. + """ + + def __init__(self, *a, **k): + super(VHDInspector, self).__init__(*a, **k) + self.new_region('header', CaptureRegion(0, 512)) + + @property + def format_match(self): + return self.region('header').data.startswith(b'conectix') + + @property + def virtual_size(self): + if not self.region('header').complete: + return 0 + + if not self.format_match: + return 0 + + return struct.unpack('>Q', self.region('header').data[40:48])[0] + + def __str__(self): + return 'vhd' + + +# The VHDX format consists of a complex dynamic little-endian +# structure with multiple regions of metadata and data, linked by +# offsets with in the file (and within regions), identified by MSFT +# GUID strings. The header is a 320KiB structure, only a few pieces of +# which we actually need to capture and interpret: +# +# Dec Hex Name +# 0 0x00000 Identity (Technically 9-bytes, padded to 64KiB, the first +# 8 bytes of which are 'vhdxfile') +# 196608 0x30000 The Region table (64KiB of a 32-byte header, followed +# by up to 2047 36-byte region table entry structures) +# +# The region table header includes two items we need to read and parse, +# which are: +# +# 196608 0x30000 4-byte signature ('regi') +# 196616 0x30008 Entry count (uint32-t) +# +# The region table entries follow the region table header immediately +# and are identified by a 16-byte GUID, and provide an offset of the +# start of that region. We care about the "metadata region", identified +# by the METAREGION class variable. The region table entry is (offsets +# from the beginning of the entry, since it could be in multiple places): +# +# 0 0x00000 16-byte MSFT GUID +# 16 0x00010 Offset of the actual metadata region (uint64_t) +# +# When we find the METAREGION table entry, we need to grab that offset +# and start examining the region structure at that point. That +# consists of a metadata table of structures, which point to places in +# the data in an unstructured space that follows. The header is +# (offsets relative to the region start): +# +# 0 0x00000 8-byte signature ('metadata') +# . . . +# 16 0x00010 2-byte entry count (up to 2047 entries max) +# +# This header is followed by the specified number of metadata entry +# structures, identified by GUID: +# +# 0 0x00000 16-byte MSFT GUID +# 16 0x00010 4-byte offset (uint32_t, relative to the beginning of +# the metadata region) +# +# We need to find the "Virtual Disk Size" metadata item, identified by +# the GUID in the VIRTUAL_DISK_SIZE class variable, grab the offset, +# add it to the offset of the metadata region, and examine that 8-byte +# chunk of data that follows. +# +# The "Virtual Disk Size" is a naked uint64_t which contains the size +# of the virtual disk, and is our ultimate target here. +# +# https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-vhdx/83e061f8-f6e2-4de1-91bd-5d518a43d477 +class VHDXInspector(FileInspector): + """MS VHDX Format + + This requires some complex parsing of the stream. The first 256KiB + of the image is stored to get the header and region information, + and then we capture the first metadata region to read those + records, find the location of the virtual size data and parse + it. This needs to store the metadata table entries up until the + VDS record, which may consist of up to 2047 32-byte entries at + max. Finally, it must store a chunk of data at the offset of the + actual VDS uint64. 
+ + """ + METAREGION = '8B7CA206-4790-4B9A-B8FE-575F050F886E' + VIRTUAL_DISK_SIZE = '2FA54224-CD1B-4876-B211-5DBED83BF4B8' + VHDX_METADATA_TABLE_MAX_SIZE = 32 * 2048 # From qemu + + def __init__(self, *a, **k): + super(VHDXInspector, self).__init__(*a, **k) + self.new_region('ident', CaptureRegion(0, 32)) + self.new_region('header', CaptureRegion(192 * 1024, 64 * 1024)) + + def post_process(self): + # After reading a chunk, we may have the following conditions: + # + # 1. We may have just completed the header region, and if so, + # we need to immediately read and calculate the location of + # the metadata region, as it may be starting in the same + # read we just did. + # 2. We may have just completed the metadata region, and if so, + # we need to immediately calculate the location of the + # "virtual disk size" record, as it may be starting in the + # same read we just did. + if self.region('header').complete and not self.has_region('metadata'): + region = self._find_meta_region() + if region: + self.new_region('metadata', region) + elif self.has_region('metadata') and not self.has_region('vds'): + region = self._find_meta_entry(self.VIRTUAL_DISK_SIZE) + if region: + self.new_region('vds', region) + + @property + def format_match(self): + return self.region('ident').data.startswith(b'vhdxfile') + + @staticmethod + def _guid(buf): + """Format a MSFT GUID from the 16-byte input buffer.""" + guid_format = '= 2048: + raise ImageFormatError('Region count is %i (limit 2047)' % count) + + # Process the regions until we find the metadata one; grab the + # offset and return + self._log.debug('Region entry first is %x', region_entry_first) + self._log.debug('Region entries %i', count) + meta_offset = 0 + for i in range(0, count): + entry_start = region_entry_first + (i * 32) + entry_end = entry_start + 32 + entry = self.region('header').data[entry_start:entry_end] + self._log.debug('Entry offset is %x', entry_start) + + # GUID is the first 16 bytes + guid = self._guid(entry[:16]) + if guid == self.METAREGION: + # This entry is the metadata region entry + meta_offset, meta_len, meta_req = struct.unpack( + '= 2048: + raise ImageFormatError( + 'Metadata item count is %i (limit 2047)' % count) + + for i in range(0, count): + entry_offset = 32 + (i * 32) + guid = self._guid(meta_buffer[entry_offset:entry_offset + 16]) + if guid == desired_guid: + # Found the item we are looking for by id. + # Stop our region from capturing + item_offset, item_length, _reserved = struct.unpack( + ' Date: Mon, 24 Jun 2024 09:09:36 -0700 Subject: [PATCH 3/6] Additional qemu safety checking on base images There is an additional way we can be fooled into using a qcow2 file with a data-file, which is uploading it as raw to glance and then booting an instance from it. Because when we go to create the ephemeral disk from a cached base image, we've lost the information about the original source's format, we probe the image's file type without a strict format specified. If a qcow2 file is listed in glance as a raw, we won't notice it until it is too late. This brings over another piece of code (proposed against) glance's format inspector which provides a safe format detection routine. This patch uses that to detect the format of and run a safety check on the base image each time we go to use it to create an ephemeral disk image from it. This also detects QED files and always marks them as unsafe as we do not support that format at all. 
Since we could be fooled into downloading one and passing it to qemu-img if we don't recognize it, we need to detect and reject it as unsafe. Conflicts: nova/tests/unit/virt/libvirt/test_utils.py nova/virt/libvirt/utils.py NOTE(elod.illes): conflicts are due to encryption support adding patch I5d6d2a7b03b5ace0826af80c4004de852579ff12 was introduced in zed. Change-Id: I4881c8cbceb30c1ff2d2b859c554e0d02043f1f5 (cherry picked from commit b1b88bf001757546fbbea959f4b73cb344407dfb) (cherry picked from commit 8a0d5f2afaf40c4554419a0b2488ce092eda7a1a) (cherry picked from commit 0269234dc42fe2c320dc4696123cf5132642f9b7) (cherry picked from commit 9e10ac25490e7b5353cb01e768d22eb5a1f92825) --- nova/image/format_inspector.py | 70 ++++++++++++++++--- nova/tests/unit/virt/libvirt/test_driver.py | 7 +- .../unit/virt/libvirt/test_imagebackend.py | 45 ++++++++++-- nova/tests/unit/virt/libvirt/test_utils.py | 39 ++++++++++- nova/virt/libvirt/imagebackend.py | 15 ++++ nova/virt/libvirt/utils.py | 28 ++++++++ 6 files changed, 185 insertions(+), 19 deletions(-) diff --git a/nova/image/format_inspector.py b/nova/image/format_inspector.py index 268c98b99cb..8e57d7ed2c4 100644 --- a/nova/image/format_inspector.py +++ b/nova/image/format_inspector.py @@ -368,6 +368,23 @@ def safety_check(self): not self.has_unknown_features) +class QEDInspector(FileInspector): + def __init__(self, tracing=False): + super().__init__(tracing) + self.new_region('header', CaptureRegion(0, 512)) + + @property + def format_match(self): + if not self.region('header').complete: + return False + return self.region('header').data.startswith(b'QED\x00') + + def safety_check(self): + # QED format is not supported by anyone, but we want to detect it + # and mark it as just always unsafe. + return False + + # The VHD (or VPC as QEMU calls it) format consists of a big-endian # 512-byte "footer" at the beginning of the file with various # information, most of which does not matter to us: @@ -871,19 +888,52 @@ def close(self): self._source.close() +ALL_FORMATS = { + 'raw': FileInspector, + 'qcow2': QcowInspector, + 'vhd': VHDInspector, + 'vhdx': VHDXInspector, + 'vmdk': VMDKInspector, + 'vdi': VDIInspector, + 'qed': QEDInspector, +} + + def get_inspector(format_name): """Returns a FormatInspector class based on the given name. :param format_name: The name of the disk_format (raw, qcow2, etc). :returns: A FormatInspector or None if unsupported. """ - formats = { - 'raw': FileInspector, - 'qcow2': QcowInspector, - 'vhd': VHDInspector, - 'vhdx': VHDXInspector, - 'vmdk': VMDKInspector, - 'vdi': VDIInspector, - } - - return formats.get(format_name) + + return ALL_FORMATS.get(format_name) + + +def detect_file_format(filename): + """Attempts to detect the format of a file. + + This runs through a file one time, running all the known inspectors in + parallel. It stops reading the file once one of them matches or all of + them are sure they don't match. + + Returns the FileInspector that matched, if any. None if 'raw'. 
+ """ + inspectors = {k: v() for k, v in ALL_FORMATS.items()} + with open(filename, 'rb') as f: + for chunk in chunked_reader(f): + for format, inspector in list(inspectors.items()): + try: + inspector.eat_chunk(chunk) + except ImageFormatError: + # No match, so stop considering this format + inspectors.pop(format) + continue + if (inspector.format_match and inspector.complete and + format != 'raw'): + # First complete match (other than raw) wins + return inspector + if all(i.complete for i in inspectors.values()): + # If all the inspectors are sure they are not a match, avoid + # reading to the end of the file to settle on 'raw'. + break + return inspectors['raw'] diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py index 36fdae65d9d..a34a20619f0 100644 --- a/nova/tests/unit/virt/libvirt/test_driver.py +++ b/nova/tests/unit/virt/libvirt/test_driver.py @@ -14275,10 +14275,11 @@ def test_create_images_and_backing_images_exist( '/fake/instance/dir', disk_info) self.assertFalse(mock_fetch_image.called) + @mock.patch('nova.image.format_inspector.detect_file_format') @mock.patch('nova.privsep.path.utime') @mock.patch('nova.virt.libvirt.utils.create_image') def test_create_images_and_backing_ephemeral_gets_created( - self, mock_create_cow_image, mock_utime): + self, mock_create_cow_image, mock_utime, mock_detect): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) base_dir = os.path.join(CONF.instances_path, @@ -16018,11 +16019,13 @@ def test_create_ephemeral_specified_fs(self, fake_mkfs): fake_mkfs.assert_has_calls([mock.call('ext4', '/dev/something', 'myVol')]) + @mock.patch('nova.image.format_inspector.detect_file_format') @mock.patch('nova.privsep.path.utime') @mock.patch('nova.virt.libvirt.utils.fetch_image') @mock.patch('nova.virt.libvirt.utils.create_image') def test_create_ephemeral_specified_fs_not_valid( - self, mock_create_cow_image, mock_fetch_image, mock_utime): + self, mock_create_cow_image, mock_fetch_image, mock_utime, + mock_detect): CONF.set_override('default_ephemeral_format', 'ext4') ephemerals = [{'device_type': 'disk', 'disk_bus': 'virtio', diff --git a/nova/tests/unit/virt/libvirt/test_imagebackend.py b/nova/tests/unit/virt/libvirt/test_imagebackend.py index 0dc1009c920..853c5a200c8 100644 --- a/nova/tests/unit/virt/libvirt/test_imagebackend.py +++ b/nova/tests/unit/virt/libvirt/test_imagebackend.py @@ -524,13 +524,15 @@ def test_cache_template_exists(self, mock_exists): mock_exists.assert_has_calls(exist_calls) + @mock.patch('nova.image.format_inspector.detect_file_format') @mock.patch.object(imagebackend.utils, 'synchronized') @mock.patch('nova.virt.libvirt.utils.create_image') @mock.patch.object(os.path, 'exists', side_effect=[]) @mock.patch.object(imagebackend.Image, 'verify_base_size') @mock.patch('nova.privsep.path.utime') def test_create_image( - self, mock_utime, mock_verify, mock_exist, mock_create, mock_sync + self, mock_utime, mock_verify, mock_exist, mock_create, mock_sync, + mock_detect_format ): mock_sync.side_effect = lambda *a, **kw: self._fake_deco fn = mock.MagicMock() @@ -551,7 +553,10 @@ def test_create_image( mock_exist.assert_has_calls(exist_calls) self.assertTrue(mock_sync.called) mock_utime.assert_called() + mock_detect_format.assert_called_once() + mock_detect_format.return_value.safety_check.assert_called_once_with() + @mock.patch('nova.image.format_inspector.detect_file_format') @mock.patch.object(imagebackend.utils, 'synchronized') @mock.patch('nova.virt.libvirt.utils.create_image') 
@mock.patch.object(imagebackend.disk, 'extend') @@ -559,7 +564,8 @@ def test_create_image( @mock.patch.object(imagebackend.Qcow2, 'get_disk_size') @mock.patch('nova.privsep.path.utime') def test_create_image_too_small(self, mock_utime, mock_get, mock_exist, - mock_extend, mock_create, mock_sync): + mock_extend, mock_create, mock_sync, + mock_detect_format): mock_sync.side_effect = lambda *a, **kw: self._fake_deco mock_get.return_value = self.SIZE fn = mock.MagicMock() @@ -576,7 +582,9 @@ def test_create_image_too_small(self, mock_utime, mock_get, mock_exist, self.assertTrue(mock_sync.called) self.assertFalse(mock_create.called) self.assertFalse(mock_extend.called) + mock_detect_format.assert_called_once() + @mock.patch('nova.image.format_inspector.detect_file_format') @mock.patch.object(imagebackend.utils, 'synchronized') @mock.patch('nova.virt.libvirt.utils.create_image') @mock.patch('nova.virt.libvirt.utils.get_disk_backing_file') @@ -588,7 +596,8 @@ def test_create_image_too_small(self, mock_utime, mock_get, mock_exist, def test_generate_resized_backing_files(self, mock_utime, mock_copy, mock_verify, mock_exist, mock_extend, mock_get, - mock_create, mock_sync): + mock_create, mock_sync, + mock_detect_format): mock_sync.side_effect = lambda *a, **kw: self._fake_deco mock_get.return_value = self.QCOW2_BASE fn = mock.MagicMock() @@ -615,7 +624,9 @@ def test_generate_resized_backing_files(self, mock_utime, mock_copy, self.assertTrue(mock_sync.called) self.assertFalse(mock_create.called) mock_utime.assert_called() + mock_detect_format.assert_called_once() + @mock.patch('nova.image.format_inspector.detect_file_format') @mock.patch.object(imagebackend.utils, 'synchronized') @mock.patch('nova.virt.libvirt.utils.create_image') @mock.patch('nova.virt.libvirt.utils.get_disk_backing_file') @@ -626,7 +637,8 @@ def test_generate_resized_backing_files(self, mock_utime, mock_copy, def test_qcow2_exists_and_has_no_backing_file(self, mock_utime, mock_verify, mock_exist, mock_extend, mock_get, - mock_create, mock_sync): + mock_create, mock_sync, + mock_detect_format): mock_sync.side_effect = lambda *a, **kw: self._fake_deco mock_get.return_value = None fn = mock.MagicMock() @@ -647,6 +659,31 @@ def test_qcow2_exists_and_has_no_backing_file(self, mock_utime, self.assertTrue(mock_sync.called) self.assertFalse(mock_create.called) self.assertFalse(mock_extend.called) + mock_detect_format.assert_called_once() + + @mock.patch('nova.image.format_inspector.detect_file_format') + @mock.patch.object(imagebackend.utils, 'synchronized') + @mock.patch('nova.virt.libvirt.utils.create_image') + @mock.patch('nova.virt.libvirt.utils.get_disk_backing_file') + @mock.patch.object(imagebackend.disk, 'extend') + @mock.patch.object(os.path, 'exists', side_effect=[]) + @mock.patch.object(imagebackend.Image, 'verify_base_size') + def test_qcow2_exists_and_fails_safety_check(self, + mock_verify, mock_exist, + mock_extend, mock_get, + mock_create, mock_sync, + mock_detect_format): + mock_detect_format.return_value.safety_check.return_value = False + mock_sync.side_effect = lambda *a, **kw: self._fake_deco + mock_get.return_value = None + fn = mock.MagicMock() + mock_exist.side_effect = [False, True, False, True, True] + image = self.image_class(self.INSTANCE, self.NAME) + + self.assertRaises(exception.InvalidDiskInfo, + image.create_image, fn, self.TEMPLATE_PATH, + self.SIZE) + mock_verify.assert_not_called() def test_resolve_driver_format(self): image = self.image_class(self.INSTANCE, self.NAME) diff --git 
a/nova/tests/unit/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py index 68e24b7b6fc..0ab00717d28 100644 --- a/nova/tests/unit/virt/libvirt/test_utils.py +++ b/nova/tests/unit/virt/libvirt/test_utils.py @@ -106,15 +106,27 @@ def test_valid_hostname_bad(self): @mock.patch('oslo_concurrency.processutils.execute') @mock.patch('nova.virt.images.qemu_img_info') + @mock.patch('nova.image.format_inspector.detect_file_format') def _test_create_image( - self, path, disk_format, disk_size, mock_info, mock_execute, - backing_file=None + self, path, disk_format, disk_size, mock_detect, mock_info, + mock_execute, backing_file=None, safety_check=True ): + if isinstance(backing_file, dict): + backing_info = backing_file + backing_file = backing_info.pop('file', None) + else: + backing_info = {} + backing_backing_file = backing_info.pop('backing_file', None) + mock_info.return_value = mock.Mock( file_format=mock.sentinel.backing_fmt, cluster_size=mock.sentinel.cluster_size, + backing_file=backing_backing_file, + format_specific=backing_info, ) + mock_detect.return_value.safety_check.return_value = safety_check + libvirt_utils.create_image( path, disk_format, disk_size, backing_file=backing_file) @@ -126,7 +138,7 @@ def _test_create_image( mock_info.assert_called_once_with(backing_file) cow_opts = [ '-o', - f'backing_file={mock.sentinel.backing_file},' + f'backing_file={backing_file},' f'backing_fmt={mock.sentinel.backing_fmt},' f'cluster_size={mock.sentinel.cluster_size}', ] @@ -139,6 +151,8 @@ def _test_create_image( expected_args += (disk_size,) self.assertEqual([(expected_args,)], mock_execute.call_args_list) + if backing_file: + mock_detect.return_value.safety_check.assert_called_once_with() def test_create_image_raw(self): self._test_create_image('/some/path', 'raw', '10G') @@ -154,6 +168,25 @@ def test_create_image_backing_file(self): backing_file=mock.sentinel.backing_file, ) + def test_create_image_base_has_backing_file(self): + self.assertRaises( + exception.InvalidDiskInfo, + self._test_create_image, + '/some/stuff', 'qcow2', '1234567891234', + backing_file={'file': mock.sentinel.backing_file, + 'backing_file': mock.sentinel.backing_backing_file}, + ) + + def test_create_image_base_has_data_file(self): + self.assertRaises( + exception.InvalidDiskInfo, + self._test_create_image, + '/some/stuff', 'qcow2', '1234567891234', + backing_file={'file': mock.sentinel.backing_file, + 'backing_file': mock.sentinel.backing_backing_file, + 'data': {'data-file': mock.sentinel.data_file}}, + ) + def test_create_image_size_none(self): self._test_create_image( '/some/stuff', 'qcow2', None, diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py index 534cc60759b..e9e62095680 100644 --- a/nova/virt/libvirt/imagebackend.py +++ b/nova/virt/libvirt/imagebackend.py @@ -34,6 +34,7 @@ import nova.conf from nova import exception from nova.i18n import _ +from nova.image import format_inspector from nova.image import glance import nova.privsep.libvirt import nova.privsep.path @@ -660,6 +661,20 @@ def create_qcow2_image(base, target, size): if not os.path.exists(base): prepare_template(target=base, *args, **kwargs) + # NOTE(danms): We need to perform safety checks on the base image + # before we inspect it for other attributes. We do this each time + # because additional safety checks could have been added since we + # downloaded the image. 
+ if not CONF.workarounds.disable_deep_image_inspection: + inspector = format_inspector.detect_file_format(base) + if not inspector.safety_check(): + LOG.warning('Base image %s failed safety check', base) + # NOTE(danms): This is the same exception as would be raised + # by qemu_img_info() if the disk format was unreadable or + # otherwise unsuitable. + raise exception.InvalidDiskInfo( + reason=_('Base image failed safety check')) + # NOTE(ankit): Update the mtime of the base file so the image # cache manager knows it is in use. _update_utime_ignore_eacces(base) diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py index 0675e4ac146..7637930b6a6 100644 --- a/nova/virt/libvirt/utils.py +++ b/nova/virt/libvirt/utils.py @@ -34,6 +34,7 @@ from nova import context as nova_context from nova import exception from nova.i18n import _ +from nova.image import format_inspector from nova import objects from nova.objects import fields as obj_fields import nova.privsep.fs @@ -132,7 +133,34 @@ def create_image( cow_opts = [] if backing_file: + # NOTE(danms): We need to perform safety checks on the base image + # before we inspect it for other attributes. We do this each time + # because additional safety checks could have been added since we + # downloaded the image. + if not CONF.workarounds.disable_deep_image_inspection: + inspector = format_inspector.detect_file_format(backing_file) + if not inspector.safety_check(): + LOG.warning('Base image %s failed safety check', backing_file) + # NOTE(danms): This is the same exception as would be raised + # by qemu_img_info() if the disk format was unreadable or + # otherwise unsuitable. + raise exception.InvalidDiskInfo( + reason=_('Base image failed safety check')) + base_details = images.qemu_img_info(backing_file) + if base_details.backing_file is not None: + LOG.warning('Base image %s failed safety check', backing_file) + raise exception.InvalidDiskInfo( + reason=_('Base image failed safety check')) + try: + data_file = base_details.format_specific['data']['data-file'] + except (KeyError, TypeError, AttributeError): + data_file = None + if data_file is not None: + LOG.warning('Base image %s failed safety check', backing_file) + raise exception.InvalidDiskInfo( + reason=_('Base image failed safety check')) + cow_opts += [ f'backing_file={backing_file}', f'backing_fmt={base_details.file_format}' From a2acb31d790e6cb41c067bfc0343bde274c9428c Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Mon, 1 Jul 2024 09:06:40 -0700 Subject: [PATCH 4/6] Fix vmdk_allowed_types checking This restores the vmdk_allowed_types checking in create_image() that was unintentionally lost by tightening the qemu-type-matches-glance code in the fetch patch recently. Since we are still detecting the format of base images without metadata, we would have treated a vmdk file that claims to be raw as raw in fetch, but then read it like a vmdk once it was used as a base image for something else. Conflicts: nova/tests/unit/virt/libvirt/test_utils.py NOTE(elod.illes): conflicts are due to encryption support adding patch I5d6d2a7b03b5ace0826af80c4004de852579ff12 was introduced in zed. 
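The restored check compares the VMDK "create-type" that qemu-img reports for
the base image against a configured allow-list, rejecting subformats such as
monolithicFlat that can reference files outside the image. Roughly, as a
sketch of the pre-existing images.check_vmdk_image() helper this patch wires
back into create_image() below (the option name comes from the original
vmdk_allowed_types fix, not from this change):

    import nova.conf
    from nova import exception

    CONF = nova.conf.CONF

    def check_vmdk_image(image_id, data):
        # 'data' is the qemu_img_info() result for the base image.
        create_type = data.format_specific.get('data', {}).get('create-type')
        if create_type not in CONF.compute.vmdk_allowed_types:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason='Invalid VMDK create-type specified')
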
Change-Id: I07b332a7edb814f6a91661651d9d24bfd6651ae7 Related-Bug: #2059809 (cherry picked from commit 08be7b2a0dc1d7728d8034bc2aab0428c4fb642e) (cherry picked from commit 11301e7e3f0d81a3368632f90608e30d9c647111) (cherry picked from commit 70a435fd519a0ebcc3ac9ad5254fefbf19c93e48) (cherry picked from commit f732f8476851e6272d8ad9937f54b918795844e8) --- nova/tests/unit/virt/libvirt/test_utils.py | 25 ++++++++++++++++++++-- nova/virt/libvirt/utils.py | 2 ++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/nova/tests/unit/virt/libvirt/test_utils.py b/nova/tests/unit/virt/libvirt/test_utils.py index 0ab00717d28..26de82790d9 100644 --- a/nova/tests/unit/virt/libvirt/test_utils.py +++ b/nova/tests/unit/virt/libvirt/test_utils.py @@ -117,9 +117,11 @@ def _test_create_image( else: backing_info = {} backing_backing_file = backing_info.pop('backing_file', None) + backing_fmt = backing_info.pop('backing_fmt', + mock.sentinel.backing_fmt) mock_info.return_value = mock.Mock( - file_format=mock.sentinel.backing_fmt, + file_format=backing_fmt, cluster_size=mock.sentinel.cluster_size, backing_file=backing_backing_file, format_specific=backing_info, @@ -139,7 +141,7 @@ def _test_create_image( cow_opts = [ '-o', f'backing_file={backing_file},' - f'backing_fmt={mock.sentinel.backing_fmt},' + f'backing_fmt={backing_fmt},' f'cluster_size={mock.sentinel.cluster_size}', ] @@ -193,6 +195,25 @@ def test_create_image_size_none(self): backing_file=mock.sentinel.backing_file, ) + def test_create_image_vmdk(self): + self._test_create_image( + '/some/vmdk', 'vmdk', '1234567891234', + backing_file={'file': mock.sentinel.backing_file, + 'backing_fmt': 'vmdk', + 'backing_file': None, + 'data': {'create-type': 'monolithicSparse'}} + ) + + def test_create_image_vmdk_invalid_type(self): + self.assertRaises(exception.ImageUnacceptable, + self._test_create_image, + '/some/vmdk', 'vmdk', '1234567891234', + backing_file={'file': mock.sentinel.backing_file, + 'backing_fmt': 'vmdk', + 'backing_file': None, + 'data': {'create-type': 'monolithicFlat'}} + ) + @ddt.unpack @ddt.data({'fs_type': 'some_fs_type', 'default_eph_format': None, diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py index 7637930b6a6..6dcc928a28d 100644 --- a/nova/virt/libvirt/utils.py +++ b/nova/virt/libvirt/utils.py @@ -148,6 +148,8 @@ def create_image( reason=_('Base image failed safety check')) base_details = images.qemu_img_info(backing_file) + if base_details.file_format == 'vmdk': + images.check_vmdk_image('base', base_details) if base_details.backing_file is not None: LOG.warning('Base image %s failed safety check', backing_file) raise exception.InvalidDiskInfo( From 1be000939585270e9241b860a1d84e8cc209654a Mon Sep 17 00:00:00 2001 From: Elod Illes Date: Wed, 10 Jul 2024 15:51:26 +0200 Subject: [PATCH 5/6] [CI][zed-only] Use zed-last version of tempest in nova-next And exclude a flaky test case[1] as well to make the gate more stable. [1] test_instances_with_cinder_volumes_on_all_compute_nodes Change-Id: I3c69a20993bca066ece46eed16d4e897144524cd --- .zuul.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.zuul.yaml b/.zuul.yaml index 550332abc7d..a46c6fce662 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -339,6 +339,8 @@ post-run: playbooks/nova-next/post.yaml required-projects: - novnc/novnc + - name: openstack/tempest + override-checkout: zed-last vars: # We use the "all" environment for tempest_test_regex and # tempest_exclude_regex. @@ -352,7 +354,7 @@ # tempest_test_exclude_list. 
# FIXME(lyarwood): The tempest.api.compute.admin.test_volume_swap tests # are skipped until bug #1929710 is resolved. - tempest_exclude_regex: ^tempest\.(scenario\.test_network_(?!qos)|api\.compute\.admin\.test_volume_swap)|tempest.api.compute.servers.test_device_tagging.TaggedAttachmentsTest.test_tagged_attachment + tempest_exclude_regex: ^tempest\.(scenario\.test_network_(?!qos)|api\.compute\.admin\.test_volume_swap)|tempest.api.compute.servers.test_device_tagging.TaggedAttachmentsTest.test_tagged_attachment|test_instances_with_cinder_volumes_on_all_compute_nodes devstack_local_conf: post-config: $NOVA_CPU_CONF: From 3a7e58b06a2a89dbc4d4644c9c63bab28325ecab Mon Sep 17 00:00:00 2001 From: Elod Illes Date: Mon, 8 Jul 2024 10:51:39 +0200 Subject: [PATCH 6/6] [CI][zed-only] Ceph minimum client on cinder-plugin-ceph-tempest job enable Since e222cc976918a331bacff150e84069fda8f4960a, it is possible to set the minimum client version. The goal of this patch is to enable the *mimic* client version for the current cinder-plugin-ceph-tempest job. As a result, we will be able to ensure that snapshots can be deleted when a volume is cloned from them. So that we can reduce the excluded test cases while keeping the gate functional. Change-Id: I441d0513a6547b2fbae011b7e9dad7d6a51398a6 --- .zuul.yaml | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/.zuul.yaml b/.zuul.yaml index a46c6fce662..15e44f81fe7 100644 --- a/.zuul.yaml +++ b/.zuul.yaml @@ -598,16 +598,9 @@ pre-run: - playbooks/ceph/glance-copy-policy.yaml vars: - # NOTE(elod.illes): this job started to break with the following five - # test cases, somewhere around merging cinder-tempest-plugin patch - # I281f881ad565e565839522ddf02057f7545c7146 so let's just exclude - # them to unblock the gate. - tempest_exclude_regex: "\ - (test_delete_dep_chain)|\ - (test_delete_dep_chain_2)|\ - (test_delete_source_snapshot)|\ - (test_delete_source_volume)|\ - (test_nova_image_snapshot_dependency)" + # NOTE(elod.illes): this job is breaking with the following test case on + # unmaintained/yoga, so let's just exclude it to unblock the gate + tempest_exclude_regex: test_nova_image_snapshot_dependency # NOTE(danms): These tests create an empty non-raw image, which nova # will refuse because we set never_download_image_if_on_rbd in this job. # Just skip these tests for this case. @@ -615,6 +608,7 @@ GLANCE_STANDALONE: True GLANCE_USE_IMPORT_WORKFLOW: True DEVSTACK_PARALLEL: True + CEPH_MIN_CLIENT_VERSION: "mimic" # NOTE(danms): This job is pretty heavy as it is, so we disable some # services that are not relevant to the nova-glance-ceph scenario # that this job is intended to validate.
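
Because the inspectors only ever look at the image header, the safety checks
added earlier in this series can be exercised without qemu-img at all. A
hand-rolled sketch, not part of any of these patches, using the qcow2 header
offsets from the QcowInspector comment block (magic at 0, version at 4,
backing-file offset at 8, size at 24):

    import struct

    from nova.image import format_inspector

    # Craft a minimal 512-byte "qcow2" whose backing-file offset is nonzero;
    # every other header field is left zeroed.
    header = bytearray(512)
    header[0:4] = b'QFI\xfb'                        # magic
    header[4:8] = struct.pack('>I', 3)              # version
    header[8:16] = struct.pack('>Q', 0x200)         # backing file offset
    header[24:32] = struct.pack('>Q', 1024 * 1024)  # virtual size in bytes

    with open('/tmp/unsafe.qcow2', 'wb') as f:      # throwaway path
        f.write(header)

    inspector = format_inspector.detect_file_format('/tmp/unsafe.qcow2')
    print(type(inspector).__name__, inspector.safety_check())
    # Expected: "QcowInspector False"; the nonzero backing-file offset is one
    # of the conditions create_qcow2_image() and create_image() now refuse
    # before calling qemu-img.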