Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Xen virtual machine disk fixes #58400

Merged
merged 10 commits (source/target branch names not captured in this page export)
Sep 16, 2020
1 change: 1 addition & 0 deletions changelog/58333.fixed
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Convert disks of volume type to file or block disks on Xen
247 changes: 152 additions & 95 deletions salt/modules/virt.py
Original file line number Diff line number Diff line change
Expand Up @@ -458,6 +458,8 @@ def _get_disks(conn, dom):
"""
disks = {}
doc = ElementTree.fromstring(dom.XMLDesc(0))
# Get the path, pool, volume name of each volume we can
all_volumes = _get_all_volumes_paths(conn)
for elem in doc.findall("devices/disk"):
source = elem.find("source")
if source is None:
Expand All @@ -470,13 +472,61 @@ def _get_disks(conn, dom):
extra_properties = None
if "dev" in target.attrib:
disk_type = elem.get("type")

def _get_disk_volume_data(pool_name, volume_name):
    """
    Build the ``(qemu_target, extra_properties)`` pair for a pool/volume disk.

    Looks the volume up via libvirt to report its sizes and reconstructs the
    backing-file chain, preferring the chain present in the domain XML and
    falling back to the volume's own XML definition.

    NOTE(review): this is a closure — it reads ``conn`` (libvirt connection)
    and ``elem`` (the ``<disk>`` XML element) from the enclosing
    ``_get_disks()`` scope; confirm both are in scope at every call site.

    :param pool_name: name of the storage pool holding the volume
    :param volume_name: name of the volume inside that pool
    :return: tuple of the "pool/volume" target string and a dict with
        ``virtual size``, ``disk size`` and optionally ``backing file``
    """
    qemu_target = "{}/{}".format(pool_name, volume_name)
    pool = conn.storagePoolLookupByName(pool_name)
    vol = pool.storageVolLookupByName(volume_name)
    # virStorageVol.info() returns [type, capacity, allocation]
    vol_info = vol.info()
    extra_properties = {
        "virtual size": vol_info[1],
        "disk size": vol_info[2],
    }

    # Backing chain as declared in the domain XML, flattened into a list
    # (only <backingStore> nodes that carry a <source> are real entries).
    backing_files = [
        {
            "file": node.find("source").get("file"),
            "file format": node.find("format").get("type"),
        }
        for node in elem.findall(".//backingStore[source]")
    ]

    if backing_files:
        # We had the backing files in a flat list, nest them again.
        extra_properties["backing file"] = backing_files[0]
        parent = extra_properties["backing file"]
        for sub_backing_file in backing_files[1:]:
            parent["backing file"] = sub_backing_file
            parent = sub_backing_file

    else:
        # In some cases the backing chain is not displayed by the domain definition
        # Try to see if we have some of it in the volume definition.
        vol_desc = ElementTree.fromstring(vol.XMLDesc())
        backing_path = vol_desc.find("./backingStore/path")
        backing_format = vol_desc.find("./backingStore/format")
        if backing_path is not None:
            extra_properties["backing file"] = {"file": backing_path.text}
            # Only meaningful once a backing file path exists.
            if backing_format is not None:
                extra_properties["backing file"][
                    "file format"
                ] = backing_format.get("type")
    return (qemu_target, extra_properties)

if disk_type == "file":
qemu_target = source.get("file", "")
if qemu_target.startswith("/dev/zvol/"):
disks[target.get("dev")] = {"file": qemu_target, "zfs": True}
continue
# Extract disk sizes, snapshots, backing files
if elem.get("device", "disk") != "cdrom":

if qemu_target in all_volumes.keys():
# If the qemu_target is a known path, output a volume
volume = all_volumes[qemu_target]
qemu_target, extra_properties = _get_disk_volume_data(
volume["pool"], volume["name"]
)
elif elem.get("device", "disk") != "cdrom":
# Extract disk sizes, snapshots, backing files
try:
stdout = subprocess.Popen(
[
Expand All @@ -498,6 +548,12 @@ def _get_disks(conn, dom):
disk.update({"file": "Does not exist"})
elif disk_type == "block":
qemu_target = source.get("dev", "")
# If the qemu_target is a known path, output a volume
if qemu_target in all_volumes.keys():
volume = all_volumes[qemu_target]
qemu_target, extra_properties = _get_disk_volume_data(
volume["pool"], volume["name"]
)
elif disk_type == "network":
qemu_target = source.get("protocol")
source_name = source.get("name")
Expand Down Expand Up @@ -536,43 +592,9 @@ def _get_disks(conn, dom):
elif disk_type == "volume":
pool_name = source.get("pool")
volume_name = source.get("volume")
qemu_target = "{}/{}".format(pool_name, volume_name)
pool = conn.storagePoolLookupByName(pool_name)
vol = pool.storageVolLookupByName(volume_name)
vol_info = vol.info()
extra_properties = {
"virtual size": vol_info[1],
"disk size": vol_info[2],
}

backing_files = [
{
"file": node.find("source").get("file"),
"file format": node.find("format").get("type"),
}
for node in elem.findall(".//backingStore[source]")
]

if backing_files:
# We had the backing files in a flat list, nest them again.
extra_properties["backing file"] = backing_files[0]
parent = extra_properties["backing file"]
for sub_backing_file in backing_files[1:]:
parent["backing file"] = sub_backing_file
parent = sub_backing_file

else:
# In some cases the backing chain is not displayed by the domain definition
# Try to see if we have some of it in the volume definition.
vol_desc = ElementTree.fromstring(vol.XMLDesc())
backing_path = vol_desc.find("./backingStore/path")
backing_format = vol_desc.find("./backingStore/format")
if backing_path is not None:
extra_properties["backing file"] = {"file": backing_path.text}
if backing_format is not None:
extra_properties["backing file"][
"file format"
] = backing_format.get("type")
qemu_target, extra_properties = _get_disk_volume_data(
pool_name, volume_name
)

if not qemu_target:
continue
Expand Down Expand Up @@ -767,6 +789,62 @@ def _migrate(dom, dst_uri, **kwargs):
raise CommandExecutionError(err.get_error_message())


def _disk_from_pool(conn, pool, pool_xml, volume_name):
"""
Create a disk definition out of the pool XML and volume name.
The aim of this function is to replace the volume-based definition when not handled by libvirt.
It returns the disk Jinja context to be used when creating the VM
"""
pool_type = pool_xml.get("type")
disk_context = {}

# handle dir, fs and netfs
if pool_type in ["dir", "netfs", "fs"]:
disk_context["type"] = "file"
volume = pool.storageVolLookupByName(volume_name)
volume_xml = ElementTree.fromstring(volume.XMLDesc())
disk_context["source_file"] = volume_xml.find("./target/path").text

elif pool_type in ["logical", "disk", "iscsi", "scsi"]:
disk_context["type"] = "block"
disk_context["format"] = "raw"
volume = pool.storageVolLookupByName(volume_name)
volume_xml = ElementTree.fromstring(volume.XMLDesc())
disk_context["source_file"] = volume_xml.find("./target/path").text

elif pool_type in ["rbd", "gluster", "sheepdog"]:
# libvirt can't handle rbd, gluster and sheepdog as volumes
disk_context["type"] = "network"
disk_context["protocol"] = pool_type
# Copy the hosts from the pool definition
disk_context["hosts"] = [
{"name": host.get("name"), "port": host.get("port")}
for host in pool_xml.findall(".//host")
]
dir_node = pool_xml.find("./source/dir")
# Gluster and RBD need pool/volume name
name_node = pool_xml.find("./source/name")
if name_node is not None:
disk_context["volume"] = "{}/{}".format(name_node.text, volume_name)
# Copy the authentication if any for RBD
auth_node = pool_xml.find("./source/auth")
if auth_node is not None:
username = auth_node.get("username")
secret_node = auth_node.find("./secret")
usage = secret_node.get("usage")
if not usage:
# Get the usage from the UUID
uuid = secret_node.get("uuid")
usage = conn.secretLookupByUUIDString(uuid).usageID()
disk_context["auth"] = {
"type": "ceph",
"username": username,
"usage": usage,
}

return disk_context


def _gen_xml(
conn,
name,
Expand Down Expand Up @@ -872,41 +950,16 @@ def _gen_xml(
elif disk.get("pool"):
disk_context["volume"] = disk["filename"]
# If we had no source_file, then we want a volume
pool_xml = ElementTree.fromstring(
conn.storagePoolLookupByName(disk["pool"]).XMLDesc()
)
pool = conn.storagePoolLookupByName(disk["pool"])
pool_xml = ElementTree.fromstring(pool.XMLDesc())
pool_type = pool_xml.get("type")
if pool_type in ["rbd", "gluster", "sheepdog"]:
# libvirt can't handle rbd, gluster and sheepdog as volumes
disk_context["type"] = "network"
disk_context["protocol"] = pool_type
# Copy the hosts from the pool definition
disk_context["hosts"] = [
{"name": host.get("name"), "port": host.get("port")}
for host in pool_xml.findall(".//host")
]
dir_node = pool_xml.find("./source/dir")
# Gluster and RBD need pool/volume name
name_node = pool_xml.find("./source/name")
if name_node is not None:
disk_context["volume"] = "{}/{}".format(
name_node.text, disk_context["volume"]
)
# Copy the authentication if any for RBD
auth_node = pool_xml.find("./source/auth")
if auth_node is not None:
username = auth_node.get("username")
secret_node = auth_node.find("./secret")
usage = secret_node.get("usage")
if not usage:
# Get the usage from the UUID
uuid = secret_node.get("uuid")
usage = conn.secretLookupByUUIDString(uuid).usageID()
disk_context["auth"] = {
"type": "ceph",
"username": username,
"usage": usage,
}

# TODO For Xen VMs convert all pool types (issue #58333)
if hypervisor == "xen" or pool_type in ["rbd", "gluster", "sheepdog"]:
disk_context.update(
_disk_from_pool(conn, pool, pool_xml, disk_context["volume"])
)

else:
if pool_type in ["disk", "logical"]:
# The volume format for these types doesn't match the driver format in the VM
Expand Down Expand Up @@ -4261,7 +4314,7 @@ def purge(vm_, dirs=False, removables=False, **kwargs):
directories.add(os.path.dirname(disks[disk]["file"]))
else:
# We may have a volume to delete here
matcher = re.match("^(?P<pool>[^/]+)/(?P<volume>.*)$", disks[disk]["file"])
matcher = re.match("^(?P<pool>[^/]+)/(?P<volume>.*)$", disks[disk]["file"],)
if matcher:
pool_name = matcher.group("pool")
pool = None
Expand Down Expand Up @@ -6779,29 +6832,33 @@ def discarder(ctxt, error): # pylint: disable=unused-argument

def _get_all_volumes_paths(conn):
    """
    Extract the path, name, pool name and backing stores path of all volumes.

    :param conn: libvirt connection to use
    :return: dict mapping each volume path to a dict holding the ``pool``
        name, the volume ``name`` and the list of its ``backing_stores``
        paths
    """
    # Only running pools can be queried for their volumes.
    pools = [
        pool
        for pool in conn.listAllStoragePools()
        if pool.info()[0] == libvirt.VIR_STORAGE_POOL_RUNNING
    ]
    volumes = {}
    for pool in pools:
        pool_volumes = {
            volume.path(): {
                "pool": pool.name(),
                "name": volume.name(),
                "backing_stores": [
                    path.text
                    for path in ElementTree.fromstring(volume.XMLDesc()).findall(
                        ".//backingStore/path"
                    )
                ],
            }
            for volume in pool.listAllVolumes()
            # Skip volumes libvirt can no longer resolve (deleted under us)
            if _is_valid_volume(volume)
        }
        volumes.update(pool_volumes)
    return volumes


def volume_infos(pool=None, volume=None, **kwargs):
Expand Down Expand Up @@ -6872,8 +6929,8 @@ def _volume_extract_infos(vol):
if vol.path():
as_backing_store = {
path
for (path, all_paths) in backing_stores.items()
if vol.path() in all_paths
for (path, volume) in backing_stores.items()
if vol.path() in volume.get("backing_stores")
}
used_by = [
vm_name
Expand Down
12 changes: 12 additions & 0 deletions salt/templates/virt/libvirt_disks.jinja
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
{#-
    Macro library for libvirt disk XML snippets.

    network_source(disk): renders the <source> element of a network disk.
    The ``disk`` context is expected to provide: ``protocol``, ``volume``,
    optionally ``query``, ``hosts`` (list of dicts with ``name`` and an
    optional ``port``) and optionally ``auth`` (dict with ``username``,
    ``type`` and ``usage``) — matching the context built by
    _disk_from_pool() in salt/modules/virt.py.
-#}
{% macro network_source(disk) -%}
<source protocol='{{ disk.protocol }}' name='{{ disk.volume }}'{% if disk.get('query') %} query='{{ disk.query }}'{% endif %}>
{%- for host in disk.get('hosts') %}
<host name='{{ host.name }}'{% if host.get("port") %} port='{{ host.port }}'{% endif %}/>
{%- endfor %}
{%- if disk.get("auth") %}
<auth username='{{ disk.auth.username }}'>
<secret type='{{ disk.auth.type }}' usage='{{ disk.auth.usage}}'/>
</auth>
{%- endif %}
</source>
{%- endmacro %}
17 changes: 5 additions & 12 deletions salt/templates/virt/libvirt_domain.jinja
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
{%- import 'libvirt_disks.jinja' as libvirt_disks -%}
<domain type='{{ hypervisor }}'>
<name>{{ name }}</name>
<vcpu>{{ cpu }}</vcpu>
Expand Down Expand Up @@ -32,21 +33,13 @@
{% if disk.type == 'file' and 'source_file' in disk -%}
<source file='{{ disk.source_file }}' />
{% endif %}
{% if disk.type == 'block' -%}
<source dev='{{ disk.source_file }}' />
{% endif %}
{% if disk.type == 'volume' and 'pool' in disk -%}
<source pool='{{ disk.pool }}' volume='{{ disk.volume }}' />
{% endif %}
{%- if disk.type == 'network' %}
<source protocol='{{ disk.protocol }}' name='{{ disk.volume }}'{% if disk.get('query') %} query='{{ disk.query }}'{% endif %}>
{%- for host in disk.get('hosts') %}
<host name='{{ host.name }}'{% if host.get("port") %} port='{{ host.port }}'{% endif %}/>
{%- endfor %}
{%- if disk.get("auth") %}
<auth username='{{ disk.auth.username }}'>
<secret type='{{ disk.auth.type }}' usage='{{ disk.auth.usage}}'/>
</auth>
{%- endif %}
</source>
{%- endif %}
{%- if disk.type == 'network' %}{{ libvirt_disks.network_source(disk) }}{%- endif %}
<target dev='{{ disk.target_dev }}' bus='{{ disk.disk_bus }}' />
{% if disk.address -%}
<address type='drive' controller='0' bus='0' target='0' unit='{{ disk.index }}' />
Expand Down
Empty file.
Loading