Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 4 additions & 3 deletions plugins/module_utils/disk.py
Original file line number Diff line number Diff line change
Expand Up @@ -198,9 +198,10 @@ def needs_reboot(self, action: str, desired_disk=None) -> bool:
# Delete and change type.
if desired_disk and action == "update" and self.type != desired_disk.type:
return True
if (
action == "delete" and self.type == "ide_cdrom"
): # ide_cdrom can never be deleted when VM is running.
if action == "delete" and self.type == "ide_cdrom":
# ide_cdrom can never be deleted when VM is running.
# Also other disk types cannot be deleted when VM is running
# if HyperCore thinks disk is being "used".
return True
return False

Expand Down
14 changes: 13 additions & 1 deletion plugins/module_utils/errors.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,8 @@

__metaclass__ = type

from typing import Union
import json
from typing import Union, Dict, Any
from ansible.module_utils.urls import Request


Expand Down Expand Up @@ -111,3 +112,14 @@ class ScaleTimeoutError(ScaleComputingError):
def __init__(self, data: Union[str, Exception]):
self.message = f"Request timed out: {data}."
super(ScaleTimeoutError, self).__init__(self.message)


class TaskTagError(ScaleComputingError):
    """Raised when a HyperCore TaskTag finishes in an error state."""

    def __init__(self, task_status: Dict[Any, Any]):
        # task_status is the dict returned by GET /rest/v1/TaskTag.
        self.message = (
            "There was a problem during this task execution."
            f" Task details: {json.dumps(task_status)}"
        )
        # Keep state and the raw payload around so callers can decide
        # whether the failure is retryable.
        self.task_status_state = task_status["state"]
        self.task_status = task_status
        super().__init__(self.message)
5 changes: 1 addition & 4 deletions plugins/module_utils/task_tag.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@

__metaclass__ = type

import json
from time import sleep

from ..module_utils import errors
Expand Down Expand Up @@ -47,9 +46,7 @@ def wait_task(
"ERROR",
"UNINITIALIZED",
): # TaskTag has finished unsuccessfully or was never initialized, both are errors.
msg = "There was a problem during this task execution."
msg += f" Task details: {json.dumps(task_status)}"
raise errors.ScaleComputingError(msg)
raise errors.TaskTagError(task_status)
if task_status.get("state", "") not in (
"RUNNING",
"QUEUED",
Expand Down
48 changes: 47 additions & 1 deletion plugins/module_utils/vm.py
Original file line number Diff line number Diff line change
Expand Up @@ -1091,6 +1091,10 @@ def _delete_not_used_disks(cls, module, rest_client, vm, changed, disk_key):
):
to_delete = False
if to_delete:
# HyperCore is sometimes able to delete disk on running VM,
# but sometimes we need to shutdown VM to remove disk.
# It is hard to know in advance if shutdown is required.
# We try to remove disk without shutdown, if delete fails, we shutdown VM and try again.
if existing_disk.needs_reboot("delete"):
vm.do_shutdown_steps(module, rest_client)
task_tag = rest_client.delete_record(
Expand All @@ -1099,10 +1103,51 @@ def _delete_not_used_disks(cls, module, rest_client, vm, changed, disk_key):
),
module.check_mode,
)
TaskTag.wait_task(rest_client, task_tag, module.check_mode)
try:
TaskTag.wait_task(rest_client, task_tag, module.check_mode)
except errors.TaskTagError as ex:
# Delete failed, maybe because VM was running and disk was in use.
# If VM is running, shutdown VM and retry delete.
if ex.task_status_state != "ERROR":
raise
if not cls._disk_remove_failed_because_vm_running(ex.task_status):
raise
vm_fresh_data = rest_client.get_record(
f"/rest/v1/VirDomain/{vm.uuid}", must_exist=True
)
if vm_fresh_data["state"] != "RUNNING":
raise
# shutdown and retry remove
vm.do_shutdown_steps(module, rest_client)
task_tag = rest_client.delete_record(
"{0}/{1}".format(
"/rest/v1/VirDomainBlockDevice", existing_disk.uuid
),
module.check_mode,
)
TaskTag.wait_task(rest_client, task_tag, module.check_mode)
changed = True
return changed

@staticmethod
def _disk_remove_failed_because_vm_running(task_status: Dict):
# Look at task_tag dict returned by HyperCore to decide if disk remove failed
# because VM is running, and VM shutdown will allow us to remove the disk.
# What we search for in formattedMessage is HyperCore version dependent:
# 9.2.17 - "Unable to delete block device from VM '%@': Still in use"
# 9.1.14 - "Virt Exception, code: 84, domain 10: Operation not supported: This type of disk cannot be hot unplugged"

if (
task_status["formattedMessage"]
== "Unable to delete block device from VM '%@': Still in use"
):
return True
if task_status["formattedMessage"].endswith(
"Operation not supported: This type of disk cannot be hot unplugged"
):
return True
return False

@staticmethod
def _force_remove_all_disks(module, rest_client, vm, disks_before):
# It's important to check if items is equal to empty list and empty list only (no None-s)
Expand Down Expand Up @@ -1205,6 +1250,7 @@ def ensure_present_or_set(cls, module, rest_client, module_path):
changed = cls._delete_not_used_disks(
module, rest_client, vm_before, changed, disk_key
)
vm_before.vm_power_up(module, rest_client)
if called_from_vm_disk:
vm_after, disks_after = cls.get_vm_by_name(module, rest_client)
return (
Expand Down
7 changes: 7 additions & 0 deletions plugins/modules/vm.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,13 @@
description:
- Use this module to create, update or delete a VM. When creating or
updating a VM, setting the disks, network nics and boot order is possible.

- Module tries to remove disks from a running VM.
If disk cannot be removed from running VM,
then the VM will be shut down, the disk will be removed, and the VM will be started back up.
- VM has C(shutdown_timeout) time to respond to shutdown request.
If VM is not shutoff within I(shutdown_timeout),
then a force shutdown will be issued if C(force_reboot=True).
version_added: 1.0.0
extends_documentation_fragment:
- scale_computing.hypercore.cluster_instance
Expand Down
41 changes: 23 additions & 18 deletions plugins/modules/vm_disk.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,21 +15,28 @@
- Tjaž Eržen (@tjazsch)
short_description: Manage VM's disks
description:
Use this module to add, delete or set disks to the VM.
The module can also remove all disks from a VM,
attach and/or detach ISO image to the VM by ISO's name,
detach ISO image from the VM by disk's disk slot,
or update the existing disks (disk size etc.).

For a given VM, a particular disk is selected by combination of (I(type), I(disk_slot)).
I(disk_slot) means slot on bus (IDE, virtio or SCSI bus).

Changing disk I(type) can change its I(disk_slot).
For example, VM has one IDE CD-ROM and one virtio_disk.
The disk will have C(type=virtio_disk) and C(disk_slot=0),
and CD-ROM will have C(type=ide_cdrom) and C(disk_slot=0).
Changing disk I(type) to C(ide_disk) will as place disk on IDE bus,
after the CD-ROM, and disk will get C(disk_slot=1).
- Use this module to add, delete or set disks to the VM.
The module can also remove all disks from a VM,
attach and/or detach ISO image to the VM by ISO's name,
detach ISO image from the VM by disk's disk slot,
or update the existing disks (disk size etc.).

- For a given VM, a particular disk is selected by combination of (I(type), I(disk_slot)).
I(disk_slot) means slot on bus (IDE, virtio or SCSI bus).

- Changing disk I(type) can change its I(disk_slot).
For example, VM has one IDE CD-ROM and one virtio_disk.
The disk will have C(type=virtio_disk) and C(disk_slot=0),
and CD-ROM will have C(type=ide_cdrom) and C(disk_slot=0).
Changing disk I(type) to C(ide_disk) will place the disk on the IDE bus,
after the CD-ROM, and disk will get C(disk_slot=1).

- Module tries to remove disks from a running VM.
If disk cannot be removed from running VM,
then the VM will be shut down, the disk will be removed, and the VM will be started back up.
- VM has C(shutdown_timeout) time to respond to shutdown request.
If VM is not shutoff within I(shutdown_timeout),
then a force shutdown will be issued if C(force_reboot=True).

version_added: 1.0.0
extends_documentation_fragment:
Expand Down Expand Up @@ -325,6 +332,7 @@ def ensure_absent(module, rest_client):
)
TaskTag.wait_task(rest_client, task_tag, module.check_mode)
changed = True
vm_before.vm_power_up(module, rest_client)
vm_after, disks_after = ManageVMDisks.get_vm_by_name(module, rest_client)
return (
changed,
Expand All @@ -337,15 +345,12 @@ def ensure_absent(module, rest_client):
def run(module, rest_client):
# ensure_absent is located in modules/vm_disk.py, since it's only used here
# ensure_present_or_set is located in module_utils/vm.py, since it's also used in module vm.
vm, disks = ManageVMDisks.get_vm_by_name(module, rest_client)
if module.params["state"] == "absent":
changed, records, diff, reboot = ensure_absent(module, rest_client)
else:
changed, records, diff, reboot = ManageVMDisks.ensure_present_or_set(
module, rest_client, MODULE_PATH
)
if vm:
vm.vm_power_up(module, rest_client)
return changed, records, diff, reboot


Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
---
# ------------------------------------------------------------------------------------------------------------------
# Cleanup

- name: Delete the VMs, if they exist from before
  scale_computing.hypercore.vm:
    vm_name: "{{ item }}"
    state: absent
  loop: "{{ vm_names_all }}"

# ------------------------------------------------------------------------------------------------------------------
# Prepare
# The task never runs (when: False); it only stores the shared VM definition anchor.
- name: Dummy task to store VM definition {{ vm_name_a }}
  scale_computing.hypercore.vm: &vm_a_definition
    vm_name: "{{ vm_name_a }}"
    state: present
    description: VM remove disk CI test
    tags:
      - Xlab
    memory: "{{ '512 MB' | human_to_bytes }}"
    vcpu: 1
    attach_guest_tools_iso: false
    power_state: stop
    nics: []
    boot_devices: []
  when: False

# Create VM a
- name: Create the VM {{ vm_name_a }}
  scale_computing.hypercore.vm:
    <<: *vm_a_definition
    disks:
      - type: ide_disk
        disk_slot: 0
        size: "{{ '10 GB' | human_to_bytes }}"
      - type: ide_disk
        disk_slot: 1
        size: "{{ '11 GB' | human_to_bytes }}"
  register: vm_result
- name: Get info about VM {{ vm_name_a }}
  scale_computing.hypercore.vm_info:
    vm_name: "{{ vm_name_a }}"
  register: vm_info_a_initial_result
# NOTE: assert conditions are raw Jinja expressions - compare to the bare
# variable (vm_name_a), do not embed "{{ }}" templating delimiters.
- ansible.builtin.assert:
    that:
      - vm_result is changed
      - vm_result.record.0.description == "VM remove disk CI test"
      - vm_result.record.0.vm_name == vm_name_a
      - vm_result.record.0.disks | length == 2
      - vm_result.vm_rebooted == False
      - vm_info_a_initial_result.records.0.description == "VM remove disk CI test"
      - vm_info_a_initial_result.records.0.vm_name == vm_name_a
      - vm_info_a_initial_result.records.0.power_state == "stopped"
      - vm_info_a_initial_result.records.0.disks | length == 2

# ------------------------------------------------------------------------------------------------------------------
# Remove disk from stopped VM
# Shutdown is not allowed, and also not needed.
- name: Remove disk from stopped VM
  block: &remove_disk_from_vm
    - name: Remove disk from stopped VM {{ vm_name_a }}
      scale_computing.hypercore.vm:
        <<: *vm_a_definition
        disks:
          # remove 1st disk, keep 2nd disk
          - type: ide_disk
            disk_slot: 1
            size: "{{ '11 GB' | human_to_bytes }}"
        force_reboot: True
        shutdown_timeout: "{{ shutdown_timeout }}"
      register: vm_result
    - name: Get info about VM {{ vm_name_a }}
      scale_computing.hypercore.vm_info:
        vm_name: "{{ vm_name_a }}"
      register: vm_info_a_result
    - ansible.builtin.assert:
        that:
          - vm_result is succeeded
          - vm_result.vm_rebooted == False
          - vm_result.record.0.power_state == "stopped"
          - vm_result.record.0.disks | length == 1
          - vm_info_a_result.records.0.power_state == "stopped"
          - vm_info_a_result.records.0.disks | length == 1
          - vm_info_a_result.records.0.disks.0.uuid == vm_info_a_initial_result.records.0.disks.1.uuid
# The "is changed" assert stays outside the anchored block, so the
# idempotence rerun below can assert "is not changed" instead.
- ansible.builtin.assert:
    that:
      - vm_result is changed

- name: Remove disk from stopped VM - idempotence
  block:
    *remove_disk_from_vm
- ansible.builtin.assert:
    that:
      - vm_result is not changed


# ----------------------------------Cleanup--------------------------------------------------------------------------------
- name: Delete the VMs
  scale_computing.hypercore.vm:
    vm_name: "{{ item }}"
    state: absent
  loop: "{{ vm_names_all }}"
Loading