31 changes: 25 additions & 6 deletions plugins/modules/vm_snapshot_attach_disk.py
@@ -21,6 +21,7 @@
version_added: 1.2.0
extends_documentation_fragment:
- scale_computing.hypercore.cluster_instance
- scale_computing.hypercore.force_reboot
seealso:
- module: scale_computing.hypercore.vm_snapshot_info
options:
@@ -38,6 +39,8 @@
type: int
description:
- Specify a disk slot from a vm to identify destination disk.
- Note that this MUST be the next free slot or an already used slot for the given disk_type.
Otherwise the VM might not boot.
required: True
source_snapshot_uuid:
type: str
@@ -56,7 +59,7 @@
required: True
notes:
- C(check_mode) is not supported
- The VM to which the user is trying to attach the snapshot disk, B(must not) be running.
- The VM will be rebooted if it is running.
"""


@@ -107,14 +110,12 @@
from ..module_utils.client import Client
from ..module_utils.rest_client import RestClient
from ..module_utils.vm_snapshot import VMSnapshot
from ..module_utils.vm import VM
from ..module_utils.task_tag import TaskTag
from ..module_utils.typed_classes import TypedDiff
from typing import Tuple, Dict, Any, Optional


# ++++++++++++
# Must be reviewed - not sure if that's how this should work
# ++++++++++++
def attach_disk(
module: AnsibleModule, rest_client: RestClient
) -> Tuple[bool, Optional[Dict[Any, Any]], TypedDiff]:
@@ -131,6 +132,11 @@ def attach_disk(
module.params["source_disk_slot"]
) # the higher the index, the newer the disk

# Get destination VM object
vm_object = VM.get_by_name(module.params, rest_client, must_exist=True)
if vm_object is None:
raise errors.ScaleComputingError("VM named '" + vm_name + "' doesn't exist.")

# =============== IMPLEMENTATION ===================
vm_snapshot_hypercore = VMSnapshot.get_snapshot_by_uuid(
source_snapshot_uuid, rest_client
@@ -149,7 +155,6 @@
# Check if slot already taken
# - check if there is already a disk (vm_disk) with type (vm_type) on slot (vm_slot)
# - if this slot is already taken, return no change
# --> should it be an error that tells the user that the slot is already taken instead?
before_disk = VMSnapshot.get_vm_disk_info(
vm_uuid=vm_uuid,
slot=vm_disk_slot,
@@ -164,6 +169,9 @@
dict(before=before_disk, after=None),
)

# First power off the destination VM
vm_object.do_shutdown_steps(module, rest_client) # type: ignore

source_disk_info = VMSnapshot.get_snapshot_disk(
vm_snapshot, slot=source_disk_slot, _type=source_disk_type
)
@@ -199,6 +207,9 @@
create_task_tag["createdUUID"], rest_client
)

# Restart the previously running VM (destination)
vm_object.vm_power_up(module, rest_client) # type: ignore

# return changed, after, diff
return (
# if new block device was created, then this should not be None
@@ -242,9 +253,17 @@ def main() -> None:
type="int",
required=True,
),
# These two parameters must be present in order to use the VM functions from module_utils/vm
force_reboot=dict(
type="bool",
default=False,
),
shutdown_timeout=dict(
type="float",
default=300,
),
),
)

try:
client = Client.get_client(module.params["cluster_instance"])
rest_client = RestClient(client)
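The two new argument_spec entries surface as ordinary module options. A sketch of tuning the reboot behaviour, assuming shutdown_timeout is in seconds (the default of 300 and the 10-second value used in the tests below suggest so):

```yaml
- name: Attach a snapshot disk with a short ACPI-shutdown grace period
  scale_computing.hypercore.vm_snapshot_attach_disk:
    vm_name: snapshot-test-vm-1-attach
    vm_disk_type: ide_disk
    vm_disk_slot: 0
    source_snapshot_uuid: "{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
    source_disk_type: virtio_disk
    source_disk_slot: 1
    force_reboot: true       # allow the module to reboot a running VM
    shutdown_timeout: 10     # give up on a clean shutdown after 10 seconds
```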
@@ -20,12 +20,6 @@
- snap-2
- not-unique
block:
- include_tasks: helper_api_vm_snapshot_create.yml
vars:
vms_number: "{{ test_vms_number }}"

# --------------------------------------------------------

- name: List all VM snapshots - API
scale_computing.hypercore.api:
action: get
@@ -185,10 +179,3 @@
- ansible.builtin.debug:
var: vm_snapshots.records
- ansible.builtin.assert: *assert-nonexistent

# ------------- Cleanup --------------

- name: Remove all created VMs for this test
include_tasks: helper_api_vm_snapshot_delete_all.yml
vars:
vms_number: "{{ number_of_snapshot_testing_vms }}"
@@ -1,22 +1,45 @@
---
# ==============================
# The VM to which we are trying to attach the
# snapshot disk to MUST NOT be running!
# ==============================
- name: Test vm_snapshot_attach_disk

vars:
vm_1_snapshot_label_0: ana-snap
vm_1_snapshot_label: snap-0
vm_1: snapshot-test-vm-1
vm_2: snapshot-test-vm-3
slot_a: 42
slot_b: 43
vm_2_label: "-attach"
vm_2: "snapshot-test-vm-1{{ vm_2_label }}"

# We must use the next free slot on the bus if we want the VM to be bootable!
slot_vm_1_virtio: 2
slot_a: 3 # vm_2, virtio_disk
slot_b: 0 # vm_2, ide_disk

force_reboot: true # allow forced vm shutdown

block:
- include_tasks: helper_api_vm_snapshot_create.yml
vars:
vms_number: "{{ test_vms_number }}"
- name: Create a stopped VM "{{ vm_2 }}"
scale_computing.hypercore.vm:
vm_name: "{{ vm_2 }}"
description: Snapshot testing
state: present
tags:
- Xlab_snapshot_testing
memory: "{{ '512 MB' | human_to_bytes }}"
vcpu: 2
attach_guest_tools_iso: false
power_state: stop
disks:
- type: virtio_disk
disk_slot: 0
size: "{{ '10.1 GB' | human_to_bytes }}"
- type: virtio_disk
disk_slot: 1
size: "{{ '10.2 GB' | human_to_bytes }}"
nics: []
boot_devices:
- type: virtio_disk
disk_slot: 0
- type: ide_cdrom
disk_slot: 0
machine_type: BIOS

# --------------------------------------------------------

@@ -42,6 +65,7 @@
# ++++++++++++ Test attach snapshot disk from one VM to another +++++++++++
# --------- Test VIRTIO_DISK to VIRTIO_DISK ---------

# Test attach when vm_2 is stopped
- name: >-
Attach "snap-0" from VM "{{ vm_1 }}" to VM "{{ vm_2 }}" - as VIRTIO_DISK
scale_computing.hypercore.vm_snapshot_attach_disk:
@@ -52,18 +76,29 @@
"{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
source_disk_type: virtio_disk
source_disk_slot: 1
force_reboot: "{{ force_reboot }}"
register: result
- ansible.builtin.debug:
var: result
- ansible.builtin.assert:
that:
- result is changed
- include_tasks: helper_check_vm_state.yml
vars:
vm_name: "{{ vm_2 }}"
expected_state: stopped
- ansible.builtin.assert: &test-virtio
that:
- result.record.type == "virtio_disk"
- result.record.disk_slot == slot_a
- result.record.vm_uuid == vm_2_info.records[0].uuid

- name: Start VM "{{ vm_2 }}"
scale_computing.hypercore.vm_params:
vm_name: "{{ vm_2 }}"
power_state: start

# Test attach when vm_2 is running
- name: >-
IDEMPOTENCE - Attach "snap-0" from VM "{{ vm_1 }}"
to VM "{{ vm_2 }}" - as VIRTIO_DISK
@@ -75,15 +110,23 @@
"{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
source_disk_type: virtio_disk
source_disk_slot: 1
force_reboot: "{{ force_reboot }}"
register: result
- ansible.builtin.debug:
var: result
- ansible.builtin.assert:
that:
- result is not changed
- include_tasks: helper_check_vm_state.yml
vars:
vm_name: "{{ vm_2 }}"
expected_state: started
- ansible.builtin.assert: *test-virtio

# # --------- Test VIRTIO_DISK to SOME_OTHER_TYPE_OF_DISK ---------
# >>>>>>>>>>>>>>>>>>>>
# The rest of the tests attach disks while vm_2/vm_1 is running
# >>>>>>>>>>>>>>>>>>>>
# --------- Test VIRTIO_DISK to SOME_OTHER_TYPE_OF_DISK ---------
- name: >-
Attach "snap-0" from VM "{{ vm_1 }}" to
VM "{{ vm_2 }}" - as NOT VIRTIO_DISK
@@ -95,12 +138,18 @@
"{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
source_disk_type: virtio_disk
source_disk_slot: 1
force_reboot: "{{ force_reboot }}"
shutdown_timeout: 10 # For faster testing. VM has no OS, so it cannot react to ACPI shutdown.
register: result
- ansible.builtin.debug:
var: result
- ansible.builtin.assert:
that:
- result is changed
- include_tasks: helper_check_vm_state.yml
vars:
vm_name: "{{ vm_2 }}"
expected_state: started
- ansible.builtin.assert: &test-not-virtio
that:
- result.record.type == "ide_disk"
@@ -118,37 +167,50 @@
"{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
source_disk_type: virtio_disk
source_disk_slot: 1
force_reboot: "{{ force_reboot }}"
register: result
- ansible.builtin.debug:
var: result
- ansible.builtin.assert:
that:
- result is not changed
- include_tasks: helper_check_vm_state.yml
vars:
vm_name: "{{ vm_2 }}"
expected_state: started
- ansible.builtin.assert: *test-not-virtio

# ++++++++++++ Test attach snapshot disk from a VM to itself +++++++++++++
# This does change vm_1 (snapshot-test-vm-1); because of this, the whole 03_vm_snapshot_attach_disk.yml
# requires snapshot-test-vm-1 to be deleted and recreated each time.
# Refactor the test if you do not like this.

- name: >-
Attach "snap-0" from VM "{{ vm_1 }}"
to VM "{{ vm_1 }}"
scale_computing.hypercore.vm_snapshot_attach_disk:
vm_name: "{{ vm_1 }}"
vm_disk_type: virtio_disk
vm_disk_slot: "{{ slot_a }}"
vm_disk_slot: "{{ slot_vm_1_virtio }}"
source_snapshot_uuid:
"{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
source_disk_type: virtio_disk
source_disk_slot: 1
force_reboot: "{{ force_reboot }}"
register: result
- ansible.builtin.debug:
var: result
- ansible.builtin.assert:
that:
- result is changed
- include_tasks: helper_check_vm_state.yml
vars:
vm_name: "{{ vm_1 }}"
expected_state: started
- ansible.builtin.assert: &test-virtio-2
that:
- result.record.type == "virtio_disk"
- result.record.disk_slot == slot_a
- result.record.disk_slot == slot_vm_1_virtio
- result.record.vm_uuid == vm_1_info.records[0].uuid

- name: >-
@@ -157,15 +219,27 @@
scale_computing.hypercore.vm_snapshot_attach_disk:
vm_name: "{{ vm_1 }}"
vm_disk_type: virtio_disk
vm_disk_slot: "{{ slot_a }}"
vm_disk_slot: "{{ slot_vm_1_virtio }}"
source_snapshot_uuid:
"{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
source_disk_type: virtio_disk
source_disk_slot: 1
force_reboot: "{{ force_reboot }}"
register: result
- ansible.builtin.debug:
var: result
- include_tasks: helper_check_vm_state.yml
vars:
vm_name: "{{ vm_1 }}"
expected_state: started
- ansible.builtin.assert:
that:
- result is not changed
- ansible.builtin.assert: *test-virtio-2

# ---------- Cleanup ------------
always:
- name: Remove snapshot attach testing VM "{{ vm_2 }}"
scale_computing.hypercore.vm:
vm_name: "{{ vm_2 }}"
state: absent
@@ -12,7 +12,7 @@
memory: "{{ '512 MB' | human_to_bytes }}"
vcpu: 2
attach_guest_tools_iso: false
power_state: stop
power_state: start
disks:
- type: virtio_disk
disk_slot: 0
@@ -0,0 +1,10 @@
---
- name: Get VM "{{ vm_name }}" info
scale_computing.hypercore.vm_info:
vm_name: "{{ vm_name }}"
register: vm_info

- name: Is VM "{{ vm_name }}" power_state == "{{ expected_state }}"?
ansible.builtin.assert:
that:
- vm_info.records[0].power_state == expected_state
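
The new helper is consumed from the attach-disk tests exactly as shown above, e.g.:

```yaml
- include_tasks: helper_check_vm_state.yml
  vars:
    vm_name: "{{ vm_2 }}"
    expected_state: started
```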
7 changes: 6 additions & 1 deletion tests/integration/targets/vm_snapshot/tasks/main.yml
@@ -6,14 +6,19 @@
SC_TIMEOUT: "{{ sc_timeout }}"

vars:
number_of_snapshot_testing_vms: 3
number_of_snapshot_testing_vms: 2
non_unique_snapshot_label: not_unique
# unique snapshot labels are strings like:
# snap-x, where x is an iterative number
# -- snap-0 is the oldest snapshot, anything
# greater than 0, is a newer snapshot

block:
- name: Create VMs
include_tasks: helper_api_vm_snapshot_create.yml
vars:
vms_number: "{{ number_of_snapshot_testing_vms }}"

- include_tasks: 01_vm_snapshot_info.yml
vars:
test_vms_number: "{{ number_of_snapshot_testing_vms }}"