diff --git a/plugins/modules/vm_snapshot_attach_disk.py b/plugins/modules/vm_snapshot_attach_disk.py
index 71ef162ed..326171cf7 100644
--- a/plugins/modules/vm_snapshot_attach_disk.py
+++ b/plugins/modules/vm_snapshot_attach_disk.py
@@ -21,6 +21,7 @@
 version_added: 1.2.0
 extends_documentation_fragment:
   - scale_computing.hypercore.cluster_instance
+  - scale_computing.hypercore.force_reboot
 seealso:
   - module: scale_computing.hypercore.vm_snapshot_info
 options:
@@ -38,6 +39,8 @@
     type: int
     description:
       - Specify a disk slot from a vm to identify destination disk.
+      - Note that this MUST be the next free slot or an already used slot for the given disk_type.
+        Otherwise the VM might not boot.
     required: True
   source_snapshot_uuid:
     type: str
@@ -56,7 +59,7 @@
     required: True
 notes:
   - C(check_mode) is not supported
-  - The VM to which the user is trying to attach the snapshot disk, B(must not) be running.
+  - The VM will be rebooted if it is running.
 """
 
@@ -107,14 +110,12 @@
 from ..module_utils.client import Client
 from ..module_utils.rest_client import RestClient
 from ..module_utils.vm_snapshot import VMSnapshot
+from ..module_utils.vm import VM
 from ..module_utils.task_tag import TaskTag
 from ..module_utils.typed_classes import TypedDiff
 from typing import Tuple, Dict, Any, Optional
 
-# ++++++++++++
-# Must be reviewed - not sure if that's how this should work
-# ++++++++++++
 
 def attach_disk(
     module: AnsibleModule, rest_client: RestClient
 ) -> Tuple[bool, Optional[Dict[Any, Any]], TypedDiff]:
@@ -131,6 +132,11 @@
         module.params["source_disk_slot"]
     )  # the higher the index, the newer the disk
 
+    # Get destination VM object
+    vm_object = VM.get_by_name(module.params, rest_client, must_exist=True)
+    if vm_object is None:
+        raise errors.ScaleComputingError("VM named '" + vm_name + "' doesn't exist.")
+
     # =============== IMPLEMENTATION ===================
     vm_snapshot_hypercore = VMSnapshot.get_snapshot_by_uuid(
         source_snapshot_uuid, rest_client
@@ -149,7 +155,6 @@
     # Check if slot already taken
     # - check if there is already a disk (vm_disk) with type (vm_type) on slot (vm_slot)
     # - if this slot is already taken, return no change
-    #   --> should it be an error that tells the user that the slot is already taken instead?
     before_disk = VMSnapshot.get_vm_disk_info(
         vm_uuid=vm_uuid,
         slot=vm_disk_slot,
@@ -164,6 +169,9 @@
             dict(before=before_disk, after=None),
         )
 
+    # First power off the destination VM
+    vm_object.do_shutdown_steps(module, rest_client)  # type: ignore
+
     source_disk_info = VMSnapshot.get_snapshot_disk(
         vm_snapshot, slot=source_disk_slot, _type=source_disk_type
     )
@@ -199,6 +207,9 @@
         create_task_tag["createdUUID"], rest_client
     )
 
+    # Restart the previously running VM (destination)
+    vm_object.vm_power_up(module, rest_client)  # type: ignore
+
     # return changed, after, diff
     return (
         # if new block device was created, then this should not be None
@@ -242,9 +253,17 @@
                 type="int",
                 required=True,
             ),
+            # These two parameters are required by the VM functions from module_utils/vm
+            force_reboot=dict(
+                type="bool",
+                default=False,
+            ),
+            shutdown_timeout=dict(
+                type="float",
+                default=300,
+            ),
         ),
     )
-
     try:
         client = Client.get_client(module.params["cluster_instance"])
         rest_client = RestClient(client)
diff --git a/tests/integration/targets/vm_snapshot/tasks/01_vm_snapshot_info.yml b/tests/integration/targets/vm_snapshot/tasks/01_vm_snapshot_info.yml
index 94adf0617..c754eeb88 100644
--- a/tests/integration/targets/vm_snapshot/tasks/01_vm_snapshot_info.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/01_vm_snapshot_info.yml
@@ -20,12 +20,6 @@
       - snap-2
       - not-unique
   block:
-    - include_tasks: helper_api_vm_snapshot_create.yml
-      vars:
-        vms_number: "{{ test_vms_number }}"
-
-    # --------------------------------------------------------
-
     - name: List all VM snapshots - API
       scale_computing.hypercore.api:
         action: get
@@ -185,10 +179,3 @@
     - ansible.builtin.debug:
         var: vm_snapshots.records
     - ansible.builtin.assert: *assert-nonexistent
-
-# ------------- Cleanup --------------
-
-    - name: Remove all created VMs for this test
-      include_tasks: helper_api_vm_snapshot_delete_all.yml
-      vars:
-        vms_number: "{{ number_of_snapshot_testing_vms }}"
diff --git a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
index f38ee4e84..cb68f4ef5 100644
--- a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
@@ -1,22 +1,45 @@
 ---
-# ==============================
-# The VM to which we are trying to attach the
-# snapshot disk to MUST NOT be running!
-# ==============================
-
 - name: Test vm_snapshot_attach_disk
   vars:
-    vm_1_snapshot_label_0: ana-snap
     vm_1_snapshot_label: snap-0
     vm_1: snapshot-test-vm-1
-    vm_2: snapshot-test-vm-3
-    slot_a: 42
-    slot_b: 43
+    vm_2_label: "-attach"
+    vm_2: "snapshot-test-vm-1{{ vm_2_label }}"
+
+    # We must use the next free slot on the bus if we want the VM to be bootable!
+    slot_vm_1_virtio: 2
+    slot_a: 3  # vm_2, virtio_disk
+    slot_b: 0  # vm_2, ide_disk
+
+    force_reboot: true  # allow forced VM shutdown
   block:
-    - include_tasks: helper_api_vm_snapshot_create.yml
-      vars:
-        vms_number: "{{ test_vms_number }}"
+    - name: Create a stopped VM "{{ vm_2 }}"
+      scale_computing.hypercore.vm:
+        vm_name: "{{ vm_2 }}"
+        description: Snapshot testing
+        state: present
+        tags:
+          - Xlab_snapshot_testing
+        memory: "{{ '512 MB' | human_to_bytes }}"
+        vcpu: 2
+        attach_guest_tools_iso: false
+        power_state: stop
+        disks:
+          - type: virtio_disk
+            disk_slot: 0
+            size: "{{ '10.1 GB' | human_to_bytes }}"
+          - type: virtio_disk
+            disk_slot: 1
+            size: "{{ '10.2 GB' | human_to_bytes }}"
+        nics: []
+        boot_devices:
+          - type: virtio_disk
+            disk_slot: 0
+          - type: ide_cdrom
+            disk_slot: 0
+        machine_type: BIOS
 
     # --------------------------------------------------------
 
@@ -42,6 +65,7 @@
 
   # ++++++++++++ Test attach snapshot disk from one VM to another +++++++++++
   # --------- Test VIRTIO_DISK to VIRTIO_DISK ---------
+    # Test attach when vm_2 is stopped
     - name: >-
         Attach "snap-0" from VM "{{ vm_1 }}" to VM "{{ vm_2 }}" - as VIRTIO_DISK
       scale_computing.hypercore.vm_snapshot_attach_disk:
@@ -52,18 +76,29 @@
           "{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
         source_disk_type: virtio_disk
         source_disk_slot: 1
+        force_reboot: "{{ force_reboot }}"
       register: result
     - ansible.builtin.debug:
         var: result
     - ansible.builtin.assert:
         that:
           - result is changed
+    - include_tasks: helper_check_vm_state.yml
+      vars:
+        vm_name: "{{ vm_2 }}"
+        expected_state: stopped
     - ansible.builtin.assert: &test-virtio
         that:
          - result.record.type == "virtio_disk"
          - result.record.disk_slot == slot_a
          - result.record.vm_uuid == vm_2_info.records[0].uuid
+    - name: Start VM "{{ vm_2 }}"
+      scale_computing.hypercore.vm_params:
+        vm_name: "{{ vm_2 }}"
+        power_state: start
+
+    # Test attach when vm_2 is running
     - name: >-
         IDEMPOTENCE - Attach "snap-0" from VM "{{ vm_1 }}" to VM "{{ vm_2 }}"
         - as VIRTIO_DISK
@@ -75,15 +110,23 @@
           "{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
         source_disk_type: virtio_disk
         source_disk_slot: 1
+        force_reboot: "{{ force_reboot }}"
       register: result
     - ansible.builtin.debug:
         var: result
     - ansible.builtin.assert:
         that:
           - result is not changed
+    - include_tasks: helper_check_vm_state.yml
+      vars:
+        vm_name: "{{ vm_2 }}"
+        expected_state: started
     - ansible.builtin.assert: *test-virtio
 
-#  # --------- Test VIRTIO_DISK to SOME_OTHER_TYPE_OF_DISK ---------
+    # >>>>>>>>>>>>>>>>>>>>
+    # The remaining tests attach disks while vm_2/vm_1 is running
+    # >>>>>>>>>>>>>>>>>>>>
+    # --------- Test VIRTIO_DISK to SOME_OTHER_TYPE_OF_DISK ---------
     - name: >-
         Attach "snap-0" from VM "{{ vm_1 }}" to VM "{{ vm_2 }}" - as NOT VIRTIO_DISK
       scale_computing.hypercore.vm_snapshot_attach_disk:
@@ -95,12 +138,18 @@
           "{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
         source_disk_type: virtio_disk
         source_disk_slot: 1
+        force_reboot: "{{ force_reboot }}"
+        shutdown_timeout: 10  # For faster testing. The VM has no OS, so it cannot react to an ACPI shutdown.
       register: result
     - ansible.builtin.debug:
         var: result
     - ansible.builtin.assert:
         that:
           - result is changed
+    - include_tasks: helper_check_vm_state.yml
+      vars:
+        vm_name: "{{ vm_2 }}"
+        expected_state: started
     - ansible.builtin.assert: &test-not-virtio
         that:
           - result.record.type == "ide_disk"
@@ -118,15 +167,23 @@
           "{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
         source_disk_type: virtio_disk
         source_disk_slot: 1
+        force_reboot: "{{ force_reboot }}"
       register: result
     - ansible.builtin.debug:
         var: result
     - ansible.builtin.assert:
         that:
           - result is not changed
+    - include_tasks: helper_check_vm_state.yml
+      vars:
+        vm_name: "{{ vm_2 }}"
+        expected_state: started
     - ansible.builtin.assert: *test-not-virtio
 
   # ++++++++++++ Test attach snapshot disk from a VM to itself +++++++++++++
+    # This does change vm_1 (snapshot-test-vm-1); because of this, the whole
+    # 03_vm_snapshot_attach_disk.yml requires snapshot-test-vm-1 to be deleted
+    # and recreated on each run. Refactor the test if this becomes a problem.
 
     - name: >-
         Attach "snap-0" from VM "{{ vm_1 }}"
@@ -134,21 +191,26 @@
       scale_computing.hypercore.vm_snapshot_attach_disk:
         vm_name: "{{ vm_1 }}"
         vm_disk_type: virtio_disk
-        vm_disk_slot: "{{ slot_a }}"
+        vm_disk_slot: "{{ slot_vm_1_virtio }}"
         source_snapshot_uuid: "{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
         source_disk_type: virtio_disk
         source_disk_slot: 1
+        force_reboot: "{{ force_reboot }}"
       register: result
     - ansible.builtin.debug:
         var: result
     - ansible.builtin.assert:
         that:
           - result is changed
+    - include_tasks: helper_check_vm_state.yml
+      vars:
+        vm_name: "{{ vm_1 }}"
+        expected_state: started
     - ansible.builtin.assert: &test-virtio-2
         that:
           - result.record.type == "virtio_disk"
-          - result.record.disk_slot == slot_a
+          - result.record.disk_slot == slot_vm_1_virtio
           - result.record.vm_uuid == vm_1_info.records[0].uuid
 
     - name: >-
@@ -157,15 +219,27 @@
       scale_computing.hypercore.vm_snapshot_attach_disk:
         vm_name: "{{ vm_1 }}"
         vm_disk_type: virtio_disk
-        vm_disk_slot: "{{ slot_a }}"
+        vm_disk_slot: "{{ slot_vm_1_virtio }}"
         source_snapshot_uuid: "{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
         source_disk_type: virtio_disk
         source_disk_slot: 1
+        force_reboot: "{{ force_reboot }}"
      register: result
     - ansible.builtin.debug:
         var: result
+    - include_tasks: helper_check_vm_state.yml
+      vars:
+        vm_name: "{{ vm_1 }}"
+        expected_state: started
     - ansible.builtin.assert:
         that:
           - result is not changed
     - ansible.builtin.assert: *test-virtio-2
+
+# ---------- Cleanup ------------
+  always:
+    - name: Remove snapshot attach testing VM "{{ vm_2 }}"
+      scale_computing.hypercore.vm:
+        vm_name: "{{ vm_2 }}"
+        state: absent
diff --git a/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml b/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml
index 2a7d1b022..8363167f5 100644
--- a/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml
@@ -12,7 +12,7 @@
         memory: "{{ '512 MB' | human_to_bytes }}"
         vcpu: 2
         attach_guest_tools_iso: false
-        power_state: stop
+        power_state: start
         disks:
           - type: virtio_disk
             disk_slot: 0
diff --git a/tests/integration/targets/vm_snapshot/tasks/helper_check_vm_state.yml b/tests/integration/targets/vm_snapshot/tasks/helper_check_vm_state.yml
new file mode 100644
index 000000000..eb1454ae3
--- /dev/null
+++ b/tests/integration/targets/vm_snapshot/tasks/helper_check_vm_state.yml
@@ -0,0 +1,10 @@
+---
+- name: Get VM "{{ vm_name }}" info
+  scale_computing.hypercore.vm_info:
+    vm_name: "{{ vm_name }}"
+  register: vm_info
+
+- name: Check that VM "{{ vm_name }}" power_state == "{{ expected_state }}"
+  ansible.builtin.assert:
+    that:
+      - vm_info.records[0].power_state == expected_state
diff --git a/tests/integration/targets/vm_snapshot/tasks/main.yml b/tests/integration/targets/vm_snapshot/tasks/main.yml
index eb97a46a8..83c5eaeb3 100644
--- a/tests/integration/targets/vm_snapshot/tasks/main.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/main.yml
@@ -6,7 +6,7 @@
       SC_TIMEOUT: "{{ sc_timeout }}"
 
   vars:
-    number_of_snapshot_testing_vms: 3
+    number_of_snapshot_testing_vms: 2
     non_unique_snapshot_label: not_unique
     # unique snapshot labels are strings like:
     # snap-x, where x is an iterative number
@@ -14,6 +14,11 @@
     # greater than 0, is a newer snapshot
 
   block:
+    - name: Create VMs
+      include_tasks: helper_api_vm_snapshot_create.yml
+      vars:
+        vms_number: "{{ number_of_snapshot_testing_vms }}"
+
     - include_tasks: 01_vm_snapshot_info.yml
       vars:
         test_vms_number: "{{ number_of_snapshot_testing_vms }}"
diff --git a/tests/unit/plugins/modules/test_vm_snapshot_attach_disk.py b/tests/unit/plugins/modules/test_vm_snapshot_attach_disk.py
index 6dce89baa..e0bcb9719 100644
--- a/tests/unit/plugins/modules/test_vm_snapshot_attach_disk.py
+++ b/tests/unit/plugins/modules/test_vm_snapshot_attach_disk.py
@@ -13,6 +13,7 @@
 import pytest
 
 from ansible_collections.scale_computing.hypercore.plugins.module_utils.vm_snapshot import (
+    VM,
     VMSnapshot,
 )
 
@@ -49,6 +50,8 @@
             source_snapshot_uuid="snapshot-uuid",
             source_disk_type="virtio_disk",
             source_disk_slot=0,
+            force_reboot=True,
+            shutdown_timeout=10,
         )
 
@@ -89,6 +92,64 @@
             replication=True,
         )
 
+        self.destination_vm_object = VM(
+            attach_guest_tools_iso=False,
+            boot_devices=[],
+            description="desc",
+            disks=[],
+            memory=42,
+            name="vm-destination",
+            nics=[],
+            vcpu=2,
+            operating_system=None,
+            power_state="stopped",
+            tags=["XLAB-test-tag1", "XLAB-test-tag2"],
+            uuid="id",
+            node_uuid="node_id",
+            node_affinity={
+                "strict_affinity": False,
+                "preferred_node": dict(
+                    node_uuid="",
+                    backplane_ip="",
+                    lan_ip="",
+                    peer_id=None,
+                ),
+                "backup_node": dict(
+                    node_uuid="",
+                    backplane_ip="",
+                    lan_ip="",
+                    peer_id=None,
+                ),
+            },
+            snapshot_schedule="",
+            machine_type="BIOS",
+        )
+
+        self.vm_destination_hypercore_dict = dict(
+            uuid="id",
+            nodeUUID="node_id",
+            name="vm-destination",
+            tags="XLAB-test-tag1,XLAB-test-tag2",
+            description="desc",
+            mem=42,
+            state="RUNNING",
+            numVCPU=2,
+            netDevs=[],
+            blockDevs=[],
+            bootDevices=[],
+            attachGuestToolsISO=False,
+            operatingSystem=None,
+            affinityStrategy={
+                "strictAffinity": False,
+                "preferredNodeUUID": "",
+                "backupNodeUUID": "",
+            },
+            snapshotScheduleUUID="snapshot_schedule_id",
+            machineType="scale-7.2",
+            sourceVirDomainUUID="64c9b3a1-3eab-4d16-994f-177bed274f84",
+            snapUUIDs=[],
+        )
+
         self.magic = mock.MagicMock()
 
     @pytest.mark.parametrize(
@@ -117,6 +178,13 @@
             "createdUUID": "new-block-uuid",
         }
 
+        # Mock the destination VM object
+        mocker.patch(
+            "ansible_collections.scale_computing.hypercore.plugins.module_utils.vm.VM.from_hypercore"
+        ).return_value = self.destination_vm_object
+        rest_client.get_record.return_value = self.vm_destination_hypercore_dict
+
+        # -------- Test attaching --------
         mocker.patch(
             "ansible_collections.scale_computing.hypercore.plugins.module_utils.vm_snapshot.VMSnapshot.get_snapshot_by_uuid"
         ).return_value = self.vm_snapshot
@@ -157,9 +225,12 @@ def test_attach_disk_is_change(
         changed, record, diff = vm_snapshot_attach_disk.attach_disk(module, rest_client)
 
         if destination_vm_disk_info is None:
-            rest_client.create_record.assert_called_once_with(**called_with_dict)
+            rest_client.create_record.assert_any_call(**called_with_dict)
         else:
-            rest_client.create_record.assert_not_called()
+            assert (
+                mock.call(**called_with_dict)
+                not in rest_client.create_record.mock_calls
+            )
 
         assert changed == expected_return[0]
         assert record == expected_return[1]
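
# ---------------------------------------------------------------------------
# Usage sketch for the new reboot behavior (not part of this diff). Assumptions:
# the snapshot UUID has already been looked up (e.g. via
# scale_computing.hypercore.vm_snapshot_info), the cluster connection is
# configured via the collection's usual cluster_instance/environment settings,
# and the VM name and slot values below are hypothetical.
- name: Attach a snapshot disk, rebooting the destination VM if it is running
  scale_computing.hypercore.vm_snapshot_attach_disk:
    vm_name: my-destination-vm  # hypothetical destination VM
    vm_disk_type: virtio_disk
    vm_disk_slot: 2  # must be the next free slot or an already used slot for this disk_type
    source_snapshot_uuid: "{{ snapshot_uuid }}"
    source_disk_type: virtio_disk
    source_disk_slot: 1
    force_reboot: true  # permit shutdown/power-up of a running VM
    shutdown_timeout: 300  # seconds to wait for a clean shutdown before forcing it
  register: attach_result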