From c867c1412bad250437f93826c081612d65b2e355 Mon Sep 17 00:00:00 2001
From: Ana Zobec
Date: Mon, 15 May 2023 14:57:22 +0200
Subject: [PATCH 01/11] Add shutdown destination VM functionality before
 attaching is done.

---
 plugins/module_utils/vm_snapshot.py         | 36 +++++++++++++
 plugins/modules/vm_snapshot_attach_disk.py  | 50 ++++++++++++++++++-
 .../tasks/03_vm_snapshot_attach_disk.yml    |  2 +-
 .../tasks/helper_api_vm_snapshot_create.yml |  2 +-
 4 files changed, 86 insertions(+), 4 deletions(-)

diff --git a/plugins/module_utils/vm_snapshot.py b/plugins/module_utils/vm_snapshot.py
index 4af0b7fa9..42ffa55f4 100644
--- a/plugins/module_utils/vm_snapshot.py
+++ b/plugins/module_utils/vm_snapshot.py
@@ -10,6 +10,7 @@
 
 from copy import copy
 
+from ansible.module_utils.basic import AnsibleModule
 from .rest_client import RestClient
 from ..module_utils.utils import PayloadMapper
 
@@ -371,3 +372,38 @@ def get_external_vm_uuid(cls, vm_name: str, rest_client: RestClient) -> Any:
             return None
 
         return vm_hypercore_dict["uuid"]
+
+    @classmethod
+    # This method is meant to be called before the attaching was done on a destination VM
+    # - first get vm object before attaching was done
+    # - then try to normally shut down the vm
+    # - if this fails, then force shut down the vm
+    def power_off_vm(cls, module: AnsibleModule, rest_client: RestClient) -> None:
+        vm = VM.get_by_name(module.params, rest_client, must_exist=True)
+
+        # Make sure we don't try to shut down an already non-running VM
+        if vm.power_state not in ('stopped', 'crashed'):
+            if not module.params["force_reboot"]:  # force_reboot == False
+                # First try a normal shutdown
+                try:
+                    module.params["force_reboot"] = False  # must be "False" to be able to normally shut down
+                    vm.do_shutdown_steps(module, rest_client)
+
+                # If normal shutdown failed, then try a force shutdown
+                except errors.ScaleComputingError as normal_shutdown_error:
+                    module.params["force_reboot"] = True  # must be "True" to be able to forcibly shut down
+                    vm.vm_shutdown_forced(module, rest_client)
+                    module.params["force_reboot"] = False  # set it back to what it was initially
+            else:
+                vm.vm_shutdown_forced(module, rest_client)
+
+    @classmethod
+    # This method is meant to be called after the attaching was done on a destination VM
+    # - first get vm object after attaching was done
+    # - then start that vm
+    def power_up_vm(cls, module: AnsibleModule, rest_client: RestClient) -> None:
+        # If a VM is stopped or crashed, then start it up
+        if module.params["reboot_destination_vm"]:
+            vm = VM.get_by_name(module.params, rest_client, must_exist=True)
+            if vm.power_state in ('stopped', 'crashed'):
+                vm.update_vm_power_state(module, rest_client, "start")
diff --git a/plugins/modules/vm_snapshot_attach_disk.py b/plugins/modules/vm_snapshot_attach_disk.py
index 71ef162ed..6bee9da67 100644
--- a/plugins/modules/vm_snapshot_attach_disk.py
+++ b/plugins/modules/vm_snapshot_attach_disk.py
@@ -107,11 +107,15 @@
 from ..module_utils.client import Client
 from ..module_utils.rest_client import RestClient
 from ..module_utils.vm_snapshot import VMSnapshot
+from ..module_utils.vm import VM
 from ..module_utils.task_tag import TaskTag
 from ..module_utils.typed_classes import TypedDiff
 from typing import Tuple, Dict, Any, Optional
 
+# A minimum size of an IDE typed disk in HC3 API is 1 GB
+MIN_TYPE_IDE_SIZE = 1000341504  # bytes
+
 # ++++++++++++
 # Must be reviewed - not sure if that's how this should work
 # ++++++++++++
@@ -164,10 +168,23 @@
             dict(before=before_disk, after=None),
         )
 
+    # Get VM before attach
+    # vm = VM.get_by_name(module.params, rest_client, must_exist=True)  # type: ignore
+
+    # First power off the destination VM
+    VMSnapshot.power_off_vm(module, rest_client)
+
     source_disk_info = VMSnapshot.get_snapshot_disk(
         vm_snapshot, slot=source_disk_slot, _type=source_disk_type
     )
 
+    destination_size = source_disk_info["size"]
+    if "ide" in vm_disk_type:
+        if destination_size < MIN_TYPE_IDE_SIZE:
+            destination_size = MIN_TYPE_IDE_SIZE
+
+    module.log(str(destination_size))
+
     # build a payload according to /rest/v1/VirDomainBlockDevice/{uuid}/clone documentation
     payload = dict(
         options=dict(
@@ -178,7 +195,7 @@
         template=dict(
             virDomainUUID=vm_uuid,  # required
             type=vm_disk_type.upper(),  # required
-            capacity=source_disk_info["size"],  # required
+            capacity=destination_size,  # source_disk_info["size"], # required
            chacheMode=source_disk_info["cache_mode"].upper(),
            slot=vm_disk_slot,
            disableSnapshotting=source_disk_info["disable_snapshotting"],
@@ -199,6 +216,9 @@
             create_task_tag["createdUUID"], rest_client
         )
 
+    # Restart the previously running VM (destination)
+    VMSnapshot.power_up_vm(module, rest_client)
+
     # return changed, after, diff
     return (
         # if new block device was created, then this should not be None
@@ -242,9 +262,35 @@
                 type="int",
                 required=True,
             ),
+
+            # Maybe the user will want to keep the destination VM shut down if, for example he has more tasks on
+            # this VM and wants to make it go quicker by keeping the VM shut down
+            # - keep or remove this parameter?
+            # - if keep, what could be a better name for it?
+            #   - dont_reboot_destination_vm
+            #   - keep_destination_vm_shutdown
+            #   - keep_destination_vm_off
+            #   - ignore_destination_vm_reboot --> False: reboot, True: no reboot
+            #   - ...?
+            reboot_destination_vm=dict(
+                type="bool",
+                default=True,
+            ),
+
+            # These two parameters must be present in order to use the VM functions from module_utils/vm
+            # - see:
+            #   - vm_snapshot -> power_off_vm
+            #   - vm_snapshot -> power_up_vm
+            force_reboot=dict(
+                type="bool",
+                default=False,
+            ),
+            shutdown_timeout=dict(  # make this default to 300? (300 --> 5 minutes)
+                type="float",
+                default=30,
+            )
         ),
     )
-
     try:
         client = Client.get_client(module.params["cluster_instance"])
         rest_client = RestClient(client)
diff --git a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
index f38ee4e84..485a8405b 100644
--- a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
@@ -11,7 +11,7 @@
     vm_1: snapshot-test-vm-1
     vm_2: snapshot-test-vm-3
     slot_a: 42
-    slot_b: 43
+    slot_b: 0
   block:
     - include_tasks: helper_api_vm_snapshot_create.yml
diff --git a/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml b/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml
index 2a7d1b022..8363167f5 100644
--- a/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml
@@ -12,7 +12,7 @@
     memory: "{{ '512 MB' | human_to_bytes }}"
     vcpu: 2
     attach_guest_tools_iso: false
-    power_state: stop
+    power_state: start
     disks:
       - type: virtio_disk
         disk_slot: 0

From 923e8c364af721290bbee57438571a7d5ecc32d2 Mon Sep 17 00:00:00 2001
From: Ana Zobec
Date: Tue, 16 May 2023 11:53:16 +0200
Subject: [PATCH 02/11] Update vm_snapshot_attach_disk: unit tests, integration
 tests, module, module_utils.

---
 plugins/module_utils/vm_snapshot.py           |  36 ------
 plugins/modules/vm_snapshot_attach_disk.py    |  50 ++-------
 .../vm_snapshot/tasks/01_vm_snapshot_info.yml |  13 ---
 .../tasks/03_vm_snapshot_attach_disk.yml      |  63 ++++++++++-
 .../tasks/helper_api_vm_snapshot_create.yml   | 106 +++++++++---------
 .../tasks/helper_check_vm_state.yml           |  10 ++
 .../targets/vm_snapshot/tasks/main.yml        |   7 +-
 .../modules/test_vm_snapshot_attach_disk.py   |  75 ++++++++++++-
 8 files changed, 213 insertions(+), 147 deletions(-)
 create mode 100644 tests/integration/targets/vm_snapshot/tasks/helper_check_vm_state.yml

diff --git a/plugins/module_utils/vm_snapshot.py b/plugins/module_utils/vm_snapshot.py
index 42ffa55f4..4af0b7fa9 100644
--- a/plugins/module_utils/vm_snapshot.py
+++ b/plugins/module_utils/vm_snapshot.py
@@ -10,7 +10,6 @@
 
 from copy import copy
 
-from ansible.module_utils.basic import AnsibleModule
 from .rest_client import RestClient
 from ..module_utils.utils import PayloadMapper
 
@@ -372,38 +371,3 @@ def get_external_vm_uuid(cls, vm_name: str, rest_client: RestClient) -> Any:
             return None
 
         return vm_hypercore_dict["uuid"]
-
-    @classmethod
-    # This method is meant to be called before the attaching was done on a destination VM
-    # - first get vm object before attaching was done
-    # - then try to normally shut down the vm
-    # - if this fails, then force shut down the vm
-    def power_off_vm(cls, module: AnsibleModule, rest_client: RestClient) -> None:
-        vm = VM.get_by_name(module.params, rest_client, must_exist=True)
-
-        # Make sure we don't try to shut down an already non-running VM
-        if vm.power_state not in ('stopped', 'crashed'):
-            if not module.params["force_reboot"]:  # force_reboot == False
-                # First try a normal shutdown
-                try:
-                    module.params["force_reboot"] = False  # must be "False" to be able to normally shut down
-                    vm.do_shutdown_steps(module, rest_client)
-
-                # If normal shutdown failed, then try a force shutdown
-                except errors.ScaleComputingError as normal_shutdown_error:
-                    module.params["force_reboot"] = True  # must be "True" to be able to forcibly shut down
-                    vm.vm_shutdown_forced(module, rest_client)
-                    module.params["force_reboot"] = False  # set it back to what it was initially
-            else:
-                vm.vm_shutdown_forced(module, rest_client)
-
-    @classmethod
-    # This method is meant to be called after the attaching was done on a destination VM
-    # - first get vm object after attaching was done
-    # - then start that vm
-    def power_up_vm(cls, module: AnsibleModule, rest_client: RestClient) -> None:
-        # If a VM is stopped or crashed, then start it up
-        if module.params["reboot_destination_vm"]:
-            vm = VM.get_by_name(module.params, rest_client, must_exist=True)
-            if vm.power_state in ('stopped', 'crashed'):
-                vm.update_vm_power_state(module, rest_client, "start")
diff --git a/plugins/modules/vm_snapshot_attach_disk.py b/plugins/modules/vm_snapshot_attach_disk.py
index 6bee9da67..f15580e53 100644
--- a/plugins/modules/vm_snapshot_attach_disk.py
+++ b/plugins/modules/vm_snapshot_attach_disk.py
@@ -21,6 +21,7 @@
 version_added: 1.2.0
 extends_documentation_fragment:
   - scale_computing.hypercore.cluster_instance
+  - scale_computing.hypercore.force_reboot
 seealso:
   - module: scale_computing.hypercore.vm_snapshot_info
 options:
@@ -113,9 +114,6 @@
 from typing import Tuple, Dict, Any, Optional
 
-# A minimum size of an IDE typed disk in HC3 API is 1 GB
-MIN_TYPE_IDE_SIZE = 1000341504  # bytes
-
 # ++++++++++++
 # Must be reviewed - not sure if that's how this should work
 # ++++++++++++
@@ -135,6 +133,11 @@
         module.params["source_disk_slot"]
     )  # the higher the index, the newer the disk
 
+    # Get destination VM object
+    vm_object = VM.get_by_name(module.params, rest_client, must_exist=True)
+    if vm_object is None:
+        raise errors.ScaleComputingError("VM named '" + vm_name + "' doesn't exist.")
+
     # =============== IMPLEMENTATION ===================
     vm_snapshot_hypercore = VMSnapshot.get_snapshot_by_uuid(
         source_snapshot_uuid, rest_client
@@ -153,7 +156,6 @@
     # Check if slot already taken
     # - check if there is already a disk (vm_disk) with type (vm_type) on slot (vm_slot)
     # - if this slot is already taken, return no change
-    # --> should it be an error that tells the user that the slot is already taken instead?
     before_disk = VMSnapshot.get_vm_disk_info(
         vm_uuid=vm_uuid,
         slot=vm_disk_slot,
@@ -168,23 +170,13 @@
         dict(before=before_disk, after=None),
     )
 
-    # Get VM before attach
-    # vm = VM.get_by_name(module.params, rest_client, must_exist=True)  # type: ignore
-
     # First power off the destination VM
-    VMSnapshot.power_off_vm(module, rest_client)
+    vm_object.do_shutdown_steps(module, rest_client)  # type: ignore
 
     source_disk_info = VMSnapshot.get_snapshot_disk(
         vm_snapshot, slot=source_disk_slot, _type=source_disk_type
     )
 
-    destination_size = source_disk_info["size"]
-    if "ide" in vm_disk_type:
-        if destination_size < MIN_TYPE_IDE_SIZE:
-            destination_size = MIN_TYPE_IDE_SIZE
-
-    module.log(str(destination_size))
-
     # build a payload according to /rest/v1/VirDomainBlockDevice/{uuid}/clone documentation
     payload = dict(
         options=dict(
@@ -195,7 +187,7 @@
         template=dict(
             virDomainUUID=vm_uuid,  # required
             type=vm_disk_type.upper(),  # required
-            capacity=destination_size,  # source_disk_info["size"], # required
+            capacity=source_disk_info["size"],  # required
            chacheMode=source_disk_info["cache_mode"].upper(),
            slot=vm_disk_slot,
            disableSnapshotting=source_disk_info["disable_snapshotting"],
@@ -217,7 +209,7 @@
             create_task_tag["createdUUID"], rest_client
         )
 
     # Restart the previously running VM (destination)
-    VMSnapshot.power_up_vm(module, rest_client)
+    vm_object.vm_power_up(module, rest_client)  # type: ignore
 
     # return changed, after, diff
     return (
@@ -262,35 +254,15 @@
                 type="int",
                 required=True,
             ),
-
-            # Maybe the user will want to keep the destination VM shut down if, for example he has more tasks on
-            # this VM and wants to make it go quicker by keeping the VM shut down
-            # - keep or remove this parameter?
-            # - if keep, what could be a better name for it?
-            #   - dont_reboot_destination_vm
-            #   - keep_destination_vm_shutdown
-            #   - keep_destination_vm_off
-            #   - ignore_destination_vm_reboot --> False: reboot, True: no reboot
-            #   - ...?
-            reboot_destination_vm=dict(
-                type="bool",
-                default=True,
-            ),
-
-            # These two parameters must be present in order to use the VM functions from module_utils/vm
-            # - see:
-            #   - vm_snapshot -> power_off_vm
-            #   - vm_snapshot -> power_up_vm
             force_reboot=dict(
                 type="bool",
                 default=False,
            ),
-            shutdown_timeout=dict(  # make this default to 300? (300 --> 5 minutes)
-                type="float",
-                default=30,
-            )
+            shutdown_timeout=dict(
+                type="float",
+                default=300,
+            ),
         ),
     )
     try:
diff --git a/tests/integration/targets/vm_snapshot/tasks/01_vm_snapshot_info.yml b/tests/integration/targets/vm_snapshot/tasks/01_vm_snapshot_info.yml
index 94adf0617..c754eeb88 100644
--- a/tests/integration/targets/vm_snapshot/tasks/01_vm_snapshot_info.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/01_vm_snapshot_info.yml
@@ -20,12 +20,6 @@
       - snap-2
       - not-unique
   block:
-    - include_tasks: helper_api_vm_snapshot_create.yml
-      vars:
-        vms_number: "{{ test_vms_number }}"
-
-    # --------------------------------------------------------
-
     - name: List all VM snapshots - API
       scale_computing.hypercore.api:
         action: get
@@ -185,10 +179,3 @@
     - ansible.builtin.debug:
         var: vm_snapshots.records
     - ansible.builtin.assert: *assert-nonexistent
-
-# ------------- Cleanup --------------
-
-    - name: Remove all created VMs for this test
-      include_tasks: helper_api_vm_snapshot_delete_all.yml
-      vars:
-        vms_number: "{{ number_of_snapshot_testing_vms }}"
diff --git a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
index 485a8405b..798b71b20 100644
--- a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
@@ -6,17 +6,23 @@
 
 - name: Test vm_snapshot_attach_disk
   vars:
-    vm_1_snapshot_label_0: ana-snap
     vm_1_snapshot_label: snap-0
     vm_1: snapshot-test-vm-1
-    vm_2: snapshot-test-vm-3
+    vm_2_label: "-attach"
+    vm_2: "snapshot-test-vm-1{{ vm_2_label }}"
+
     slot_a: 42
     slot_b: 0
+
+    force_reboot: true  # allow forced vm shutdown
+
   block:
-    - include_tasks: helper_api_vm_snapshot_create.yml
+    - name: Create a stopped VM "{{ vm_2 }}"
+      include_tasks: helper_api_vm_snapshot_create.yml
       vars:
-        vms_number: "{{ test_vms_number }}"
+        vms_number: 1
+        label: "{{ vm_2_label }}"
+        vm_init_state: stop
 
     # --------------------------------------------------------
@@ -42,6 +48,7 @@
     # ++++++++++++ Test attach snapshot disk from one VM to another +++++++++++
     # --------- Test VIRTIO_DISK to VIRTIO_DISK ---------
 
+    # Test attach when vm_2 is stopped
     - name: >-
        Attach "snap-0" from VM "{{ vm_1 }}" to VM "{{ vm_2 }}" - as VIRTIO_DISK
      scale_computing.hypercore.vm_snapshot_attach_disk:
@@ -52,18 +59,29 @@
          "{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
        source_disk_type: virtio_disk
        source_disk_slot: 1
+        force_reboot: "{{ force_reboot }}"
      register: result
    - ansible.builtin.debug:
        var: result
    - ansible.builtin.assert:
        that:
          - result is changed
+    - include_tasks: helper_check_vm_state.yml
+      vars:
+        vm_name: "{{ vm_2 }}"
+        expected_state: stopped
    - ansible.builtin.assert: &test-virtio
        that:
          - result.record.type == "virtio_disk"
          - result.record.disk_slot == slot_a
          - result.record.vm_uuid == vm_2_info.records[0].uuid
 
+    - name: Start VM "{{ vm_2 }}"
+      scale_computing.hypercore.vm_params:
+        vm_name: "{{ vm_2 }}"
+        power_state: start
+
+    # Test attach when vm_2 is running
    - name: >-
        IDEMPOTENCE - Attach "snap-0" from VM "{{ vm_1 }}" to VM "{{ vm_2 }}" - as VIRTIO_DISK
@@ -75,15 +93,23 @@
          "{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
        source_disk_type: virtio_disk
        source_disk_slot: 1
+        force_reboot: "{{ force_reboot }}"
      register: result
    - ansible.builtin.debug:
        var: result
    - ansible.builtin.assert:
        that:
          - result is not changed
+    - include_tasks: helper_check_vm_state.yml
+      vars:
+        vm_name: "{{ vm_2 }}"
+        expected_state: started
    - ansible.builtin.assert: *test-virtio
 
-#    # --------- Test VIRTIO_DISK to SOME_OTHER_TYPE_OF_DISK ---------
+    # >>>>>>>>>>>>>>>>>>>>
+    # The rest of the tests are attaching on a running vm_2/vm_1
+    # >>>>>>>>>>>>>>>>>>>>
+    # --------- Test VIRTIO_DISK to SOME_OTHER_TYPE_OF_DISK ---------
 
    - name: >-
        Attach "snap-0" from VM "{{ vm_1 }}" to VM "{{ vm_2 }}" - as NOT VIRTIO_DISK
@@ -95,12 +121,17 @@
          "{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
        source_disk_type: virtio_disk
        source_disk_slot: 1
+        force_reboot: "{{ force_reboot }}"
      register: result
    - ansible.builtin.debug:
        var: result
    - ansible.builtin.assert:
        that:
          - result is changed
+    - include_tasks: helper_check_vm_state.yml
+      vars:
+        vm_name: "{{ vm_2 }}"
+        expected_state: started
    - ansible.builtin.assert: &test-not-virtio
        that:
          - result.record.type == "ide_disk"
@@ -118,12 +149,17 @@
          "{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
        source_disk_type: virtio_disk
        source_disk_slot: 1
+        force_reboot: "{{ force_reboot }}"
      register: result
    - ansible.builtin.debug:
        var: result
    - ansible.builtin.assert:
        that:
          - result is not changed
+    - include_tasks: helper_check_vm_state.yml
+      vars:
+        vm_name: "{{ vm_2 }}"
+        expected_state: started
    - ansible.builtin.assert: *test-not-virtio
 
    # ++++++++++++ Test attach snapshot disk from a VM to itself +++++++++++++
@@ -139,12 +175,17 @@
          "{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
        source_disk_type: virtio_disk
        source_disk_slot: 1
+        force_reboot: "{{ force_reboot }}"
      register: result
    - ansible.builtin.debug:
        var: result
    - ansible.builtin.assert:
        that:
          - result is changed
+    - include_tasks: helper_check_vm_state.yml
+      vars:
+        vm_name: "{{ vm_1 }}"
+        expected_state: started
    - ansible.builtin.assert: &test-virtio-2
        that:
          - result.record.type == "virtio_disk"
          - result.record.disk_slot == slot_a
          - result.record.vm_uuid == vm_1_info.records[0].uuid
@@ -162,10 +203,22 @@
          "{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
        source_disk_type: virtio_disk
        source_disk_slot: 1
+        force_reboot: "{{ force_reboot }}"
      register: result
    - ansible.builtin.debug:
        var: result
+    - include_tasks: helper_check_vm_state.yml
+      vars:
+        vm_name: "{{ vm_1 }}"
+        expected_state: started
    - ansible.builtin.assert:
        that:
          - result is not changed
    - ansible.builtin.assert: *test-virtio-2
+
+# ---------- Cleanup ------------
+  always:
+    - name: Remove snapshot attach testing VM "{{ vm_2 }}"
+      scale_computing.hypercore.vm:
+        vm_name: "{{ vm_2 }}"
+        state: absent
diff --git a/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml b/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml
index 8363167f5..65e7c9a9c 100644
--- a/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml
@@ -4,7 +4,7 @@
 # ++++++++++++++++++++++++++++++
 - name: Create test VMs
   scale_computing.hypercore.vm:
-    vm_name: "snapshot-test-vm-{{ item }}"
+    vm_name: "snapshot-test-vm-{{ item }}{{ label | default('') }}"
     description: Snapshot testing
     state: present
     tags:
@@ -12,14 +12,14 @@
     memory: "{{ '512 MB' | human_to_bytes }}"
     vcpu: 2
     attach_guest_tools_iso: false
-    power_state: start
+    power_state: "{{ vm_init_state | default('start') }}"
     disks:
       - type: virtio_disk
         disk_slot: 0
-        size: "{{ '0.1 GB' | human_to_bytes }}"
+        size: "{{ '1.1 GB' | human_to_bytes }}"
       - type: virtio_disk
         disk_slot: 1
-        size: "{{ '0.2 GB' | human_to_bytes }}"
+        size: "{{ '1.2 GB' | human_to_bytes }}"
     nics:
       - vlan: 1
         type: RTL8139
@@ -43,61 +43,63 @@
 
 # ----------- Create/POST USER SNAPSHOTS -------------
 
-- name: Create 3 snapshots with "unique" label for "snapshot-test-vm-1"
-  scale_computing.hypercore.api:
-    action: post
-    endpoint: /rest/v1/VirDomainSnapshot
-    data:
-      domainUUID: "{{ vms_created.results.0.record.0.uuid }}"
-      label: "snap-{{ item }}"
-  with_sequence: start=0 end=2
-  register: unique_labeled_snapshots
+- block:
+    - name: Create 3 snapshots with "unique" label for "snapshot-test-vm-1"
+      scale_computing.hypercore.api:
+        action: post
+        endpoint: /rest/v1/VirDomainSnapshot
+        data:
+          domainUUID: "{{ vms_created.results.0.record.0.uuid }}"
+          label: "snap-{{ item }}"
+      with_sequence: start=0 end=2
+      register: unique_labeled_snapshots
 
-- name: Show created snapshots with "unique" label on "snapshot-test-vm-1"
-  ansible.builtin.debug:
-    var: unique_labeled_snapshots
+    - name: Show created snapshots with "unique" label on "snapshot-test-vm-1"
+      ansible.builtin.debug:
+        var: unique_labeled_snapshots
 
-- name: Create 3 snapshots with "unique" label for "snapshot-test-vm-2"
-  scale_computing.hypercore.api:
-    action: post
-    endpoint: /rest/v1/VirDomainSnapshot
-    data:
-      domainUUID: "{{ vms_created.results.1.record.0.uuid }}"
-      label: "snap-{{ item }}"
-  with_sequence: start=0 end=2
-  register: unique_labeled_snapshots
+    - name: Create 3 snapshots with "unique" label for "snapshot-test-vm-2"
+      scale_computing.hypercore.api:
+        action: post
+        endpoint: /rest/v1/VirDomainSnapshot
+        data:
+          domainUUID: "{{ vms_created.results.1.record.0.uuid }}"
+          label: "snap-{{ item }}"
+      with_sequence: start=0 end=2
+      register: unique_labeled_snapshots
 
-- name: Show created snapshots with "unique" label on "snapshot-test-vm-2"
-  ansible.builtin.debug:
-    var: unique_labeled_snapshots
+    - name: Show created snapshots with "unique" label on "snapshot-test-vm-2"
+      ansible.builtin.debug:
+        var: unique_labeled_snapshots
 
-- name: Create 3 snapshots with "non-unique" label for "snapshot-test-vm-1"
-  scale_computing.hypercore.api:
-    action: post
-    endpoint: /rest/v1/VirDomainSnapshot
-    data:
-      domainUUID: "{{ vms_created.results.0.record.0.uuid }}"
-      label: "not-unique"
-  with_sequence: start=0 end=2
-  register: non_unique_labeled_snapshots
+    - name: Create 3 snapshots with "non-unique" label for "snapshot-test-vm-1"
+      scale_computing.hypercore.api:
+        action: post
+        endpoint: /rest/v1/VirDomainSnapshot
+        data:
+          domainUUID: "{{ vms_created.results.0.record.0.uuid }}"
+          label: "not-unique"
+      with_sequence: start=0 end=2
+      register: non_unique_labeled_snapshots
 
-- name: Show created snapshots with "non-unique" label on "snapshot-test-vm-1"
-  ansible.builtin.debug:
-    var: non_unique_labeled_snapshots
+    - name: Show created snapshots with "non-unique" label on "snapshot-test-vm-1"
+      ansible.builtin.debug:
+        var: non_unique_labeled_snapshots
 
-- name: Create 3 snapshots with "non-unique" label for "snapshot-test-vm-2"
-  scale_computing.hypercore.api:
-    action: post
-    endpoint: /rest/v1/VirDomainSnapshot
-    data:
-      domainUUID: "{{ vms_created.results.1.record.0.uuid }}"
-      label: "not-unique"
-  with_sequence: start=0 end=2
-  register: non_unique_labeled_snapshots
+    - name: Create 3 snapshots with "non-unique" label for "snapshot-test-vm-2"
+      scale_computing.hypercore.api:
+        action: post
+        endpoint: /rest/v1/VirDomainSnapshot
+        data:
+          domainUUID: "{{ vms_created.results.1.record.0.uuid }}"
+          label: "not-unique"
+      with_sequence: start=0 end=2
+      register: non_unique_labeled_snapshots
 
-- name: Show created snapshots with "non-unique" label on "snapshot-test-vm-2"
-  ansible.builtin.debug:
-    var: non_unique_labeled_snapshots
+    - name: Show created snapshots with "non-unique" label on "snapshot-test-vm-2"
+      ansible.builtin.debug:
+        var: non_unique_labeled_snapshots
+  when: label is undefined
 
 # These snapshot serials are always the same, everytime they are freshly created
 # ++++++++++++++++++++++++++
diff --git a/tests/integration/targets/vm_snapshot/tasks/helper_check_vm_state.yml b/tests/integration/targets/vm_snapshot/tasks/helper_check_vm_state.yml
new file mode 100644
index 000000000..eb1454ae3
--- /dev/null
+++ b/tests/integration/targets/vm_snapshot/tasks/helper_check_vm_state.yml
@@ -0,0 +1,10 @@
+---
+- name: Get VM "{{ vm_name }}" info
+  scale_computing.hypercore.vm_info:
+    vm_name: "{{ vm_name }}"
+  register: vm_info
+
+- name: Is VM "{{ vm_name }}" power_state == "{{ expected_state }}" ?
+  ansible.builtin.assert:
+    that:
+      - vm_info.records[0].power_state == expected_state
diff --git a/tests/integration/targets/vm_snapshot/tasks/main.yml b/tests/integration/targets/vm_snapshot/tasks/main.yml
index eb97a46a8..83c5eaeb3 100644
--- a/tests/integration/targets/vm_snapshot/tasks/main.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/main.yml
@@ -6,7 +6,7 @@
       SC_TIMEOUT: "{{ sc_timeout }}"
 
   vars:
-    number_of_snapshot_testing_vms: 3
+    number_of_snapshot_testing_vms: 2
     non_unique_snapshot_label: not_unique
     # unique snapshot labels are strings like:
     # snap-x, where x is an iterative number
@@ -14,6 +14,11 @@
     # greater than 0, is a newer snapshot
 
   block:
+    - name: Create VMs
+      include_tasks: helper_api_vm_snapshot_create.yml
+      vars:
+        vms_number: "{{ number_of_snapshot_testing_vms }}"
+
     - include_tasks: 01_vm_snapshot_info.yml
       vars:
        test_vms_number: "{{ number_of_snapshot_testing_vms }}"
diff --git a/tests/unit/plugins/modules/test_vm_snapshot_attach_disk.py b/tests/unit/plugins/modules/test_vm_snapshot_attach_disk.py
index 6dce89baa..e0bcb9719 100644
--- a/tests/unit/plugins/modules/test_vm_snapshot_attach_disk.py
+++ b/tests/unit/plugins/modules/test_vm_snapshot_attach_disk.py
@@ -13,6 +13,7 @@
 import pytest
 
 from ansible_collections.scale_computing.hypercore.plugins.module_utils.vm_snapshot import (
+    VM,
     VMSnapshot,
 )
@@ -49,6 +50,8 @@
     source_snapshot_uuid="snapshot-uuid",
     source_disk_type="virtio_disk",
     source_disk_slot=0,
+    force_reboot=True,
+    shutdown_timeout=10,
 )
@@ -89,6 +92,64 @@ def setup_method(self):
             replication=True,
         )
 
+        self.destination_vm_object = VM(
+            attach_guest_tools_iso=False,
+            boot_devices=[],
+            description="desc",
+            disks=[],
+            memory=42,
+            name="vm-destination",
+            nics=[],
+            vcpu=2,
+            operating_system=None,
+            power_state="stopped",
+            tags=["XLAB-test-tag1", "XLAB-test-tag2"],
+            uuid="id",
+            node_uuid="node_id",
+            node_affinity={
+                "strict_affinity": False,
+                "preferred_node": dict(
+                    node_uuid="",
+                    backplane_ip="",
+                    lan_ip="",
+                    peer_id=None,
+                ),
+                "backup_node": dict(
+                    node_uuid="",
+                    backplane_ip="",
+                    lan_ip="",
+                    peer_id=None,
+                ),
+            },
+            snapshot_schedule="",
+            machine_type="BIOS",
+        )
+
+        self.vm_destination_hypercore_dict = dict(
+            uuid="id",
+            nodeUUID="node_id",
+            name="vm-destination",
+            tags="XLAB-test-tag1,XLAB-test-tag2",
+            description="desc",
+            mem=42,
+            state="RUNNING",
+            numVCPU=2,
+            netDevs=[],
+            blockDevs=[],
+            bootDevices=[],
+            attachGuestToolsISO=False,
+            operatingSystem=None,
+            affinityStrategy={
+                "strictAffinity": False,
+                "preferredNodeUUID": "",
+                "backupNodeUUID": "",
+            },
+            snapshotScheduleUUID="snapshot_schedule_id",
+            machineType="scale-7.2",
+            sourceVirDomainUUID="64c9b3a1-3eab-4d16-994f-177bed274f84",
+            snapUUIDs=[],
+        )
+
         self.magic = mock.MagicMock()
 
     @pytest.mark.parametrize(
@@ -117,6 +178,13 @@ def test_attach_disk_is_change(
             "createdUUID": "new-block-uuid",
         }
 
+        # Mock the destination VM object
+        mocker.patch(
+            "ansible_collections.scale_computing.hypercore.plugins.module_utils.vm.VM.from_hypercore"
+        ).return_value = self.destination_vm_object
+        rest_client.get_record.return_value = self.vm_destination_hypercore_dict
+
+        # -------- Test attaching --------
         mocker.patch(
             "ansible_collections.scale_computing.hypercore.plugins.module_utils.vm_snapshot.VMSnapshot.get_snapshot_by_uuid"
         ).return_value = self.vm_snapshot
@@ -157,9 +225,12 @@ def test_attach_disk_is_change(
         changed, record, diff = vm_snapshot_attach_disk.attach_disk(module, rest_client)
 
         if destination_vm_disk_info is None:
-            rest_client.create_record.assert_called_once_with(**called_with_dict)
+            rest_client.create_record.assert_any_call(**called_with_dict)
         else:
-            rest_client.create_record.assert_not_called()
+            assert (
+                mock.call(**called_with_dict)
+                not in rest_client.create_record.mock_calls
+            )
 
         assert changed == expected_return[0]
         assert record == expected_return[1]

From 1c1cbbc5ade0714ffc87a51fd3c17cfc8c322a5d Mon Sep 17 00:00:00 2001
From: Ana Zobec
Date: Tue, 16 May 2023 12:31:12 +0200
Subject: [PATCH 03/11] Remove already resolved comment.

---
 plugins/modules/vm_snapshot_attach_disk.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/plugins/modules/vm_snapshot_attach_disk.py b/plugins/modules/vm_snapshot_attach_disk.py
index f15580e53..18cd49f70 100644
--- a/plugins/modules/vm_snapshot_attach_disk.py
+++ b/plugins/modules/vm_snapshot_attach_disk.py
@@ -114,9 +114,6 @@
 from typing import Tuple, Dict, Any, Optional
 
 
-# ++++++++++++
-# Must be reviewed - not sure if that's how this should work
-# ++++++++++++
 def attach_disk(
     module: AnsibleModule, rest_client: RestClient
 ) -> Tuple[bool, Optional[Dict[Any, Any]], TypedDiff]:

From 6bca66f0bf8de8325d45252d90907c8d72a95f46 Mon Sep 17 00:00:00 2001
From: Ana Zobec
Date: Tue, 16 May 2023 12:45:03 +0200
Subject: [PATCH 04/11] Remove unneeded note from vm_snapshot_attach_disk
 documentation.

---
 plugins/modules/vm_snapshot_attach_disk.py                   | 1 -
 .../targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml | 4 ----
 2 files changed, 5 deletions(-)

diff --git a/plugins/modules/vm_snapshot_attach_disk.py b/plugins/modules/vm_snapshot_attach_disk.py
index 18cd49f70..05bdb9524 100644
--- a/plugins/modules/vm_snapshot_attach_disk.py
+++ b/plugins/modules/vm_snapshot_attach_disk.py
@@ -57,7 +57,6 @@
     required: True
 notes:
   - C(check_mode) is not supported
-  - The VM to which the user is trying to attach the snapshot disk, B(must not) be running.
 """
 
diff --git a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
index 798b71b20..f1d344c35 100644
--- a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
@@ -1,8 +1,4 @@
 ---
-# ==============================
-# The VM to which we are trying to attach the
-# snapshot disk to MUST NOT be running!
-# ==============================
 
 - name: Test vm_snapshot_attach_disk
   vars:

From 092a2ba0c772478c6b68d514902237ee7b93a57a Mon Sep 17 00:00:00 2001
From: Justin Cinkelj
Date: Tue, 16 May 2023 13:16:39 +0200
Subject: [PATCH 05/11] Document user is responsible to select next free slot
 on specific disk bus

Otherwise disk is attached, but VM is not bootable.
Signed-off-by: Justin Cinkelj
---
 plugins/modules/vm_snapshot_attach_disk.py                   | 2 ++
 .../targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml | 5 +++--
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/plugins/modules/vm_snapshot_attach_disk.py b/plugins/modules/vm_snapshot_attach_disk.py
index 05bdb9524..fbf55e9e6 100644
--- a/plugins/modules/vm_snapshot_attach_disk.py
+++ b/plugins/modules/vm_snapshot_attach_disk.py
@@ -39,6 +39,8 @@
     type: int
     description:
       - Specify a disk slot from a vm to identify destination disk.
+      - Note that this MUST be a next free slot or an already used slot for the given disk_type.
+        Otherwise VM might not boot.
     required: True
   source_snapshot_uuid:
     type: str
diff --git a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
index f1d344c35..f8896ee90 100644
--- a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
@@ -7,8 +7,9 @@
     vm_2_label: "-attach"
     vm_2: "snapshot-test-vm-1{{ vm_2_label }}"
 
-    slot_a: 42
-    slot_b: 0
+    # We must use next free slot on bus if we want VM to be bootable!
+    slot_a: 3  # virtio_disk
+    slot_b: 0  # ide_disk
 
     force_reboot: true  # allow forced vm shutdown

From e7fe42969aad1ea3c159230863f7297e7e953416 Mon Sep 17 00:00:00 2001
From: Justin Cinkelj
Date: Tue, 16 May 2023 14:06:33 +0200
Subject: [PATCH 06/11] Update module might reboot VM

Signed-off-by: Justin Cinkelj
---
 plugins/modules/vm_snapshot_attach_disk.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/plugins/modules/vm_snapshot_attach_disk.py b/plugins/modules/vm_snapshot_attach_disk.py
index fbf55e9e6..326171cf7 100644
--- a/plugins/modules/vm_snapshot_attach_disk.py
+++ b/plugins/modules/vm_snapshot_attach_disk.py
@@ -59,6 +59,7 @@
     required: True
 notes:
   - C(check_mode) is not supported
+  - The VM will be rebooted if it is running.
 """

From 9b501c931201bfc5a8810979dd1c9df44e77e29b Mon Sep 17 00:00:00 2001
From: Justin Cinkelj
Date: Tue, 16 May 2023 13:17:06 +0200
Subject: [PATCH 07/11] CI faster testing

Signed-off-by: Justin Cinkelj
---
 .../targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
index f8896ee90..2236cac24 100644
--- a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
@@ -119,6 +119,7 @@
         source_disk_type: virtio_disk
         source_disk_slot: 1
         force_reboot: "{{ force_reboot }}"
+        shutdown_timeout: 10  # For faster testing. VM has no OS, so it cannot react to ACPI shutdown.
       register: result
     - ansible.builtin.debug:
         var: result

From 37c4c65eb2e64ae4d745833682f2ab3e275179f1 Mon Sep 17 00:00:00 2001
From: Justin Cinkelj
Date: Tue, 16 May 2023 13:33:22 +0200
Subject: [PATCH 08/11] Comment about test side effect

Signed-off-by: Justin Cinkelj
---
 .../targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
index 2236cac24..5c1cb4bf6 100644
--- a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
@@ -161,6 +161,9 @@
     - ansible.builtin.assert: *test-not-virtio
 
     # ++++++++++++ Test attach snapshot disk from a VM to itself +++++++++++++
+    # This does change vm_1 (snapshot-test-vm-1), because of this whole 03_vm_snapshot_attach_disk.yml
+    # requires snapshot-test-vm-1 to be deleted and recreated each time.
+    # Refactor test if you do not like this.
 
     - name: >-
         Attach "snap-0" from VM "{{ vm_1 }}"

From 13ae756b27dfd5dc6597674114acedd9f6b35294 Mon Sep 17 00:00:00 2001
From: Justin Cinkelj
Date: Tue, 16 May 2023 13:34:40 +0200
Subject: [PATCH 09/11] Fix test for attaching snapshotted disk to the same VM

Signed-off-by: Justin Cinkelj
---
 .../vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
index 5c1cb4bf6..b1b74eb96 100644
--- a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
@@ -8,8 +8,9 @@
     vm_2: "snapshot-test-vm-1{{ vm_2_label }}"
 
     # We must use next free slot on bus if we want VM to be bootable!
-    slot_a: 3  # virtio_disk
-    slot_b: 0  # ide_disk
+    slot_vm_1_virtio: 2
+    slot_a: 3  # vm_2, virtio_disk
+    slot_b: 0  # vm_2, ide_disk
 
     force_reboot: true  # allow forced vm shutdown
@@ -177,7 +178,7 @@
       scale_computing.hypercore.vm_snapshot_attach_disk:
         vm_name: "{{ vm_1 }}"
         vm_disk_type: virtio_disk
-        vm_disk_slot: "{{ slot_a }}"
+        vm_disk_slot: "{{ slot_vm_1_virtio }}"
         source_snapshot_uuid:
           "{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
         source_disk_type: virtio_disk
@@ -196,7 +197,7 @@
     - ansible.builtin.assert: &test-virtio-2
         that:
          - result.record.type == "virtio_disk"
-          - result.record.disk_slot == slot_a
+          - result.record.disk_slot == slot_vm_1_virtio
          - result.record.vm_uuid == vm_1_info.records[0].uuid
 
    - name: >-
@@ -205,7 +206,7 @@
       scale_computing.hypercore.vm_snapshot_attach_disk:
         vm_name: "{{ vm_1 }}"
         vm_disk_type: virtio_disk
-        vm_disk_slot: "{{ slot_a }}"
+        vm_disk_slot: "{{ slot_vm_1_virtio }}"
         source_snapshot_uuid:
           "{{ vm_1_snapshot_info.records[0].snapshot_uuid }}"
         source_disk_type: virtio_disk

From fb9659c931ab8ba81aeb9f9c7b54a74fd54585a9 Mon Sep 17 00:00:00 2001
From: Justin Cinkelj
Date: Tue, 16 May 2023 13:50:26 +0200
Subject: [PATCH 10/11] The destination VM can be created by vm module

The destination VM does not need snapshots.
Signed-off-by: Justin Cinkelj
---
 .../vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml | 29 +++++++++++++++----
 1 file changed, 24 insertions(+), 5 deletions(-)

diff --git a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
index b1b74eb96..cb68f4ef5 100644
--- a/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/03_vm_snapshot_attach_disk.yml
@@ -16,11 +16,30 @@
   block:
     - name: Create a stopped VM "{{ vm_2 }}"
-      include_tasks: helper_api_vm_snapshot_create.yml
-      vars:
-        vms_number: 1
-        label: "{{ vm_2_label }}"
-        vm_init_state: stop
+      scale_computing.hypercore.vm:
+        vm_name: "{{ vm_2 }}"
+        description: Snapshot testing
+        state: present
+        tags:
+          - Xlab_snapshot_testing
+        memory: "{{ '512 MB' | human_to_bytes }}"
+        vcpu: 2
+        attach_guest_tools_iso: false
+        power_state: stop
+        disks:
+          - type: virtio_disk
+            disk_slot: 0
+            size: "{{ '10.1 GB' | human_to_bytes }}"
+          - type: virtio_disk
+            disk_slot: 1
+            size: "{{ '10.2 GB' | human_to_bytes }}"
+        nics: []
+        boot_devices:
+          - type: virtio_disk
+            disk_slot: 0
+          - type: ide_cdrom
+            disk_slot: 0
+        machine_type: BIOS
 
     # --------------------------------------------------------

From a2930577af170756ad496624ebf8806e111d9f2e Mon Sep 17 00:00:00 2001
From: Justin Cinkelj
Date: Tue, 16 May 2023 13:58:18 +0200
Subject: [PATCH 11/11] Revert some changes in
 helper_api_vm_snapshot_create.yml

But having those VMs in running state is a good idea.
It makes things harder.

Signed-off-by: Justin Cinkelj
---
 .../tasks/helper_api_vm_snapshot_create.yml | 106 +++++++++---------
 1 file changed, 52 insertions(+), 54 deletions(-)

diff --git a/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml b/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml
index 65e7c9a9c..8363167f5 100644
--- a/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml
+++ b/tests/integration/targets/vm_snapshot/tasks/helper_api_vm_snapshot_create.yml
@@ -4,7 +4,7 @@
 # ++++++++++++++++++++++++++++++
 - name: Create test VMs
   scale_computing.hypercore.vm:
-    vm_name: "snapshot-test-vm-{{ item }}{{ label | default('') }}"
+    vm_name: "snapshot-test-vm-{{ item }}"
     description: Snapshot testing
     state: present
     tags:
@@ -12,14 +12,14 @@
     memory: "{{ '512 MB' | human_to_bytes }}"
     vcpu: 2
     attach_guest_tools_iso: false
-    power_state: "{{ vm_init_state | default('start') }}"
+    power_state: start
     disks:
       - type: virtio_disk
         disk_slot: 0
-        size: "{{ '1.1 GB' | human_to_bytes }}"
+        size: "{{ '0.1 GB' | human_to_bytes }}"
       - type: virtio_disk
         disk_slot: 1
-        size: "{{ '1.2 GB' | human_to_bytes }}"
+        size: "{{ '0.2 GB' | human_to_bytes }}"
     nics:
       - vlan: 1
         type: RTL8139
@@ -43,63 +43,61 @@
 
 # ----------- Create/POST USER SNAPSHOTS -------------
 
-- block:
-    - name: Create 3 snapshots with "unique" label for "snapshot-test-vm-1"
-      scale_computing.hypercore.api:
-        action: post
-        endpoint: /rest/v1/VirDomainSnapshot
-        data:
-          domainUUID: "{{ vms_created.results.0.record.0.uuid }}"
-          label: "snap-{{ item }}"
-      with_sequence: start=0 end=2
-      register: unique_labeled_snapshots
+- name: Create 3 snapshots with "unique" label for "snapshot-test-vm-1"
+  scale_computing.hypercore.api:
+    action: post
+    endpoint: /rest/v1/VirDomainSnapshot
+    data:
+      domainUUID: "{{ vms_created.results.0.record.0.uuid }}"
+      label: "snap-{{ item }}"
+  with_sequence: start=0 end=2
+  register: unique_labeled_snapshots
 
-    - name: Show created snapshots with "unique" label on "snapshot-test-vm-1"
-      ansible.builtin.debug:
-        var: unique_labeled_snapshots
+- name: Show created snapshots with "unique" label on "snapshot-test-vm-1"
+  ansible.builtin.debug:
+    var: unique_labeled_snapshots
 
-    - name: Create 3 snapshots with "unique" label for "snapshot-test-vm-2"
-      scale_computing.hypercore.api:
-        action: post
-        endpoint: /rest/v1/VirDomainSnapshot
-        data:
-          domainUUID: "{{ vms_created.results.1.record.0.uuid }}"
-          label: "snap-{{ item }}"
-      with_sequence: start=0 end=2
-      register: unique_labeled_snapshots
+- name: Create 3 snapshots with "unique" label for "snapshot-test-vm-2"
+  scale_computing.hypercore.api:
+    action: post
+    endpoint: /rest/v1/VirDomainSnapshot
+    data:
+      domainUUID: "{{ vms_created.results.1.record.0.uuid }}"
+      label: "snap-{{ item }}"
+  with_sequence: start=0 end=2
+  register: unique_labeled_snapshots
 
-    - name: Show created snapshots with "unique" label on "snapshot-test-vm-2"
-      ansible.builtin.debug:
-        var: unique_labeled_snapshots
+- name: Show created snapshots with "unique" label on "snapshot-test-vm-2"
+  ansible.builtin.debug:
+    var: unique_labeled_snapshots
 
-    - name: Create 3 snapshots with "non-unique" label for "snapshot-test-vm-1"
-      scale_computing.hypercore.api:
-        action: post
-        endpoint: /rest/v1/VirDomainSnapshot
-        data:
-          domainUUID: "{{ vms_created.results.0.record.0.uuid }}"
-          label: "not-unique"
-      with_sequence: start=0 end=2
-      register: non_unique_labeled_snapshots
+- name: Create 3 snapshots with "non-unique" label for "snapshot-test-vm-1"
+  scale_computing.hypercore.api:
+    action: post
+    endpoint: /rest/v1/VirDomainSnapshot
+    data:
+      domainUUID: "{{ vms_created.results.0.record.0.uuid }}"
+      label: "not-unique"
+  with_sequence: start=0 end=2
+  register: non_unique_labeled_snapshots
 
-    - name: Show created snapshots with "non-unique" label on "snapshot-test-vm-1"
-      ansible.builtin.debug:
-        var: non_unique_labeled_snapshots
+- name: Show created snapshots with "non-unique" label on "snapshot-test-vm-1"
+  ansible.builtin.debug:
+    var: non_unique_labeled_snapshots
 
-    - name: Create 3 snapshots with "non-unique" label for "snapshot-test-vm-2"
-      scale_computing.hypercore.api:
-        action: post
-        endpoint: /rest/v1/VirDomainSnapshot
-        data:
-          domainUUID: "{{ vms_created.results.1.record.0.uuid }}"
-          label: "not-unique"
-      with_sequence: start=0 end=2
-      register: non_unique_labeled_snapshots
+- name: Create 3 snapshots with "non-unique" label for "snapshot-test-vm-2"
+  scale_computing.hypercore.api:
+    action: post
+    endpoint: /rest/v1/VirDomainSnapshot
+    data:
+      domainUUID: "{{ vms_created.results.1.record.0.uuid }}"
+      label: "not-unique"
+  with_sequence: start=0 end=2
+  register: non_unique_labeled_snapshots
 
-    - name: Show created snapshots with "non-unique" label on "snapshot-test-vm-2"
-      ansible.builtin.debug:
-        var: non_unique_labeled_snapshots
-  when: label is undefined
+- name: Show created snapshots with "non-unique" label on "snapshot-test-vm-2"
+  ansible.builtin.debug:
+    var: non_unique_labeled_snapshots
 
 # These snapshot serials are always the same, everytime they are freshly created
 # ++++++++++++++++++++++++++
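Editor's note (not part of the patch series): the series above changes vm_snapshot_attach_disk so that it shuts the destination VM down itself (gracefully via ACPI, then forcibly when force_reboot allows it) before cloning the snapshot disk, and restarts the VM afterwards. The sketch below shows how a playbook would drive that final interface. It is a minimal sketch under stated assumptions: VM names and slot numbers are illustrative placeholders, and the vm_snapshot_info lookup mirrors the pattern the integration tests use; treat any option not shown in the diffs above as an assumption.

```yaml
---
# Hypothetical playbook: attach the newest snapshot disk of demo-source-vm
# to demo-destination-vm. The module itself powers the destination VM off
# (normal shutdown first, forced only if force_reboot is true) and then
# powers it back on, per patches 01-02 above.
- name: Attach a snapshot disk to another VM
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Look up snapshots of the source VM (same pattern as the integration tests)
      scale_computing.hypercore.vm_snapshot_info:
        vm_name: demo-source-vm
      register: source_snapshots

    - name: Attach the snapshot's virtio disk; destination VM is rebooted if running
      scale_computing.hypercore.vm_snapshot_attach_disk:
        vm_name: demo-destination-vm
        vm_disk_type: virtio_disk
        # Per patch 05: must be the next free slot (or an already used slot)
        # on the virtio bus, otherwise the VM might not boot.
        vm_disk_slot: 2
        source_snapshot_uuid: "{{ source_snapshots.records[0].snapshot_uuid }}"
        source_disk_type: virtio_disk
        source_disk_slot: 0
        force_reboot: true     # allow forced shutdown if the guest ignores ACPI
        shutdown_timeout: 300  # seconds to wait for a graceful shutdown (module default)
      register: attach_result
```

As the integration tests above demonstrate, the attach is idempotent: re-running the task against an already occupied destination slot reports no change, and the destination VM is returned to its prior power state either way.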