diff --git a/.ansible-lint b/.ansible-lint index e31b2a46c..265a1ec15 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -6,5 +6,6 @@ exclude_paths: - tests - examples/cloud-init-user-data-example.yml - examples/hypercore_inventory.yml + - examples/vm_os_upgrade/main.yml - .github/workflows - changelogs diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml index dda6c6d1a..18f84a306 100644 --- a/.github/workflows/ansible-test.yml +++ b/.github/workflows/ansible-test.yml @@ -27,7 +27,7 @@ jobs: - name: Perform sanity testing with ansible-test uses: ansible-community/ansible-test-gh-action@release/v1 with: - ansible-core-version: stable-2.14 + ansible-core-version: stable-2.15 target-python-version: 3.11 testing-type: sanity diff --git a/.github/workflows/ci-test.yml b/.github/workflows/ci-test.yml index 3e2709d14..561b057a1 100644 --- a/.github/workflows/ci-test.yml +++ b/.github/workflows/ci-test.yml @@ -5,11 +5,12 @@ env: # ansible-test needs special directory structure. 
# WORKDIR is a subdir of GITHUB_WORKSPACE WORKDIR: work-dir/ansible_collections/scale_computing/hypercore + LANG: C.UTF-8 jobs: mypy: name: Type checks (mypy) runs-on: [ubuntu-latest] - container: quay.io/justinc1_github/scale_ci_integ:3 + container: quay.io/justinc1_github/scale_ci_integ:7 defaults: run: working-directory: ${{ env.WORKDIR }} @@ -26,7 +27,7 @@ jobs: docs: runs-on: [ubuntu-latest] - container: quay.io/justinc1_github/scale_ci_integ:3 + container: quay.io/justinc1_github/scale_ci_integ:7 defaults: run: working-directory: ${{ env.WORKDIR }} @@ -47,7 +48,7 @@ jobs: sanity-test: runs-on: [ubuntu-latest] - container: quay.io/justinc1_github/scale_ci_integ:3 + container: quay.io/justinc1_github/scale_ci_integ:7 defaults: run: working-directory: ${{ env.WORKDIR }} @@ -59,19 +60,16 @@ jobs: # Same as "make sanity" # TODO reuse Makefile - run: black -t py38 --check --diff --color plugins tests/unit - - run: ansible-lint - run: flake8 --exclude tests/output/ - run: ansible-test sanity --local --python 3.10 - # Running `ansible-lint` will look at examples as being arbitrary yaml files. - # It will complain about missing space after hash in "#xy" comment. - # Running `ansible-lint examples/*` will interpret files as ansible playbooks. - # Here we need also installed collections used in examples. + # We need to install collections used in examples, + # ansible-lint knows those files are playbooks. 
- run: ansible-galaxy collection install community.crypto - - run: ansible-lint examples/* + - run: ansible-lint units-test: runs-on: [ubuntu-latest] - container: quay.io/justinc1_github/scale_ci_integ:3 + container: quay.io/justinc1_github/scale_ci_integ:7 defaults: run: working-directory: ${{ env.WORKDIR }} diff --git a/.github/workflows/files/ansible-test-units-matrix.yml b/.github/workflows/files/ansible-test-units-matrix.yml index caf7f59d3..0b0b30a3c 100644 --- a/.github/workflows/files/ansible-test-units-matrix.yml +++ b/.github/workflows/files/ansible-test-units-matrix.yml @@ -32,3 +32,10 @@ python: "3.10" - ansible: "stable-2.14" python: "3.11" + +- ansible: "stable-2.15" + python: "3.9" +- ansible: "stable-2.15" + python: "3.10" +- ansible: "stable-2.15" + python: "3.11" diff --git a/.github/workflows/integ-test.yml b/.github/workflows/integ-test.yml index ccaeda174..cb96851ef 100644 --- a/.github/workflows/integ-test.yml +++ b/.github/workflows/integ-test.yml @@ -49,7 +49,7 @@ jobs: # to delay integ-test until integration-prepare-env finishes. 
integration-prepare-env: runs-on: [self-hosted2] - container: quay.io/justinc1_github/scale_ci_integ:3 + container: quay.io/justinc1_github/scale_ci_integ:7 env: ANSIBLE_COLLECTIONS_PATH: $GITHUB_WORKSPACE/work-dir defaults: @@ -83,7 +83,7 @@ jobs: integ-matrix: runs-on: [ubuntu-latest] - container: quay.io/justinc1_github/scale_ci_integ:3 + container: quay.io/justinc1_github/scale_ci_integ:7 defaults: run: working-directory: ${{ env.WORKDIR }} @@ -113,7 +113,7 @@ jobs: examples-matrix: runs-on: [ ubuntu-latest ] - container: quay.io/justinc1_github/scale_ci_integ:3 + container: quay.io/justinc1_github/scale_ci_integ:7 defaults: run: working-directory: ${{ env.WORKDIR }} @@ -153,7 +153,7 @@ jobs: - integ-seq if: "always() && (needs.examples-matrix.result=='success')" runs-on: [ self-hosted2 ] - container: quay.io/justinc1_github/scale_ci_integ:3 + container: quay.io/justinc1_github/scale_ci_integ:7 env: ANSIBLE_COLLECTIONS_PATH: $GITHUB_WORKSPACE/work-dir DEBIAN_FRONTEND: noninteractive @@ -163,7 +163,7 @@ jobs: strategy: fail-fast: false matrix: - ansible: [ 2.13.0 ] + ansible: [ 2.15.0 ] example_name: ${{ fromJson(needs.examples-matrix.outputs.matrix) }} steps: - name: Checkout @@ -200,7 +200,7 @@ jobs: - integ-matrix # - units-test runs-on: [self-hosted2] - container: quay.io/justinc1_github/scale_ci_integ:3 + container: quay.io/justinc1_github/scale_ci_integ:7 env: DEBIAN_FRONTEND: noninteractive defaults: @@ -209,7 +209,7 @@ jobs: strategy: fail-fast: false matrix: - # ansible: [2.13.0] + # ansible: [2.15.0] # python: [3.11] # test_name: [user_info] test_name: ${{ fromJson(needs.integ-matrix.outputs.matrix) }} @@ -247,7 +247,7 @@ jobs: uses: actions/checkout@v3 with: path: ${{ env.WORKDIR }} - - run: pip install ansible-core~=2.13.0 + - run: pip install ansible-core~=2.15.0 # We have ansible.cfg "for testing" in git repo # (it is excluded in galaxy.yml, so it is not part of collection artifact) # But it does affect ansible-galaxy and ansible-test commands. 
@@ -333,7 +333,7 @@ jobs: - integ - integ-seq runs-on: [self-hosted2] - container: quay.io/justinc1_github/scale_ci_integ:3 + container: quay.io/justinc1_github/scale_ci_integ:7 env: ANSIBLE_COLLECTIONS_PATH: $GITHUB_WORKSPACE/work-dir defaults: @@ -344,7 +344,7 @@ jobs: uses: actions/checkout@v3 with: path: ${{ env.WORKDIR }} - - run: pip install ansible-core~=2.13.0 + - run: pip install ansible-core~=2.15.0 # ${{ env.WORKDIR }} cannot be used - uses: ./work-dir/ansible_collections/scale_computing/hypercore/.github/actions/make-integ-config with: @@ -362,7 +362,7 @@ jobs: - integ - integ-seq runs-on: [self-hosted2] - container: quay.io/justinc1_github/scale_ci_integ:3 + container: quay.io/justinc1_github/scale_ci_integ:7 defaults: run: working-directory: ${{ env.WORKDIR }} diff --git a/.github/workflows/static.yml b/.github/workflows/static.yml index 569d77023..7f47ca831 100644 --- a/.github/workflows/static.yml +++ b/.github/workflows/static.yml @@ -24,7 +24,7 @@ concurrency: jobs: # Single deploy job since we're just deploying deploy: - container: quay.io/justinc1_github/scale_ci_integ:3 + container: quay.io/justinc1_github/scale_ci_integ:7 environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} diff --git a/ci-infra/docker-image/Dockerfile b/ci-infra/docker-image/Dockerfile index ccba219ad..72fd5cf77 100644 --- a/ci-infra/docker-image/Dockerfile +++ b/ci-infra/docker-image/Dockerfile @@ -1,7 +1,7 @@ # Usage: build new image locally, push it to gcr, use it in CI. 
and # we use 'pip install name~=1.2.0' to install latest 1.2.x release -ARG ANSIBLE_CORE_VERSION=2.13.0 +ARG ANSIBLE_CORE_VERSION=2.15.0 # ======================================================================= FROM python:3.10-slim-buster diff --git a/ci-infra/docker-image/build.sh b/ci-infra/docker-image/build.sh index 4bf6e052e..86db51e31 100755 --- a/ci-infra/docker-image/build.sh +++ b/ci-infra/docker-image/build.sh @@ -11,7 +11,7 @@ set -eux # Where to push images DOCKER_REGISTRY_REPO=quay.io/justinc1_github/scale_ci_integ # Tag to push -DOCKER_IMAGE_TAG=3 +DOCKER_IMAGE_TAG=7 DOCKER_CACHE="${DOCKER_CACHE:-n}" if [ "$DOCKER_CACHE" == "n" ] diff --git a/docs/rst/extra/deprecation.rst b/docs/rst/extra/deprecation.rst index 5045ec21b..b99f913f3 100644 --- a/docs/rst/extra/deprecation.rst +++ b/docs/rst/extra/deprecation.rst @@ -57,6 +57,17 @@ For ``scale_computing.hypercore.iso`` module: ansible.builtin.debug: msg: The uploaded_iso size={{ uploaded_iso.record.size }} +Release 1.3.0 +============= + +Role parameters were renamed to start with ``role_name_`` prefix. +For example, role `scale_computing.hypercore.version_update_single_node <../collections/scale_computing/hypercore/version_update_single_node_role.html>`_: + +* ``scale_computing_hypercore_desired_version`` name was used before. +* ``version_update_single_node_desired_version`` name is used now. + +Old names are still valid, but will be removed in future release 3.0.0. 
+ Release 3.0.0 (not yet released) ================================ diff --git a/examples/cluster_config.yml b/examples/cluster_config.yml index 9cd018aef..5a1f8da72 100644 --- a/examples/cluster_config.yml +++ b/examples/cluster_config.yml @@ -50,4 +50,4 @@ ansible.builtin.include_role: name: scale_computing.hypercore.cluster_config vars: - scale_computing_hypercore_cluster_config: "{{ cluster_configuration }}" + cluster_config_configuration: "{{ cluster_configuration }}" diff --git a/examples/iso.yml b/examples/iso.yml index d0c04d481..9ee882951 100644 --- a/examples/iso.yml +++ b/examples/iso.yml @@ -10,13 +10,13 @@ tasks: # ------------------------------------------------------ - - name: Download ISO {{ iso_filename }} from URL + - name: Download ISO from URL - {{ iso_filename }} ansible.builtin.get_url: url: "{{ iso_url }}" dest: /tmp/{{ iso_filename }} - mode: 0644 + mode: "0644" - - name: (Optionally) remove existing ISO {{ iso_filename }} from HyperCore + - name: (Optionally) remove existing ISO from HyperCore - {{ iso_filename }} scale_computing.hypercore.iso: name: "{{ iso_filename }}" source: /tmp/{{ iso_filename }} @@ -24,7 +24,7 @@ when: iso_remove_old_image # ------------------------------------------------------ - - name: Upload ISO {{ iso_filename }} to HyperCore + - name: Upload ISO to HyperCore - {{ iso_filename }} scale_computing.hypercore.iso: name: "{{ iso_filename }}" source: /tmp/{{ iso_filename }} @@ -32,7 +32,7 @@ register: uploaded_iso # Use iso module output, old syntax, valid until release < 3.0.0 - - name: Show upload result for ISO {{ iso_filename }} - deprecated syntax + - name: Show upload result for ISO - deprecated syntax - {{ iso_filename }} ansible.builtin.debug: msg: The uploaded_iso size={{ uploaded_iso.results.0.size }} - deprecated syntax @@ -47,6 +47,6 @@ name: "{{ iso_filename }}" register: iso_results - - name: Show info about {{ iso_filename }} ISO + - name: Show info about ISO {{ iso_filename }} ansible.builtin.debug: 
var: iso_results.records[0] diff --git a/examples/shutdown_restart_tagged_vms.yml b/examples/shutdown_restart_tagged_vms.yml index a5f82caf4..e03dd84ac 100644 --- a/examples/shutdown_restart_tagged_vms.yml +++ b/examples/shutdown_restart_tagged_vms.yml @@ -63,8 +63,8 @@ name: scale_computing.hypercore.version_update_single_node tasks_from: shutdown_vms vars: - scale_computing_hypercore_shutdown_vms: "{{ vm_desired_state }}" - scale_computing_hypercore_shutdown_tags: "{{ vm_shutdown_tags }}" + version_update_single_node_shutdown_vms: "{{ vm_desired_state }}" + version_update_single_node_shutdown_tags: "{{ vm_shutdown_tags }}" - name: List tagged VMs power-state after shutdown block: *list-tagged-vms @@ -75,7 +75,7 @@ name: scale_computing.hypercore.version_update_single_node tasks_from: restart_vms.yml vars: - scale_computing_hypercore_restart_vms: "{{ vm_desired_state }}" + version_update_single_node_restart_vms: "{{ vm_desired_state }}" - name: List tagged VMs power-state after restart block: *list-tagged-vms diff --git a/examples/version_update_single_node.yml b/examples/version_update_single_node.yml index 83dc68ff0..b23a02b35 100644 --- a/examples/version_update_single_node.yml +++ b/examples/version_update_single_node.yml @@ -28,11 +28,11 @@ ansible.builtin.include_role: name: scale_computing.hypercore.check_local_time vars: - time_zone: "{{ time_zone_info.record.zone }}" - time_interval: 22:00-6:00 + check_local_time_time_zone: "{{ time_zone_info.record.zone }}" + check_local_time_time_interval: 22:00-6:00 - name: Update HyperCore single-node system to a desired version ansible.builtin.include_role: name: scale_computing.hypercore.version_update_single_node vars: - scale_computing_hypercore_desired_version: "{{ desired_version }}" + version_update_single_node_desired_version: "{{ desired_version }}" diff --git a/examples/virtual_disk.yml b/examples/virtual_disk.yml index b8a92e83d..6c0382b82 100644 --- a/examples/virtual_disk.yml +++ 
b/examples/virtual_disk.yml @@ -11,13 +11,13 @@ tasks: # ------------------------------------------------------ - - name: Download Virtual Disk {{ image_filename }} from URL + - name: Download Virtual Disk from URL - {{ image_filename }} ansible.builtin.get_url: # TODO: what if file doesn't download completely? url: "{{ image_url }}" dest: /tmp/{{ image_filename }} - mode: 0644 + mode: "0644" - - name: (Optionally) remove existing Virtual Disk {{ image_filename }} from HyperCore + - name: (Optionally) remove existing Virtual Disk from HyperCore - {{ image_filename }} when: image_remove_old | bool scale_computing.hypercore.virtual_disk: name: "{{ item }}" @@ -27,7 +27,7 @@ - "{{ image_filename }}" # ------------------------------------------------------ - - name: Upload Virtual Disk {{ image_filename }} to HyperCore + - name: Upload Virtual Disk to HyperCore - {{ image_filename }} scale_computing.hypercore.virtual_disk: source: /tmp/{{ image_filename }} name: "{{ image_filename }}" diff --git a/examples/virtual_disk_attach.yml b/examples/virtual_disk_attach.yml index 019864eb2..fb50a653f 100644 --- a/examples/virtual_disk_attach.yml +++ b/examples/virtual_disk_attach.yml @@ -29,7 +29,7 @@ - virtual_disk_info_result.records | length == 1 # ------------------------------------------------------ - - name: Attach virtual disk {{ virtual_disk_name }} to VM {{ vm_name }} {{ vm_disk_type }}:{{ vm_disk_slot }} + - name: Attach virtual disk to VM - {{ vm_name + ", " + virtual_disk_name + " " + vm_disk_type + ":" + vm_disk_slot }} scale_computing.hypercore.virtual_disk_attach: name: "{{ virtual_disk_name }}" vm_name: "{{ vm_name }}" diff --git a/examples/vm.yml b/examples/vm.yml index e53b732aa..3eaef1192 100644 --- a/examples/vm.yml +++ b/examples/vm.yml @@ -24,19 +24,19 @@ operating_system: os_other register: vm_result - - name: Show the info about {{ vm_name }} VM + - name: Show the info about VM {{ vm_name }} ansible.builtin.debug: var: vm_result # Use vm module output, 
syntax valid until release < 3.0.0 - - name: Show VM {{ vm_name }} vCPU count - syntax valid until release < 3.0.0 + - name: Show VM vCPU count - syntax valid until release < 3.0.0 ansible.builtin.debug: msg: >- VM {{ vm_name }} has {{ vm_result.record.0.vcpu }} vCPUs - syntax valid until release < 3.0.0 # Use vm module output, new syntax, valid after release >= 3.0.0 - - name: Show VM {{ vm_name }} vCPU count - syntax valid after release >= 3.0.0 + - name: Show VM vCPU count - syntax valid after release >= 3.0.0 ansible.builtin.debug: msg: >- VM {{ vm_name }} has {{ vm_result.record.vcpu }} vCPUs - diff --git a/examples/vm_info.yml b/examples/vm_info.yml index 38ae4725d..69749fccc 100644 --- a/examples/vm_info.yml +++ b/examples/vm_info.yml @@ -12,6 +12,6 @@ vm_name: "{{ vm_name }}" register: vm_info_result - - name: Show the info about {{ vm_name }} VM + - name: Show the info about VM {{ vm_name }} ansible.builtin.debug: var: vm_info_result diff --git a/examples/vm_nic.yml b/examples/vm_nic.yml index d5a133f17..f94752a36 100644 --- a/examples/vm_nic.yml +++ b/examples/vm_nic.yml @@ -9,7 +9,7 @@ vlan_b: 11 tasks: - - name: Configure VM {{ vm_name }} with 2 NICs, VLANs {{ vlan_a }} and {{ vlan_b }} + - name: Configure VM with 2 NICs - {{ "VM=" + vm_name + ", VLANs=[" + (vlan_a | string) + ", " + (vlan_b | string) + "]" }} scale_computing.hypercore.vm_nic: vm_name: "{{ vm_name }}" items: @@ -18,10 +18,10 @@ state: set register: vm_nic_result - - name: Show the VM {{ vm_name }} new NIC state + - name: Show the VM new NIC state - {{ vm_name }} ansible.builtin.debug: var: vm_nic_result - - name: Show VM {{ vm_name }} new NIC VLANs + - name: Show VM new NIC VLANs - {{ vm_name }} ansible.builtin.debug: msg: The configured VLANs are {{ vm_nic_result.records | map(attribute="vlan") }} diff --git a/examples/vm_os_upgrade/02_upload_iso.yml b/examples/vm_os_upgrade/02_upload_iso.yml index daef03545..694696528 100644 --- a/examples/vm_os_upgrade/02_upload_iso.yml +++ 
b/examples/vm_os_upgrade/02_upload_iso.yml @@ -1,10 +1,10 @@ -- name: Download ISO {{ iso_filename }} from URL +- name: Download ISO from URL - {{ iso_filename }} ansible.builtin.get_url: url: "{{ iso_url }}" dest: /tmp/{{ iso_filename }} - mode: 0644 + mode: "0644" -- name: Upload ISO {{ iso_filename }} to HyperCore +- name: Upload ISO to HyperCore - {{ iso_filename }} scale_computing.hypercore.iso: name: "{{ iso_filename }}" source: /tmp/{{ iso_filename }} diff --git a/examples/vm_replication_info.yml b/examples/vm_replication_info.yml index d2d745d18..783c95a91 100644 --- a/examples/vm_replication_info.yml +++ b/examples/vm_replication_info.yml @@ -12,6 +12,6 @@ vm_name: "{{ vm_name }}" register: replication_info_result - - name: Show the replication info status of {{ vm_name }} VM + - name: Show the replication info status of VM {{ vm_name }} ansible.builtin.debug: var: replication_info_result diff --git a/examples/vm_snapshot_info.yml b/examples/vm_snapshot_info.yml index 0d2a9672a..b490fb328 100644 --- a/examples/vm_snapshot_info.yml +++ b/examples/vm_snapshot_info.yml @@ -5,17 +5,17 @@ gather_facts: false vars: vm_name: demo-vm - serial: 123 - label: snapshot-label + snapshot_serial: 123 + snapshot_label: snapshot-label tasks: - name: List all VM snapshots on HyperCore API scale_computing.hypercore.vm_snapshot_info: register: vm_snapshot - - name: List all VM snapshots on HyperCore API with label="{{ label }}" + - name: List all VM snapshots on HyperCore API with label="{{ snapshot_label }}" scale_computing.hypercore.vm_snapshot_info: - label: "{{ label }}" + label: "{{ snapshot_label }}" register: vm_snapshot - name: List all VM snapshots on HyperCore API with vm_name="{{ vm_name }}" @@ -23,16 +23,16 @@ vm_name: "{{ vm_name }}" register: vm_snapshot - - name: List all VM snapshots on HyperCore API with serial="{{ serial }}" + - name: List all VM snapshots on HyperCore API with serial="{{ snapshot_serial }}" scale_computing.hypercore.vm_snapshot_info: - 
serial: "{{ serial }}" + serial: "{{ snapshot_serial }}" register: vm_snapshot - name: >- List all VM snapshots on HyperCore API with - label="{{ label }}", vm_name="{{ vm_name }}", serial="{{ serial }}" + [vm_name, label, serial]={{ [vm_name, snapshot_label, snapshot_serial] }} scale_computing.hypercore.vm_snapshot_info: - label: "{{ label }}" vm_name: "{{ vm_name }}" - serial: "{{ serial }}" + label: "{{ snapshot_label }}" + serial: "{{ snapshot_serial }}" register: vm_snapshot diff --git a/plugins/doc_fragments/cloud_init.py b/plugins/doc_fragments/cloud_init.py index 8b505bc7e..a871fb26c 100644 --- a/plugins/doc_fragments/cloud_init.py +++ b/plugins/doc_fragments/cloud_init.py @@ -19,6 +19,7 @@ class ModuleDocFragment(object): - There has to be cloud-config comment present at the beginning of cloud_init file or raw yaml. required: false type: dict + default: {} suboptions: user_data: description: diff --git a/plugins/modules/vm.py b/plugins/modules/vm.py index 5f4f8f474..c0990681e 100644 --- a/plugins/modules/vm.py +++ b/plugins/modules/vm.py @@ -97,7 +97,7 @@ description: - List of disks we want to create. - Required if I(state=present). - default: [] + # default: None suboptions: disk_slot: type: int @@ -135,7 +135,7 @@ - Required if I(state=present). type: list elements: dict - default: [] + # default: None suboptions: vlan: type: int diff --git a/plugins/modules/vm_nic.py b/plugins/modules/vm_nic.py index b9108c5a2..56a1eea4f 100644 --- a/plugins/modules/vm_nic.py +++ b/plugins/modules/vm_nic.py @@ -37,6 +37,7 @@ description: - List of network interfaces. 
type: list + default: [] elements: dict suboptions: vlan: diff --git a/roles/check_local_time/defaults/main.yml b/roles/check_local_time/defaults/main.yml new file mode 100644 index 000000000..f8aac1f2e --- /dev/null +++ b/roles/check_local_time/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# renamed variables - if new name is not present, use deprecated name as default value +check_local_time_time_zone: "{{ time_zone | default(omit) }}" +check_local_time_time_interval: "{{ time_interval | default(omit) }}" diff --git a/roles/check_local_time/meta/argument_specs.yml b/roles/check_local_time/meta/argument_specs.yml index fad2aa634..bdf3bace9 100644 --- a/roles/check_local_time/meta/argument_specs.yml +++ b/roles/check_local_time/meta/argument_specs.yml @@ -5,15 +5,29 @@ argument_specs: description: - Check if local time meets the required time interval options: - time_zone: + check_local_time_time_zone: &check_local_time_time_zone description: - Time zone for which to calculate if local time meets the required time interval - Must be provided in a form 'Europe/Amsterdam' required: true type: str - time_interval: + check_local_time_time_interval: &check_local_time_time_interval description: - Time interval in which local time must be located - Must be provided in a form '22:00-6:15' or '7:30-12:36' required: true type: str + # ------------- + # Renamed/deprecated vars + time_zone: + <<: *check_local_time_time_zone + description: + - DEPRECATED, will be removed in release 3.0.0. + - Please use I(check_local_time_time_zone) instead. + required: false + time_interval: + <<: *check_local_time_time_interval + description: + - DEPRECATED, will be removed in release 3.0.0. + - Please use I(check_local_time_time_interval) instead. 
+ required: false diff --git a/roles/check_local_time/tasks/main.yml b/roles/check_local_time/tasks/main.yml index 8e000f2ad..a31b1d3ef 100644 --- a/roles/check_local_time/tasks/main.yml +++ b/roles/check_local_time/tasks/main.yml @@ -2,16 +2,17 @@ - name: Check if local time is in time interval (run check_local_time.py) ansible.builtin.script: executable: "{{ ansible_python_interpreter | default(ansible_playbook_python) }}" - cmd: check_local_time.py "{{ time_zone }}" "{{ time_interval }}" - register: local_time_output + cmd: check_local_time.py "{{ check_local_time_time_zone }}" "{{ check_local_time_time_interval }}" + register: check_local_time_output - name: Assert that local time is in time interval ansible.builtin.assert: - fail_msg: "Local time for time zone {{ time_zone }} is not in required time interval {{ time_interval }}" - success_msg: "Local time for time zone {{ time_zone }} is in required time interval {{ time_interval }}" + fail_msg: "Local time for time zone {{ check_local_time_time_zone }} is not in required time interval {{ check_local_time_time_interval }}" + success_msg: "Local time for time zone {{ check_local_time_time_zone }} is in required time interval {{ check_local_time_time_interval }}" that: - - local_time_output.stdout_lines[0] == "True" - register: result + - check_local_time_output.stdout_lines[0] == "True" + register: check_local_time_result - name: Set fact to use in tests - ansible.builtin.set_fact: local_time_msg="{{ result.msg }}" + ansible.builtin.set_fact: + check_local_time_msg: "{{ check_local_time_result.msg }}" diff --git a/roles/cluster_config/defaults/main.yml b/roles/cluster_config/defaults/main.yml new file mode 100644 index 000000000..599035823 --- /dev/null +++ b/roles/cluster_config/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# Deprecated scale_computing_hypercore_cluster_config is still used if cluster_config_configuration is not defined +cluster_config_configuration: "{{ scale_computing_hypercore_cluster_config | 
default(omit) }}" diff --git a/roles/cluster_config/meta/argument_specs.yml b/roles/cluster_config/meta/argument_specs.yml index 33e3d486c..b7958501c 100644 --- a/roles/cluster_config/meta/argument_specs.yml +++ b/roles/cluster_config/meta/argument_specs.yml @@ -5,7 +5,7 @@ argument_specs: description: - Role cluster_config can be used to fully configure a new HyperCore server, or partially reconfigure an existing HyperCore server. options: - scale_computing_hypercore_cluster_config: + cluster_config_configuration: &cluster_config_configuration description: - A dict describing a full or partial cluster configuration. - Partial configuration will be used if some of the keys in `scale_computing_hypercore_cluster_config` are omitted. @@ -167,3 +167,9 @@ argument_specs: choices: - udp - tcp + scale_computing_hypercore_cluster_config: + <<: *cluster_config_configuration + description: + - DEPRECATED, will be removed in release 3.0.0. + - Please use I(cluster_config_configuration) instead. + required: false diff --git a/roles/cluster_config/tasks/main.yml b/roles/cluster_config/tasks/main.yml index ce8e16479..009a8fa31 100644 --- a/roles/cluster_config/tasks/main.yml +++ b/roles/cluster_config/tasks/main.yml @@ -7,113 +7,113 @@ - name: Set cluster name scale_computing.hypercore.cluster_name: - name_new: "{{ scale_computing_hypercore_cluster_config.name }}" - when: scale_computing_hypercore_cluster_config.name | default(False) + name_new: "{{ cluster_config_configuration.name }}" + when: cluster_config_configuration.name | default(False) - name: Set registration data scale_computing.hypercore.registration: - company_name: "{{ scale_computing_hypercore_cluster_config.registration.company_name }}" - contact: "{{ scale_computing_hypercore_cluster_config.registration.contact }}" - phone: "{{ scale_computing_hypercore_cluster_config.registration.phone }}" - email: "{{ scale_computing_hypercore_cluster_config.registration.email }}" + company_name: "{{ 
cluster_config_configuration.registration.company_name }}" + contact: "{{ cluster_config_configuration.registration.contact }}" + phone: "{{ cluster_config_configuration.registration.phone }}" + email: "{{ cluster_config_configuration.registration.email }}" state: present - when: scale_computing_hypercore_cluster_config.registration | default(False) + when: cluster_config_configuration.registration | default(False) - name: Set DNS servers configuration scale_computing.hypercore.dns_config: - dns_servers: "{{ scale_computing_hypercore_cluster_config.dns.server_ips | default(omit) }}" + dns_servers: "{{ cluster_config_configuration.dns.server_ips | default(omit) }}" state: set when: - - scale_computing_hypercore_cluster_config.dns | default(False) - - '"server_ips" in scale_computing_hypercore_cluster_config.dns' - - scale_computing_hypercore_cluster_config.dns.server_ips is not none + - cluster_config_configuration.dns | default(False) + - '"server_ips" in cluster_config_configuration.dns' + - cluster_config_configuration.dns.server_ips is not none - name: Set DNS search_domains configuration scale_computing.hypercore.dns_config: - dns_servers: "{{ scale_computing_hypercore_cluster_config.dns.server_ips | default(omit) }}" - search_domains: "{{ scale_computing_hypercore_cluster_config.dns.search_domains | default(omit) }}" + dns_servers: "{{ cluster_config_configuration.dns.server_ips | default(omit) }}" + search_domains: "{{ cluster_config_configuration.dns.search_domains | default(omit) }}" state: set when: - - scale_computing_hypercore_cluster_config.dns | default(False) - - '"search_domains" in scale_computing_hypercore_cluster_config.dns' - - scale_computing_hypercore_cluster_config.dns.search_domains is not none + - cluster_config_configuration.dns | default(False) + - '"search_domains" in cluster_config_configuration.dns' + - cluster_config_configuration.dns.search_domains is not none - name: Set OIDC configuration scale_computing.hypercore.oidc_config: - 
client_id: "{{ scale_computing_hypercore_cluster_config.oidc.client_id }}" - shared_secret: "{{ scale_computing_hypercore_cluster_config.oidc.shared_secret | default(omit) }}" - certificate: "{{ scale_computing_hypercore_cluster_config.oidc.certificate | default(omit) }}" - config_url: "{{ scale_computing_hypercore_cluster_config.oidc.config_url }}" - scopes: "{{ scale_computing_hypercore_cluster_config.oidc.scopes }}" - when: scale_computing_hypercore_cluster_config.oidc | default(False) + client_id: "{{ cluster_config_configuration.oidc.client_id }}" + shared_secret: "{{ cluster_config_configuration.oidc.shared_secret | default(omit) }}" + certificate: "{{ cluster_config_configuration.oidc.certificate | default(omit) }}" + config_url: "{{ cluster_config_configuration.oidc.config_url }}" + scopes: "{{ cluster_config_configuration.oidc.scopes }}" + when: cluster_config_configuration.oidc | default(False) - name: Set time server scale_computing.hypercore.time_server: - source: "{{ scale_computing_hypercore_cluster_config.time_server }}" - when: scale_computing_hypercore_cluster_config.time_server | default(False) + source: "{{ cluster_config_configuration.time_server }}" + when: cluster_config_configuration.time_server | default(False) - name: Set time zone scale_computing.hypercore.time_zone: - zone: "{{ scale_computing_hypercore_cluster_config.time_zone }}" - when: scale_computing_hypercore_cluster_config.time_zone | default(False) + zone: "{{ cluster_config_configuration.time_zone }}" + when: cluster_config_configuration.time_zone | default(False) - name: Set SMTP configuration scale_computing.hypercore.smtp: - server: "{{ scale_computing_hypercore_cluster_config.smtp.server }}" - port: "{{ scale_computing_hypercore_cluster_config.smtp.port }}" - use_ssl: "{{ scale_computing_hypercore_cluster_config.smtp.use_ssl | default(omit) }}" - auth_user: "{{ scale_computing_hypercore_cluster_config.smtp.auth_user | default(omit) }}" - auth_password: "{{ 
scale_computing_hypercore_cluster_config.smtp.auth_password | default(omit) }}" - from_address: "{{ scale_computing_hypercore_cluster_config.smtp.from_address | default(omit) }}" - when: scale_computing_hypercore_cluster_config.smtp | default(False) + server: "{{ cluster_config_configuration.smtp.server }}" + port: "{{ cluster_config_configuration.smtp.port }}" + use_ssl: "{{ cluster_config_configuration.smtp.use_ssl | default(omit) }}" + auth_user: "{{ cluster_config_configuration.smtp.auth_user | default(omit) }}" + auth_password: "{{ cluster_config_configuration.smtp.auth_password | default(omit) }}" + from_address: "{{ cluster_config_configuration.smtp.from_address | default(omit) }}" + when: cluster_config_configuration.smtp | default(False) - name: Reconfigure email alert recipients when: - - '"email_alerts" in scale_computing_hypercore_cluster_config' + - '"email_alerts" in cluster_config_configuration' - | - scale_computing_hypercore_cluster_config.email_alerts or - scale_computing_hypercore_cluster_config.email_alerts == [] + cluster_config_configuration.email_alerts or + cluster_config_configuration.email_alerts == [] block: - name: Get old email alert recipients scale_computing.hypercore.email_alert_info: - register: email_alert_info_result + register: cluster_config_email_alert_info_result - name: Remove old email alerts scale_computing.hypercore.email_alert: email: "{{ email_alert_recipient.email }}" state: absent - loop: "{{ email_alert_info_result.records }}" + loop: "{{ cluster_config_email_alert_info_result.records }}" loop_control: loop_var: email_alert_recipient - when: email_alert_recipient.email not in scale_computing_hypercore_cluster_config.email_alerts + when: email_alert_recipient.email not in cluster_config_configuration.email_alerts - name: Set new email alerts scale_computing.hypercore.email_alert: email: "{{ email_address }}" state: present - loop: "{{ scale_computing_hypercore_cluster_config.email_alerts or [] }}" + loop: "{{ 
cluster_config_configuration.email_alerts or [] }}" loop_control: loop_var: email_address - name: Reconfigure syslog servers when: - - '"syslog_servers" in scale_computing_hypercore_cluster_config' + - '"syslog_servers" in cluster_config_configuration' - | - scale_computing_hypercore_cluster_config.syslog_servers or - scale_computing_hypercore_cluster_config.syslog_servers == [] + cluster_config_configuration.syslog_servers or + cluster_config_configuration.syslog_servers == [] block: - name: Get old syslog servers scale_computing.hypercore.syslog_server_info: - register: syslog_server_info_result + register: cluster_config_syslog_server_info_result - name: Remove old syslog servers scale_computing.hypercore.syslog_server: host: "{{ syslog_server.host }}" state: absent - loop: "{{ syslog_server_info_result.records }}" + loop: "{{ cluster_config_syslog_server_info_result.records }}" loop_control: loop_var: syslog_server - when: syslog_server.host not in (scale_computing_hypercore_cluster_config.syslog_servers | map(attribute='host') | list) + when: syslog_server.host not in (cluster_config_configuration.syslog_servers | map(attribute='host') | list) - name: Set new syslog servers scale_computing.hypercore.syslog_server: @@ -121,6 +121,6 @@ port: "{{ syslog_server.port | default(omit) }}" protocol: "{{ syslog_server.protocol | default(omit) }}" state: present - loop: "{{ scale_computing_hypercore_cluster_config.syslog_servers or [] }}" + loop: "{{ cluster_config_configuration.syslog_servers or [] }}" loop_control: loop_var: syslog_server diff --git a/roles/version_update_single_node/defaults/main.yml b/roles/version_update_single_node/defaults/main.yml index b132a00d8..2230276e1 100644 --- a/roles/version_update_single_node/defaults/main.yml +++ b/roles/version_update_single_node/defaults/main.yml @@ -1,3 +1,6 @@ --- -scale_computing_hypercore_shutdown_wait_time: 300 -scale_computing_hypercore_shutdown_tags: [] +version_update_single_node_shutdown_wait_time: "{{ 
scale_computing_hypercore_shutdown_wait_time | default(300) }}" +version_update_single_node_shutdown_tags: "{{ scale_computing_hypercore_shutdown_tags | default([]) }}" +# renamed variables - if new name is not present, use deprecated name as default value +version_update_single_node_shutdown_vms: "{{ scale_computing_hypercore_shutdown_vms | default(omit) }}" +version_update_single_node_restart_vms: "{{ scale_computing_hypercore_restart_vms | default(omit) }}" diff --git a/roles/version_update_single_node/meta/argument_specs.yml b/roles/version_update_single_node/meta/argument_specs.yml index 03f2455f0..02300248b 100644 --- a/roles/version_update_single_node/meta/argument_specs.yml +++ b/roles/version_update_single_node/meta/argument_specs.yml @@ -9,7 +9,7 @@ argument_specs: upgrade cluster, start back VMs that were running before upgrade. options: - scale_computing_hypercore_desired_version: + version_update_single_node_desired_version: &version_update_single_node_desired_version description: - The desired HyperCore version we wish to update to. - If already on desired version, the updates will not be applied. @@ -17,12 +17,26 @@ argument_specs: - If multi-node system was detected, no update will be applied. required: true type: str - scale_computing_hypercore_shutdown_wait_time: &scale_computing_hypercore_shutdown_wait_time + version_update_single_node_shutdown_wait_time: &version_update_single_node_shutdown_wait_time description: - How much time (in seconds) VMs have to gracefully shutdown. - After wait time expires a force shutdown is issued. Force shutdown can corrupt VM disk data. default: 300 type: int + # ------------- + # Renamed/deprecated vars + scale_computing_hypercore_desired_version: + <<: *version_update_single_node_desired_version + description: + - DEPRECATED, will be removed in release 3.0.0. + - Please use I(version_update_single_node_desired_version) instead. 
+ required: false + scale_computing_hypercore_shutdown_wait_time: &scale_computing_hypercore_shutdown_wait_time + <<: *version_update_single_node_shutdown_wait_time + description: + - DEPRECATED, will be removed in release 3.0.0. + - Please use I(version_update_single_node_shutdown_wait_time) instead. + required: false shutdown_vms: short_description: Shutdown running VMs before upgrade @@ -31,13 +45,13 @@ argument_specs: - Input is a list of VMs, as returned by M(scale_computing.hypercore.vm_info) module. The VMs listed as `running` in the list are then shutdown. options: - scale_computing_hypercore_shutdown_wait_time: *scale_computing_hypercore_shutdown_wait_time - scale_computing_hypercore_shutdown_vms: + version_update_single_node_shutdown_wait_time: *version_update_single_node_shutdown_wait_time + version_update_single_node_shutdown_vms: &version_update_single_node_shutdown_vms description: - VM list as returned by M(scale_computing.hypercore.vm_info) module. required: true type: dict - scale_computing_hypercore_shutdown_tags: + version_update_single_node_shutdown_tags: &version_update_single_node_shutdown_tags description: - VM will be shutdown only if it has assigned (at least one) tag from this list. - If tag list is empty, than every running VM from the VM list is shutdown. @@ -45,6 +59,21 @@ argument_specs: type: list elements: str default: [] + # ------------- + # Renamed/deprecated vars + scale_computing_hypercore_shutdown_wait_time: *scale_computing_hypercore_shutdown_wait_time + scale_computing_hypercore_shutdown_vms: + <<: *version_update_single_node_shutdown_vms + description: + - DEPRECATED, will be removed in release 3.0.0. + - Please use I(version_update_single_node_shutdown_vms) instead. + required: false + scale_computing_hypercore_shutdown_tags: + <<: *version_update_single_node_shutdown_tags + description: + - DEPRECATED, will be removed in release 3.0.0. + - Please use I(version_update_single_node_shutdown_tags) instead. 
+ required: false restart_vms: short_description: Start VMs that were running before upgrade @@ -53,8 +82,16 @@ argument_specs: - Input is a list of VMs, as returned by M(scale_computing.hypercore.vm_info) module. The VMs listed as `running` in the list are then started. options: - scale_computing_hypercore_restart_vms: + version_update_single_node_restart_vms: &version_update_single_node_restart_vms description: - VM list as returned by M(scale_computing.hypercore.vm_info) module. required: true type: dict + # ------------- + # Renamed/deprecated vars + scale_computing_hypercore_restart_vms: + <<: *version_update_single_node_restart_vms + description: + - DEPRECATED, will be removed in release 3.0.0. + - Please use I(version_update_single_node_restart_vms) instead. + required: false diff --git a/roles/version_update_single_node/tasks/main.yml b/roles/version_update_single_node/tasks/main.yml index f7d5fd490..0956d2b78 100644 --- a/roles/version_update_single_node/tasks/main.yml +++ b/roles/version_update_single_node/tasks/main.yml @@ -1,92 +1,92 @@ --- - name: Check if there is already an update in progress scale_computing.hypercore.version_update_status_info: - register: update_status_before_update + register: version_update_single_node_update_status_before_update - name: Current update status ansible.builtin.debug: - var: update_status_before_update + var: version_update_single_node_update_status_before_update - name: Get cluster info scale_computing.hypercore.cluster_info: - register: cluster_info + register: version_update_single_node_cluster_info - name: Show cluster info ansible.builtin.debug: - var: cluster_info + var: version_update_single_node_cluster_info - name: Get node info scale_computing.hypercore.node_info: - register: node_info + register: version_update_single_node_node_info - name: Show node info ansible.builtin.debug: - var: node_info + var: version_update_single_node_node_info - name: Check if single-node system - fail if not 
ansible.builtin.fail: msg: >- The role should be used only with single node systems. - This system does have {{ node_info.records | length }} nodes. - when: node_info.records | length > 1 + This system does have {{ version_update_single_node_node_info.records | length }} nodes. + when: version_update_single_node_node_info.records | length > 1 # =================================================================== - name: Update + when: + - version_update_single_node_cluster_info.record.icos_version != version_update_single_node_desired_version + - >- + version_update_single_node_update_status_before_update.record == None or + version_update_single_node_update_status_before_update.record.update_status != "IN PROGRESS" block: - name: Get available updates scale_computing.hypercore.version_update_info: - register: available_updates + register: version_update_single_node_available_updates - name: Show available updates ansible.builtin.debug: - var: available_updates + var: version_update_single_node_available_updates - name: Check if desired update is available - fail if not available ansible.builtin.fail: msg: >- - Requested update {{ scale_computing_hypercore_desired_version }} is not - in available_updates {{ available_updates.records | map(attribute='uuid') | list }} - when: not scale_computing_hypercore_desired_version in (available_updates.records | map(attribute='uuid') | list) + Requested update {{ version_update_single_node_desired_version }} is not + in version_update_single_node_available_updates {{ version_update_single_node_available_updates.records | map(attribute='uuid') | list }} + when: not version_update_single_node_desired_version in (version_update_single_node_available_updates.records | map(attribute='uuid') | list) - name: Get all available running VMs scale_computing.hypercore.vm_info: - register: vm_info + register: version_update_single_node_vm_info - name: Shutdown all running VMs - include_tasks: shutdown_vms.yml + 
ansible.builtin.include_tasks: shutdown_vms.yml vars: - scale_computing_hypercore_shutdown_vms: "{{ vm_info }}" - when: scale_computing_hypercore_shutdown_vms.records != [] + version_update_single_node_shutdown_vms: "{{ version_update_single_node_vm_info }}" + when: version_update_single_node_shutdown_vms.records != [] # ----------------- UPDATE -------------------- - name: Update single-node system scale_computing.hypercore.version_update: - icos_version: "{{ scale_computing_hypercore_desired_version }}" - register: update_result + icos_version: "{{ version_update_single_node_desired_version }}" + register: version_update_single_node_update_result - name: Check update status - include_tasks: update_status_check.yml + ansible.builtin.include_tasks: update_status_check.yml - name: Show update result ansible.builtin.debug: - var: update_result + var: version_update_single_node_update_result # --------------------------------------------- - name: Restart previously running VMs - include_tasks: restart_vms.yml + ansible.builtin.include_tasks: restart_vms.yml vars: - scale_computing_hypercore_restart_vms: "{{ vm_info }}" - when: scale_computing_hypercore_restart_vms.records != [] + version_update_single_node_restart_vms: "{{ version_update_single_node_vm_info }}" + when: version_update_single_node_restart_vms.records != [] - name: Check if updating to desired version failed ansible.builtin.fail: - msg: Update to version "{{ scale_computing_hypercore_desired_version }}" failed. - when: update_result.record.uuid != scale_computing_hypercore_desired_version - when: - - cluster_info.record.icos_version != scale_computing_hypercore_desired_version - - >- - update_status_before_update.record == None or - update_status_before_update.record.update_status != "IN PROGRESS" + msg: Update to version "{{ version_update_single_node_desired_version }}" failed. 
+ when: version_update_single_node_update_result.record.uuid != version_update_single_node_desired_version diff --git a/roles/version_update_single_node/tasks/restart_vms.yml b/roles/version_update_single_node/tasks/restart_vms.yml index e75641f52..1c8bb479a 100644 --- a/roles/version_update_single_node/tasks/restart_vms.yml +++ b/roles/version_update_single_node/tasks/restart_vms.yml @@ -4,9 +4,9 @@ vm_name: "{{ item.vm_name }}" power_state: start when: item.power_state == 'started' - loop: "{{ scale_computing_hypercore_restart_vms.records }}" - register: vm_start_result + loop: "{{ version_update_single_node_restart_vms.records }}" + register: version_update_single_node_vm_start_result - name: Show restart results ansible.builtin.debug: - var: vm_start_result + var: version_update_single_node_vm_start_result diff --git a/roles/version_update_single_node/tasks/shutdown_vms.yml b/roles/version_update_single_node/tasks/shutdown_vms.yml index 2c51d80fc..b3ef2f79c 100644 --- a/roles/version_update_single_node/tasks/shutdown_vms.yml +++ b/roles/version_update_single_node/tasks/shutdown_vms.yml @@ -3,18 +3,18 @@ ansible.builtin.debug: msg: "{{ item.vm_name }}" when: item.power_state == 'started' - loop: "{{ scale_computing_hypercore_shutdown_vms.records }}" - register: running_vms + loop: "{{ version_update_single_node_shutdown_vms.records }}" + register: version_update_single_node_running_vms -- name: Set fact version_update_all_vms_stopped to initial false +- name: Set fact version_update_single_node_all_vms_stopped to initial false ansible.builtin.set_fact: - version_update_all_vms_stopped: false + version_update_single_node_all_vms_stopped: false # Wait up to 300 sec (30*10) - name: Wait until VMs shutdown - include_tasks: wait_vm_shutdown.yml - loop: "{{ range(0, (scale_computing_hypercore_shutdown_wait_time / 10.0) | round(0, 'ceil') | int) | list }}" - when: not version_update_all_vms_stopped + ansible.builtin.include_tasks: wait_vm_shutdown.yml + loop: "{{ 
range(0, ((version_update_single_node_shutdown_wait_time | int) / 10.0) | round(0, 'ceil') | int) | list }}" + when: not version_update_single_node_all_vms_stopped - name: Force shutdown the remaining running VMs scale_computing.hypercore.vm_params: @@ -22,10 +22,10 @@ power_state: stop when: - item.item.power_state == 'started' - - (scale_computing_hypercore_shutdown_tags == []) or (scale_computing_hypercore_shutdown_tags | intersect(item.item.tags)) - loop: "{{ vm_shutdown_result.results }}" - register: vm_stop_result + - (version_update_single_node_shutdown_tags == []) or (version_update_single_node_shutdown_tags | intersect(item.item.tags)) + loop: "{{ version_update_single_node_vm_shutdown_result.results }}" + register: version_update_single_node_vm_stop_result - name: Show VM stop results ansible.builtin.debug: - var: vm_stop_result + var: version_update_single_node_vm_stop_result diff --git a/roles/version_update_single_node/tasks/update_status_check.yml b/roles/version_update_single_node/tasks/update_status_check.yml index 729791592..ac7a51503 100644 --- a/roles/version_update_single_node/tasks/update_status_check.yml +++ b/roles/version_update_single_node/tasks/update_status_check.yml @@ -1,18 +1,22 @@ --- - name: Check update status on HC3 block: - - name: Increment retry_count + - name: Increment version_update_single_node_retry_count ansible.builtin.set_fact: - retry_count: "{{ 0 if retry_count is undefined else retry_count | int + 1 }}" + version_update_single_node_retry_count: >- + {{ + 0 if version_update_single_node_retry_count is undefined + else version_update_single_node_retry_count | int + 1 + }} - name: Check update status - will report FAILED-RETRYING until update COMPLETE/TERMINATED scale_computing.hypercore.version_update_status_info: - register: update_status + register: version_update_single_node_update_status until: >- - update_status.record != None and + version_update_single_node_update_status.record != None and ( - 
update_status.record.update_status == "COMPLETE" or - update_status.record.update_status == "TERMINATING" + version_update_single_node_update_status.record.update_status == "COMPLETE" or + version_update_single_node_update_status.record.update_status == "TERMINATING" ) retries: 100 delay: 30 @@ -22,10 +26,11 @@ - name: Fail if retries reached 20 ansible.builtin.fail: msg: Maximum retries of grouped tasks reached - when: retry_count | int == 20 + when: version_update_single_node_retry_count | int == 20 - name: Log ansible.builtin.debug: msg: Update status check failed due to server down / restart - retrying - - include_tasks: update_status_check.yml # Recursion + - name: Check update status again + ansible.builtin.include_tasks: update_status_check.yml # Recursion diff --git a/roles/version_update_single_node/tasks/wait_vm_shutdown.yml b/roles/version_update_single_node/tasks/wait_vm_shutdown.yml index bee170576..f5572eece 100644 --- a/roles/version_update_single_node/tasks/wait_vm_shutdown.yml +++ b/roles/version_update_single_node/tasks/wait_vm_shutdown.yml @@ -7,24 +7,24 @@ power_state: shutdown when: - single_vm.power_state == 'started' - - (scale_computing_hypercore_shutdown_tags == []) or (scale_computing_hypercore_shutdown_tags | intersect(single_vm.tags)) - loop: "{{ scale_computing_hypercore_shutdown_vms.records }}" + - (version_update_single_node_shutdown_tags == []) or (version_update_single_node_shutdown_tags | intersect(single_vm.tags)) + loop: "{{ version_update_single_node_shutdown_vms.records }}" loop_control: loop_var: single_vm - register: vm_shutdown_result + register: version_update_single_node_vm_shutdown_result ignore_errors: true # if VMs fail to shut down without force, error will occur, so we skip and try on to shut down with force - name: Show shutdown results ansible.builtin.debug: - var: vm_shutdown_result + var: version_update_single_node_vm_shutdown_result - name: Get all available running VMs scale_computing.hypercore.vm_info: - 
register: version_update_vms + register: version_update_single_node_vms - name: Show unique VM power states ansible.builtin.debug: - msg: Unique VM power states {{ version_update_vms.records | map(attribute='power_state') | unique }} + msg: Unique VM power states {{ version_update_single_node_vms.records | map(attribute='power_state') | unique }} # HyperCore states # RUNNING Currently running @@ -42,25 +42,25 @@ # CRASHED="crashed", # Do not include 'shutdown' - it means "shutting_down" # States paused, blocked - might be safe to include, might not. Do not include yet. - - name: Set fact version_update_all_vms_stopped to initial true + - name: Set fact version_update_single_node_all_vms_stopped to initial true ansible.builtin.set_fact: - version_update_all_vms_stopped: true + version_update_single_node_all_vms_stopped: true - # We wait for VMs to shutdown, but only if they are included in the scale_computing_hypercore_shutdown_vms list. - - name: Reset version_update_all_vms_stopped if any VM is still running + # We wait for VMs to shutdown, but only if they are included in the version_update_single_node_shutdown_vms list. 
+ - name: Reset version_update_single_node_all_vms_stopped if any VM is still running ansible.builtin.set_fact: - version_update_all_vms_stopped: false + version_update_single_node_all_vms_stopped: false when: - - (version_update_vms.records | selectattr("vm_name", "equalto", item.vm_name) | list).0.power_state not in ['stopped', 'crashed'] - - (scale_computing_hypercore_shutdown_tags == []) or (scale_computing_hypercore_shutdown_tags | intersect(item.tags)) - loop: "{{ scale_computing_hypercore_shutdown_vms.records }}" - register: vm_shutdown_result + - (version_update_single_node_vms.records | selectattr("vm_name", "equalto", item.vm_name) | list).0.power_state not in ['stopped', 'crashed'] + - (version_update_single_node_shutdown_tags == []) or (version_update_single_node_shutdown_tags | intersect(item.tags)) + loop: "{{ version_update_single_node_shutdown_vms.records }}" + register: version_update_single_node_vm_shutdown_result - name: Are all VMs stopped? ansible.builtin.debug: - var: version_update_all_vms_stopped + var: version_update_single_node_all_vms_stopped - name: Wait if VMs are still running - when: not version_update_all_vms_stopped + when: not version_update_single_node_all_vms_stopped ansible.builtin.pause: seconds: 10 diff --git a/test.requirements b/test.requirements index dca8bdab9..78ac5d708 100644 --- a/test.requirements +++ b/test.requirements @@ -1,5 +1,6 @@ -coverage==4.5.4 -pytest==7.1.2 -pytest-xdist==2.5.0 -pytest-mock==3.8.2 -ansible-lint==6.5.2 +coverage==7.2.5 +pytest==7.3.1 +pytest-xdist==3.3.0 +pytest-mock==3.10.0 +pytest-forked==1.6.0 +ansible-lint==6.17.0 diff --git a/tests/integration/targets/role_check_local_time/tasks/main.yml b/tests/integration/targets/role_check_local_time/tasks/main.yml index e0f71e8c3..8bca12604 100644 --- a/tests/integration/targets/role_check_local_time/tasks/main.yml +++ b/tests/integration/targets/role_check_local_time/tasks/main.yml @@ -81,15 +81,15 @@ ansible.builtin.include_role: name: 
scale_computing.hypercore.check_local_time vars: - time_zone: "{{ test_timezone }}" # ansible_date_time.tz returns CEST which is not a valid tz for env var TZ - time_interval: "{{ local_time.hour_m1 }}:00-{{ local_time.hour_p1 }}:59" + check_local_time_time_zone: "{{ test_timezone }}" # ansible_date_time.tz returns CEST which is not a valid tz for env var TZ + check_local_time_time_interval: "{{ local_time.hour_m1 }}:00-{{ local_time.hour_p1 }}:59" - - name: Check local_time_msg for passed case + - name: Check check_local_time_msg for passed case ansible.builtin.assert: that: - >- 'Local time for time zone {{ test_timezone }} is in required time interval - {{ local_time.hour_m1 }}:00-{{ local_time.hour_p1 }}:59' in local_time_msg + {{ local_time.hour_m1 }}:00-{{ local_time.hour_p1 }}:59' in check_local_time_msg # ------------------------------------------------------------------------------ - name: Check that local time doesn't meet required time interval @@ -98,12 +98,12 @@ apply: ignore_errors: True vars: - time_zone: "{{ test_timezone }}" - time_interval: "{{ local_time.hour_m1 }}:00-{{ local_time.hour_m1 }}:01" + check_local_time_time_zone: "{{ test_timezone }}" + check_local_time_time_interval: "{{ local_time.hour_m1 }}:00-{{ local_time.hour_m1 }}:01" - - name: Check local_time_msg for failed case + - name: Check check_local_time_msg for failed case ansible.builtin.assert: that: - >- 'Local time for time zone {{ test_timezone }} is not in required time interval - {{ local_time.hour_m1 }}:00-{{ local_time.hour_m1 }}:01' in local_time_msg + {{ local_time.hour_m1 }}:00-{{ local_time.hour_m1 }}:01' in check_local_time_msg diff --git a/tests/integration/targets/role_version_update_single_node/tasks/01_update.yml b/tests/integration/targets/role_version_update_single_node/tasks/01_update.yml index 4a4ec03ff..6aa85e940 100644 --- a/tests/integration/targets/role_version_update_single_node/tasks/01_update.yml +++ 
b/tests/integration/targets/role_version_update_single_node/tasks/01_update.yml @@ -11,7 +11,7 @@ ansible.builtin.include_role: name: scale_computing.hypercore.version_update_single_node vars: - scale_computing_hypercore_desired_version: "{{ desired_version_apply }}" + version_update_single_node_desired_version: "{{ desired_version_apply }}" # -------------------------------------------------------------- diff --git a/tests/integration/targets/role_version_update_single_node/tasks/02_update_not_available.yml b/tests/integration/targets/role_version_update_single_node/tasks/02_update_not_available.yml index 0fbe13f32..f2ddca26d 100644 --- a/tests/integration/targets/role_version_update_single_node/tasks/02_update_not_available.yml +++ b/tests/integration/targets/role_version_update_single_node/tasks/02_update_not_available.yml @@ -11,7 +11,7 @@ ansible.builtin.include_role: name: scale_computing.hypercore.version_update_single_node vars: - scale_computing_hypercore_desired_version: "{{ desired_version_apply }}" + version_update_single_node_desired_version: "{{ desired_version_apply }}" # -------------------------------------------------------------- diff --git a/tests/integration/targets/version_update__shutdown_restart_vms/tasks/01_shutdown_all.yml b/tests/integration/targets/version_update__shutdown_restart_vms/tasks/01_shutdown_all.yml index faef34430..6cacc8476 100644 --- a/tests/integration/targets/version_update__shutdown_restart_vms/tasks/01_shutdown_all.yml +++ b/tests/integration/targets/version_update__shutdown_restart_vms/tasks/01_shutdown_all.yml @@ -31,7 +31,7 @@ name: scale_computing.hypercore.version_update_single_node tasks_from: shutdown_vms.yml vars: - scale_computing_hypercore_shutdown_vms: "{{ vm_info_a }}" + version_update_single_node_shutdown_vms: "{{ vm_info_a }}" # ------------------------------------------------------------------------------------------------------------------ # after shutdown @@ -55,7 +55,7 @@ name: 
scale_computing.hypercore.version_update_single_node tasks_from: restart_vms.yml vars: - scale_computing_hypercore_restart_vms: "{{ vm_info_a }}" + version_update_single_node_restart_vms: "{{ vm_info_a }}" # ------------------------------------------------------------------------------------------------------------------ # after restart diff --git a/tests/integration/targets/version_update__shutdown_restart_vms/tasks/02_shutdown_none.yml b/tests/integration/targets/version_update__shutdown_restart_vms/tasks/02_shutdown_none.yml index d40038144..74765d866 100644 --- a/tests/integration/targets/version_update__shutdown_restart_vms/tasks/02_shutdown_none.yml +++ b/tests/integration/targets/version_update__shutdown_restart_vms/tasks/02_shutdown_none.yml @@ -38,7 +38,7 @@ name: scale_computing.hypercore.version_update_single_node tasks_from: shutdown_vms.yml vars: - scale_computing_hypercore_shutdown_vms: "{{ vm_info_a }}" + version_update_single_node_shutdown_vms: "{{ vm_info_a }}" # ------------------------------------------------------------------------------------------------------------------ @@ -76,7 +76,7 @@ name: scale_computing.hypercore.version_update_single_node tasks_from: restart_vms.yml vars: - scale_computing_hypercore_restart_vms: "{{ vm_info_a }}" + version_update_single_node_restart_vms: "{{ vm_info_a }}" # ------------------------------------------------------------------------------------------------------------------ # after restart diff --git a/tests/integration/targets/version_update__shutdown_restart_vms/tasks/03_shutdown_some.yml b/tests/integration/targets/version_update__shutdown_restart_vms/tasks/03_shutdown_some.yml index d68d39f53..cbb01d1da 100644 --- a/tests/integration/targets/version_update__shutdown_restart_vms/tasks/03_shutdown_some.yml +++ b/tests/integration/targets/version_update__shutdown_restart_vms/tasks/03_shutdown_some.yml @@ -38,7 +38,7 @@ name: scale_computing.hypercore.version_update_single_node tasks_from: shutdown_vms.yml 
vars: - scale_computing_hypercore_shutdown_vms: "{{ vm_info_a }}" + version_update_single_node_shutdown_vms: "{{ vm_info_a }}" # ------------------------------------------------------------------------------------------------------------------ # after shutdown @@ -62,7 +62,7 @@ name: scale_computing.hypercore.version_update_single_node tasks_from: restart_vms.yml vars: - scale_computing_hypercore_restart_vms: "{{ vm_info_a }}" + version_update_single_node_restart_vms: "{{ vm_info_a }}" # ------------------------------------------------------------------------------------------------------------------ # after restart diff --git a/tests/integration/targets/version_update__shutdown_restart_vms/tasks/04_shutdown_tagged_a.yml b/tests/integration/targets/version_update__shutdown_restart_vms/tasks/04_shutdown_tagged_a.yml index 06d26a771..078c63206 100644 --- a/tests/integration/targets/version_update__shutdown_restart_vms/tasks/04_shutdown_tagged_a.yml +++ b/tests/integration/targets/version_update__shutdown_restart_vms/tasks/04_shutdown_tagged_a.yml @@ -31,8 +31,8 @@ name: scale_computing.hypercore.version_update_single_node tasks_from: shutdown_vms.yml vars: - scale_computing_hypercore_shutdown_vms: "{{ vm_info_a }}" - scale_computing_hypercore_shutdown_tags: + version_update_single_node_shutdown_vms: "{{ vm_info_a }}" + version_update_single_node_shutdown_tags: - ci_live_migrate__no_a # ------------------------------------------------------------------------------------------------------------------ @@ -77,7 +77,7 @@ name: scale_computing.hypercore.version_update_single_node tasks_from: restart_vms.yml vars: - scale_computing_hypercore_restart_vms: "{{ vm_info_a }}" + version_update_single_node_restart_vms: "{{ vm_info_a }}" # ------------------------------------------------------------------------------------------------------------------ # after restart diff --git a/tests/integration/targets/version_update__shutdown_restart_vms/tasks/05_shutdown_tagged_bc.yml 
b/tests/integration/targets/version_update__shutdown_restart_vms/tasks/05_shutdown_tagged_bc.yml index 75b8b139e..fbed9ad28 100644 --- a/tests/integration/targets/version_update__shutdown_restart_vms/tasks/05_shutdown_tagged_bc.yml +++ b/tests/integration/targets/version_update__shutdown_restart_vms/tasks/05_shutdown_tagged_bc.yml @@ -31,8 +31,8 @@ name: scale_computing.hypercore.version_update_single_node tasks_from: shutdown_vms.yml vars: - scale_computing_hypercore_shutdown_vms: "{{ vm_info_a }}" - scale_computing_hypercore_shutdown_tags: + version_update_single_node_shutdown_vms: "{{ vm_info_a }}" + version_update_single_node_shutdown_tags: - ci_live_migrate__no_b - ci_live_migrate__yes_c @@ -79,7 +79,7 @@ name: scale_computing.hypercore.version_update_single_node tasks_from: restart_vms.yml vars: - scale_computing_hypercore_restart_vms: "{{ vm_info_a }}" + version_update_single_node_restart_vms: "{{ vm_info_a }}" # ------------------------------------------------------------------------------------------------------------------ # after restart diff --git a/tests/integration/targets/version_update__shutdown_restart_vms/tasks/main.yml b/tests/integration/targets/version_update__shutdown_restart_vms/tasks/main.yml index 2a78919ec..08ba69958 100644 --- a/tests/integration/targets/version_update__shutdown_restart_vms/tasks/main.yml +++ b/tests/integration/targets/version_update__shutdown_restart_vms/tasks/main.yml @@ -22,7 +22,7 @@ - "{{ vm_name_a }}" - "{{ vm_name_b }}" - "{{ vm_name_c }}" - scale_computing_hypercore_shutdown_wait_time: 30 + version_update_single_node_shutdown_wait_time: 30 block: - name: Check vm_shutdown_restart_allow_string value @@ -38,5 +38,5 @@ vars: # Porteus VM needs about 20 sec to shut down. # Use longer wait time, we want to check VM did shut down, without exceeding the wait time. 
- scale_computing_hypercore_shutdown_wait_time: 300 + version_update_single_node_shutdown_wait_time: 300 - include_tasks: 05_shutdown_tagged_bc.yml diff --git a/tests/unit/plugins/modules/test_cluster_name.py b/tests/unit/plugins/modules/test_cluster_name.py index a49ac519c..57ccd8332 100644 --- a/tests/unit/plugins/modules/test_cluster_name.py +++ b/tests/unit/plugins/modules/test_cluster_name.py @@ -22,8 +22,8 @@ def test_all_params(self, run_main, mocker): params = dict( cluster_instance=dict( host="https://0.0.0.0", - username=None, - password=None, + username="myuser", + password="mypass", ), name_new="updated_name", ) @@ -42,8 +42,8 @@ def test_required(self, run_main): params = dict( cluster_instance=dict( host="https://0.0.0.0", - username=None, - password=None, + username="myuser", + password="mypass", ), ) diff --git a/tests/unit/plugins/modules/test_support_tunnel.py b/tests/unit/plugins/modules/test_support_tunnel.py index 0d02f95b7..2e33e32c2 100644 --- a/tests/unit/plugins/modules/test_support_tunnel.py +++ b/tests/unit/plugins/modules/test_support_tunnel.py @@ -22,8 +22,8 @@ def test_all_params(self, run_main): params = dict( cluster_instance=dict( host="https://0.0.0.0", - username=None, - password=None, + username="myuser", + password="mypass", ), state="present", code=4422, @@ -36,8 +36,8 @@ def test_minimal_set_of_params(self, run_main): params = dict( cluster_instance=dict( host="https://0.0.0.0", - username=None, - password=None, + username="myuser", + password="mypass", ), state="absent", ) @@ -49,8 +49,8 @@ def test_required_if(self, run_main): params = dict( cluster_instance=dict( host="https://0.0.0.0", - username=None, - password=None, + username="myuser", + password="mypass", ), state="present", ) diff --git a/tests/unit/plugins/modules/test_version_update.py b/tests/unit/plugins/modules/test_version_update.py index 13d8dc1ea..c9aa5df16 100644 --- a/tests/unit/plugins/modules/test_version_update.py +++ 
b/tests/unit/plugins/modules/test_version_update.py @@ -22,8 +22,8 @@ def test_all_params(self, run_main, mocker): params = dict( cluster_instance=dict( host="https://0.0.0.0", - username=None, - password=None, + username="myuser", + password="mypass", ), icos_version="9.2.11.210763", ) @@ -36,8 +36,8 @@ def test_required(self, run_main): params = dict( cluster_instance=dict( host="https://0.0.0.0", - username=None, - password=None, + username="myuser", + password="mypass", ), ) diff --git a/tests/unit/plugins/modules/test_virtual_disk.py b/tests/unit/plugins/modules/test_virtual_disk.py index 088b5bc43..1a1dd8e00 100644 --- a/tests/unit/plugins/modules/test_virtual_disk.py +++ b/tests/unit/plugins/modules/test_virtual_disk.py @@ -175,7 +175,7 @@ class TestMain: ), ), ( - None, + "", "c:/location/foobar.qcow2", "present", ( @@ -188,7 +188,7 @@ class TestMain: ), ), ( - None, + "", None, "present", ( @@ -343,7 +343,7 @@ class TestMain: ), ), ( - None, + "", "c:/location/foobar.qcow2", "absent", ( @@ -356,7 +356,7 @@ class TestMain: ), ), ( - None, + "", None, "absent", ( @@ -374,8 +374,8 @@ def test_parameters_virtual_disk( self, run_main, mocker, - file_location_test, file_name_test, + file_location_test, state_test, expected_result, ) -> None: @@ -383,8 +383,8 @@ def test_parameters_virtual_disk( cluster_instance=dict( host="https://my.host.name", username="user", password="pass" ), - name=file_location_test, - source=file_name_test, + name=file_name_test, + source=file_location_test, state=state_test, ) mocker.patch(