From 17115af44e2d926c84e1e8a0c797620380ee91af Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 10:55:22 +0200 Subject: [PATCH 01/37] Fixing installer variable bug. --- install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install.sh b/install.sh index b74950a87..c8c8b8db4 100755 --- a/install.sh +++ b/install.sh @@ -210,7 +210,7 @@ ce_provision: venv_install_username: ${CONTROLLER_USER} upgrade_timer_name: upgrade_ce_provision_ansible aws_support: ${AWS_SUPPORT} - new_user: ${CONTROLLER_USER} + new_user: true username: ${CONTROLLER_USER} ssh_key_bits: "521" ssh_key_type: ed25519 From 176df44f01cc27a353dc28eae8d5f4da2521421f Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 11:11:21 +0200 Subject: [PATCH 02/37] Fixing tests for external PRs. --- .github/workflows/ce-provision-test-gitlab.yml | 2 +- .github/workflows/ce-provision-test-web.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ce-provision-test-gitlab.yml b/.github/workflows/ce-provision-test-gitlab.yml index 9e5b46a30..566ea1377 100644 --- a/.github/workflows/ce-provision-test-gitlab.yml +++ b/.github/workflows/ce-provision-test-gitlab.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/curl -LO https://raw.githubusercontent.com/codeenigma/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh + /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.owner.login }}/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall diff --git a/.github/workflows/ce-provision-test-web.yml b/.github/workflows/ce-provision-test-web.yml index 595905064..c8ae20bbe 100644 --- a/.github/workflows/ce-provision-test-web.yml +++ b/.github/workflows/ce-provision-test-web.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/curl -LO https://raw.githubusercontent.com/codeenigma/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh + /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.owner.login }}/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall From 24845320411170dc5ac23d90c05837de38183dbe Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 11:15:22 +0200 Subject: [PATCH 03/37] Testing with a fork. 
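For a PR opened from a fork, install.sh has to be fetched from the
contributor's copy of the repository, because the branch under test does
not exist on codeenigma/ce-provision. raw.githubusercontent.com URLs take
the form /OWNER/REPO/REF/PATH, so for a hypothetical fork owner "jane"
working on a branch "fix-installer" the download needs to resolve to:

    https://raw.githubusercontent.com/jane/ce-provision/fix-installer/install.sh

Note that github.event.pull_request.head.repo.name only supplies the REPO
segment; the OWNER segment is reinstated in the next commit.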
--- .github/workflows/ce-provision-test-gitlab.yml | 2 +- .github/workflows/ce-provision-test-web.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ce-provision-test-gitlab.yml b/.github/workflows/ce-provision-test-gitlab.yml index 566ea1377..9d6670617 100644 --- a/.github/workflows/ce-provision-test-gitlab.yml +++ b/.github/workflows/ce-provision-test-gitlab.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.owner.login }}/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh + /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.name }}/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall diff --git a/.github/workflows/ce-provision-test-web.yml b/.github/workflows/ce-provision-test-web.yml index c8ae20bbe..509d0e2d3 100644 --- a/.github/workflows/ce-provision-test-web.yml +++ b/.github/workflows/ce-provision-test-web.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.owner.login }}/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh + /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.name }}/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall From cde9a6037638f76b2e2615146fb2550eaaf4d820 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 11:18:41 +0200 Subject: [PATCH 04/37] Adding repo owner's username into installer string. 
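With this change the raw URL is assembled entirely from the pull request
event payload:

    https://raw.githubusercontent.com/OWNER/REPO/BRANCH/install.sh

    OWNER  = github.event.pull_request.head.repo.owner.login
    REPO   = github.event.pull_request.head.repo.name
    BRANCH = github.event.pull_request.head.ref

so the installer is downloaded from the fork when the PR comes from one,
and from this repository when the branch is local.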
--- .github/workflows/ce-provision-test-gitlab.yml | 2 +- .github/workflows/ce-provision-test-web.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ce-provision-test-gitlab.yml b/.github/workflows/ce-provision-test-gitlab.yml index 9d6670617..6da7b5a4b 100644 --- a/.github/workflows/ce-provision-test-gitlab.yml +++ b/.github/workflows/ce-provision-test-gitlab.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.name }}/${{ github.event.pull_request.head.ref }}/install.sh + /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.owner.login }}/${{ github.event.pull_request.head.repo.name }}/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall diff --git a/.github/workflows/ce-provision-test-web.yml b/.github/workflows/ce-provision-test-web.yml index 509d0e2d3..e95bf6337 100644 --- a/.github/workflows/ce-provision-test-web.yml +++ b/.github/workflows/ce-provision-test-web.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.name }}/${{ github.event.pull_request.head.ref }}/install.sh + /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.owner.login }}/${{ github.event.pull_request.head.repo.name }}/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall From a2c4bac692aa51a4eee16f268988fc7cdfe42a4c Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 11:35:23 +0200 Subject: [PATCH 05/37] Refactoring config repo detection to simplify. --- roles/debian/ce_provision/tasks/main.yml | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/roles/debian/ce_provision/tasks/main.yml b/roles/debian/ce_provision/tasks/main.yml index 792eea3d6..0ccd6e680 100644 --- a/roles/debian/ce_provision/tasks/main.yml +++ b/roles/debian/ce_provision/tasks/main.yml @@ -57,11 +57,6 @@ filename: "{{ ce_provision.username }}" when: _ce_provision_username != ce_provision.username -# This prevent the original var to be re-evaluated when we move things around. -- name: Register config repository. - ansible.builtin.set_fact: - ce_provision_has_config_repo: "{{ 'yes' if ce_provision.config_repository else 'no' }}" - - name: Ensure APT dependencies are installed. ansible.builtin.apt: pkg: ["git", "parallel"] @@ -102,7 +97,7 @@ become: true become_user: "{{ ce_provision.username }}" when: - - ce_provision_has_config_repo + - ce_provision.config_repository | length > 0 - not ce_provision.config_repository_skip_checkout - name: Create defaults folders. @@ -111,13 +106,13 @@ state: directory with_items: - hosts - when: not ce_provision_has_config_repo + when: not ce_provision.config_repository | length > 0 - name: Create default config. ansible.builtin.copy: src: ansible.cfg dest: "{{ ce_provision.local_dir }}/ansible.cfg" - when: not ce_provision_has_config_repo + when: not ce_provision.config_repository | length > 0 - name: Symlink config folders to /etc/ansible. 
ansible.builtin.file: @@ -129,7 +124,7 @@ - files - templates - ansible.cfg - when: ce_provision_has_config_repo + when: ce_provision.config_repository | length > 0 - name: Create data dir. ansible.builtin.file: From 7ce204b56cfe7039f4a462fa97be46a824cd7fad Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 11:48:58 +0200 Subject: [PATCH 06/37] No longer permitted to use an integer as a truthy value. --- roles/debian/user_ansible/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/debian/user_ansible/tasks/main.yml b/roles/debian/user_ansible/tasks/main.yml index 93290f410..5f138f26a 100644 --- a/roles/debian/user_ansible/tasks/main.yml +++ b/roles/debian/user_ansible/tasks/main.yml @@ -13,7 +13,7 @@ with_items: "{{ user_ansible.groups }}" loop_control: loop_var: group - when: user_ansible.groups | length + when: user_ansible.groups | length > 0 - name: Create the system user. ansible.builtin.user: @@ -74,7 +74,7 @@ owner: "{{ user_ansible.username }}" group: "{{ user_ansible.username }}" mode: '0600' - when: user_ansible.known_hosts | length + when: user_ansible.known_hosts | length > 0 - name: Add public keys to known_hosts. ansible.builtin.known_hosts: From c7ae00387857c07e59e8ef8a9e74d0c3a1dee172 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 11:56:53 +0200 Subject: [PATCH 07/37] No longer permitted to use existence check as a truthy value. --- roles/_init/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/_init/tasks/main.yml b/roles/_init/tasks/main.yml index c401fefb3..82668f566 100644 --- a/roles/_init/tasks/main.yml +++ b/roles/_init/tasks/main.yml @@ -121,9 +121,9 @@ - name: Load custom vars file. ansible.builtin.include_tasks: allowed_vars.yml when: - - _init.ce_provision_extra_repository - - _init.ce_provision_extra_repository_vars_file - - _init.ce_provision_extra_repository_allowed_vars + - _init.ce_provision_extra_repository | length > 0 + - _init.ce_provision_extra_repository_vars_file | length > 0 + - _init.ce_provision_extra_repository_allowed_vars | length > 0 # Install Ansible under the controller user for all servers # Ensure ansible_connection == 'ssh' (i.e. we are connecting to a server) before executing From 6379b2e39df24121df90b8a6fae8e7a1faf9f22e Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 12:27:22 +0200 Subject: [PATCH 08/37] Can't see a reason why linotp var shouldn't be a boolean. --- roles/debian/apt_unattended_upgrades/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/debian/apt_unattended_upgrades/defaults/main.yml b/roles/debian/apt_unattended_upgrades/defaults/main.yml index 855c7f924..a63f3e140 100644 --- a/roles/debian/apt_unattended_upgrades/defaults/main.yml +++ b/roles/debian/apt_unattended_upgrades/defaults/main.yml @@ -4,7 +4,7 @@ _apt_unattended_upgrades_default_origins: - "origin=Debian,codename=${distro_codename}-security,label=Debian-Security" apt_unattended_upgrades: enable: true - linotp: "false" + linotp: false # unattended-upgrades template vars. # booleans must be strings to avoid Jinja2 interpretting. origins: "{{ _apt_unattended_upgrades_default_origins }}" From 318f532d6e145c044d901f90b3f3b5572df22de5 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 12:38:29 +0200 Subject: [PATCH 09/37] No longer permitted to use existence check as a truthy value. 
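Same class of fix as the earlier truthy commits: newer Ansible treats a
bare non-boolean variable in a `when:` condition as an error rather than
coercing it, so existence-style checks on strings become explicit length
comparisons. A minimal before/after sketch of the pattern:

    # rejected: a string used as a boolean
    when: _init.ce_provision_extra_repository

    # accepted: explicit test
    when: _init.ce_provision_extra_repository | length > 0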
--- roles/_exit/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/_exit/tasks/main.yml b/roles/_exit/tasks/main.yml index 51d676278..b9dce908d 100644 --- a/roles/_exit/tasks/main.yml +++ b/roles/_exit/tasks/main.yml @@ -3,8 +3,8 @@ - name: Generate/Update custom vars file. ansible.builtin.include_tasks: allowed_vars.yml when: - - _init.ce_provision_extra_repository - - _init.ce_provision_extra_repository_vars_file + - _init.ce_provision_extra_repository | length > 0 + - _init.ce_provision_extra_repository_vars_file | length > 0 - _init.ce_provision_extra_repository_push - name: Store current playbook md5. From 1466d24f87123b195af6e77b2f4ea755a9e68704 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 12:47:27 +0200 Subject: [PATCH 10/37] Fixing truthy errors in ce_deploy role. --- roles/debian/ce_deploy/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/debian/ce_deploy/tasks/main.yml b/roles/debian/ce_deploy/tasks/main.yml index 15f2265dd..526d2bf86 100644 --- a/roles/debian/ce_deploy/tasks/main.yml +++ b/roles/debian/ce_deploy/tasks/main.yml @@ -62,7 +62,7 @@ version: "{{ ce_deploy.config_repository_branch | default('main') }}" become: false delegate_to: localhost - when: ce_deploy.config_repository is defined and ce_deploy.config_repository + when: ce_deploy.config_repository is defined and ce_deploy.config_repository | length > 0 - name: Synchronize config directory. ansible.posix.synchronize: @@ -71,7 +71,7 @@ delete: true rsync_opts: - "--chown={{ ce_deploy.username }}:{{ ce_deploy.username }}" - when: ce_deploy.config_repository is defined and ce_deploy.config_repository + when: ce_deploy.config_repository is defined and ce_deploy.config_repository | length > 0 - name: Check if we have a config directory. ansible.builtin.stat: @@ -81,7 +81,7 @@ - name: Register config repository. ansible.builtin.set_fact: key_value: ce_deploy_has_config_repo - ce_deploy_has_config_repo: "{{ 'yes' if ce_deploy_config_repo.stat.isdir is defined and ce_deploy_config_repo.stat.isdir else 'no' }}" + ce_deploy_has_config_repo: "{{ true if ce_deploy_config_repo.stat.isdir is defined and ce_deploy_config_repo.stat.isdir else false }}" - name: Create defaults folders. ansible.builtin.file: From 3c14dfa005a2ece232f3ea65574d8666e93d2fef Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 12:58:45 +0200 Subject: [PATCH 11/37] No longer permitted to use an integer as a truthy value. --- roles/debian/ssh_server/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/debian/ssh_server/tasks/main.yml b/roles/debian/ssh_server/tasks/main.yml index 8d52d8eee..47c07ed41 100644 --- a/roles/debian/ssh_server/tasks/main.yml +++ b/roles/debian/ssh_server/tasks/main.yml @@ -18,7 +18,7 @@ with_items: "{{ sshd.groups }}" loop_control: loop_var: group - when: sshd.groups | length + when: sshd.groups | length > 0 - name: Generate group section of the sshd_config file. ansible.builtin.blockinfile: @@ -29,7 +29,7 @@ with_items: "{{ sshd.groups }}" loop_control: loop_var: group - when: sshd.groups | length + when: sshd.groups | length > 0 - name: Generate user section of the sshd_config file. 
ansible.builtin.blockinfile: @@ -40,7 +40,7 @@ with_items: "{{ sshd.users }}" loop_control: loop_var: users - when: sshd.users | length + when: sshd.users | length > 0 # - name: Trigger overrides # include_role: From 9472416296a10db67de2321dc1092ca5f55a991a Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Mon, 22 Sep 2025 13:27:41 +0200 Subject: [PATCH 12/37] Updating clamav command to use flock avoiding duplicate processes running. --- roles/debian/clamav/defaults/main.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/roles/debian/clamav/defaults/main.yml b/roles/debian/clamav/defaults/main.yml index 4706b665f..de2ee3029 100644 --- a/roles/debian/clamav/defaults/main.yml +++ b/roles/debian/clamav/defaults/main.yml @@ -12,12 +12,12 @@ clamav: # scheduled scans, set to an empty list for no timers timers: - clamscan_daily: - timer_command: /usr/local/clamav/script/clamscan_daily # path to clamscan wrapper script, ensure it is defined in clamav.scripts - timer_OnCalendar: "*-*-* 02:30:00" # see systemd.time documentation - https://www.freedesktop.org/software/systemd/man/latest/systemd.time.html#Calendar%20Events - server_name: "{{ inventory_hostname }}" # for identification via email, defaults to Ansible inventory name. + timer_command: /usr/bin/flock -n /var/run/clamscan.lock -c /usr/local/clamav/script/clamscan_daily # command to run clamscan wrapper script, ensure script location is defined in clamav.scripts + timer_OnCalendar: "*-*-* 02:30:00" # see systemd.time documentation - https://www.freedesktop.org/software/systemd/man/latest/systemd.time.html#Calendar%20Events + server_name: "{{ inventory_hostname }}" # for identification via email, defaults to Ansible inventory name. log_location: /var/log/clamav - send_mail: false # Important - will not send any emails by default. - send_on_fail: true # Only sends emails on scan failure, will not email for successful scans. + send_mail: false # Important - will not send any emails by default. + send_on_fail: true # Only sends emails on scan failure, will not email for successful scans. report_recipient_email: mail@example.com report_sender_email: admin@server.example.com - install_clamdscan: false # flag to install additional 'clamdscan' package + install_clamdscan: false # flag to install additional 'clamdscan' package From c75b16a8395dc5ab6c5bdabab68afe7940b76f99 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 24 Sep 2025 13:08:05 +0200 Subject: [PATCH 13/37] More truthy length fixes. 
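As before, bare `|length` truthy checks become explicit `|length > 0`
comparisons, this time sweeping the remaining roles and the Jinja2
templates as well for consistency, e.g. in the CloudWatch log config
templates:

    {% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length > 0 %}
    "log_group_name": "{{ nginx.log_group_prefix }}nginx-access",
    {% else %}
    "log_group_name": "nginx-access",
    {% endif %}

A few of the touched templates also gain their missing trailing newlines.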
--- roles/_overrides/tasks/main.yml | 4 ++-- roles/aws/aws_ami/templates/packer.json.j2 | 6 +++--- roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml | 4 ++-- roles/aws/aws_elb/tasks/main.yml | 2 +- .../aws/aws_iam_saml/templates/simplesamlphp_sp.j2 | 2 +- .../apache/templates/cloudwatch-main.json.j2 | 6 +++--- .../apache/templates/cloudwatch-vhost.json.j2 | 10 +++++----- .../aws_cloudwatch_agent/templates/config.json.j2 | 14 +++++++------- .../templates/include-exclude-filelist.j2 | 2 +- .../debian/nginx/templates/cloudwatch-main.json.j2 | 4 ++-- .../nginx/templates/cloudwatch-vhost.json.j2 | 8 ++++---- .../templates/headless-openvpn-install.sh.j2 | 4 ++-- .../templates/cloudwatch-php-fpm-fixedport.json.j2 | 8 ++++---- .../php-fpm/templates/cloudwatch-php-fpm.json.j2 | 8 ++++---- roles/debian/postfix/templates/transport.j2 | 2 +- roles/debian/ssh_server/templates/sshd_config.j2 | 4 ++-- 16 files changed, 44 insertions(+), 44 deletions(-) diff --git a/roles/_overrides/tasks/main.yml b/roles/_overrides/tasks/main.yml index 18365b2e6..3fcfdd4eb 100644 --- a/roles/_overrides/tasks/main.yml +++ b/roles/_overrides/tasks/main.yml @@ -6,7 +6,7 @@ loop_var: override_file when: - _overrides.files is defined - - _overrides.files | length + - _overrides.files|length > 0 - name: Generate links overrides. ansible.builtin.include_tasks: link.yml @@ -15,4 +15,4 @@ loop_var: override_link when: - _overrides.links is defined - - _overrides.links | length + - _overrides.links|length > 0 diff --git a/roles/aws/aws_ami/templates/packer.json.j2 b/roles/aws/aws_ami/templates/packer.json.j2 index faa3074a1..0a27cdbc7 100755 --- a/roles/aws/aws_ami/templates/packer.json.j2 +++ b/roles/aws/aws_ami/templates/packer.json.j2 @@ -31,7 +31,7 @@ "owners": ["{{ aws_ami.owner }}"], "most_recent": true }, - {% if aws_ami.vpc_filter is defined and aws_ami.vpc_filter | length > 0 %} + {% if aws_ami.vpc_filter is defined and aws_ami.vpc_filter|length > 0 %} "vpc_filter": { "filters": { "tag:Name": "{{ aws_ami.vpc_filter }}" @@ -53,7 +53,7 @@ "playbook_file": "{{ aws_ami.playbook_file }}", "inventory_directory": "{{ _ce_provision_base_dir }}/hosts", "ssh_authorized_key_file": "/home/{{ user_provision.username }}/.ssh/{{ aws_ami.public_key_name }}", - {% if aws_ami.groups is defined and aws_ami.groups | length %} + {% if aws_ami.groups is defined and aws_ami.groups|length > 0 %} "groups": {{ aws_ami.groups | to_json }}, {% endif %} "ansible_env_vars": @@ -68,7 +68,7 @@ {% if ansible_verbosity >= 1 %} "-vvvv", {% endif %} - {% if _aws_ami_extra_vars is defined and _aws_ami_extra_vars | length %} + {% if _aws_ami_extra_vars is defined and _aws_ami_extra_vars|length > 0 %} "--extra-vars", "{{ _aws_ami_extra_vars }}", {% endif %} diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index 2bb19d861..f92b3c0df 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -27,7 +27,7 @@ - name: Use provided VPC id. ansible.builtin.set_fact: _aws_ec2_autoscale_cluster_vpc_id: "{{ aws_ec2_autoscale_cluster.vpc_id }}" - when: aws_ec2_autoscale_cluster.vpc_name is not defined or (aws_ec2_autoscale_cluster.vpc_name | length) == 0 + when: (aws_ec2_autoscale_cluster.vpc_name is not defined) or (aws_ec2_autoscale_cluster.vpc_name|length == 0) - name: Create matching Security Group. 
ansible.builtin.include_role: @@ -415,7 +415,7 @@ _aws_ec2_autoscale_cluster_listeners: "{{ _aws_ec2_autoscale_cluster_listeners + aws_ec2_autoscale_cluster.listeners }}" when: - aws_ec2_autoscale_cluster is defined - - aws_ec2_autoscale_cluster | length + - aws_ec2_autoscale_cluster | length > 0 - aws_ec2_autoscale_cluster.create_elb - name: Generate security group information for the ALB. diff --git a/roles/aws/aws_elb/tasks/main.yml b/roles/aws/aws_elb/tasks/main.yml index 9ea51db71..8c988ff1c 100644 --- a/roles/aws/aws_elb/tasks/main.yml +++ b/roles/aws/aws_elb/tasks/main.yml @@ -111,7 +111,7 @@ _aws_ec2_listeners: "{{ _aws_ec2_listeners + aws_elb.listeners }}" when: - aws_elb is defined - - aws_elb | length + - aws_elb | length > 0 - name: Generate security group information. ansible.builtin.include_role: diff --git a/roles/aws/aws_iam_saml/templates/simplesamlphp_sp.j2 b/roles/aws/aws_iam_saml/templates/simplesamlphp_sp.j2 index 509fd4dbe..d931cdbaa 100644 --- a/roles/aws/aws_iam_saml/templates/simplesamlphp_sp.j2 +++ b/roles/aws/aws_iam_saml/templates/simplesamlphp_sp.j2 @@ -55,7 +55,7 @@ $metadata['urn:amazon:{{ _aws_account_info.account }}'] = array ( 'groups' => 'urn:oid:1.3.6.1.4.1.5923.1.1.1.1', ), -{% if aws_iam_saml.linotp_server is defined and aws_iam_saml.linotp_server|length %} +{% if aws_iam_saml.linotp_server is defined and aws_iam_saml.linotp_server|length > 0 %} # LinOTP settings 55 => array( 'class' => 'linotp2:OTP', diff --git a/roles/debian/apache/templates/cloudwatch-main.json.j2 b/roles/debian/apache/templates/cloudwatch-main.json.j2 index e5e899a15..38b8a0772 100644 --- a/roles/debian/apache/templates/cloudwatch-main.json.j2 +++ b/roles/debian/apache/templates/cloudwatch-main.json.j2 @@ -5,7 +5,7 @@ "collect_list": [ { "file_path": "/var/log/apache2/access.log", -{% if apache.log_group_prefix is defined and apache.log_group_prefix|length %} +{% if apache.log_group_prefix is defined and apache.log_group_prefix|length > 0 %} "log_group_name": "{{ apache.log_group_prefix }}apache-access", {% else %} "log_group_name": "apache-access", @@ -14,7 +14,7 @@ }, { "file_path": "/var/log/apache2/error.log", -{% if apache.log_group_prefix is defined and apache.log_group_prefix|length %} +{% if apache.log_group_prefix is defined and apache.log_group_prefix|length > 0 %} "log_group_name": "{{ apache.log_group_prefix }}apache-error", {% else %} "log_group_name": "apache-error", @@ -25,4 +25,4 @@ } } } -} \ No newline at end of file +} diff --git a/roles/debian/apache/templates/cloudwatch-vhost.json.j2 b/roles/debian/apache/templates/cloudwatch-vhost.json.j2 index 331e30ff5..7299936c0 100644 --- a/roles/debian/apache/templates/cloudwatch-vhost.json.j2 +++ b/roles/debian/apache/templates/cloudwatch-vhost.json.j2 @@ -5,12 +5,12 @@ "collect_list": [ { "file_path": "{{ domain.access_log }}", -{% if apache.log_group_prefix is defined and apache.log_group_prefix|length %} +{% if apache.log_group_prefix is defined and apache.log_group_prefix|length > 0 %} "log_group_name": "{{ apache.log_group_prefix }}apache2-access", {% else %} "log_group_name": "apache2-access", {% endif %} -{% if domain.log_stream_name is defined and domain.log_stream_name|length %} +{% if domain.log_stream_name is defined and domain.log_stream_name|length > 0 %} "log_stream_name": "{{ domain.log_stream_name }}" {% else %} "log_stream_name": "{{ apache.log_stream_name }}" @@ -18,12 +18,12 @@ }, { "file_path": "{{ domain.error_log }}", -{% if apache.log_group_prefix is defined and apache.log_group_prefix|length %} 
+{% if apache.log_group_prefix is defined and apache.log_group_prefix|length > 0 %} "log_group_name": "{{ apache.log_group_prefix }}apache2-error", {% else %} "log_group_name": "apache2-error", {% endif %} -{% if domain.log_stream_name is defined and domain.log_stream_name|length %} +{% if domain.log_stream_name is defined and domain.log_stream_name|length > 0 %} "log_stream_name": "{{ domain.log_stream_name }}" {% else %} "log_stream_name": "{{ apache.log_stream_name }}" @@ -33,4 +33,4 @@ } } } -} \ No newline at end of file +} diff --git a/roles/debian/aws_cloudwatch_agent/templates/config.json.j2 b/roles/debian/aws_cloudwatch_agent/templates/config.json.j2 index 169ea4c53..6dce2d3fe 100755 --- a/roles/debian/aws_cloudwatch_agent/templates/config.json.j2 +++ b/roles/debian/aws_cloudwatch_agent/templates/config.json.j2 @@ -9,7 +9,7 @@ "collect_list": [ { "file_path": "/var/log/syslog", -{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length %} +{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length > 0 %} "log_group_name": "{{aws_cloudwatch_agent.log_group_prefix}}syslog", {% else %} "log_group_name": "syslog", @@ -18,7 +18,7 @@ }, { "file_path": "/var/log/auth.log", -{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length %} +{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length > 0 %} "log_group_name": "{{aws_cloudwatch_agent.log_group_prefix}}auth", {% else %} "log_group_name": "auth", @@ -27,7 +27,7 @@ }, { "file_path": "/var/log/daemon.log", -{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length %} +{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length > 0 %} "log_group_name": "{{aws_cloudwatch_agent.log_group_prefix}}daemon", {% else %} "log_group_name": "daemon", @@ -36,7 +36,7 @@ }, { "file_path": "/var/log/messages", -{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length %} +{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length > 0 %} "log_group_name": "{{aws_cloudwatch_agent.log_group_prefix}}messages", {% else %} "log_group_name": "messages", @@ -45,7 +45,7 @@ }, { "file_path": "/var/log/alternatives.log", -{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length %} +{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length > 0 %} "log_group_name": "{{aws_cloudwatch_agent.log_group_prefix}}alternatives", {% else %} "log_group_name": "alternatives", @@ -57,7 +57,7 @@ } }, "metrics": { -{% if aws_cloudwatch_agent.metrics_namespace is defined and aws_cloudwatch_agent.metrics_namespace|length %} +{% if aws_cloudwatch_agent.metrics_namespace is defined and aws_cloudwatch_agent.metrics_namespace|length > 0 %} "namespace": "{{ aws_cloudwatch_agent.metrics_namespace }}", {% endif %} "append_dimensions": { @@ -110,4 +110,4 @@ } } } -} \ No newline at end of file +} diff --git a/roles/debian/duplicity/templates/include-exclude-filelist.j2 b/roles/debian/duplicity/templates/include-exclude-filelist.j2 index bf491cd28..61f745439 100644 --- a/roles/debian/duplicity/templates/include-exclude-filelist.j2 +++ b/roles/debian/duplicity/templates/include-exclude-filelist.j2 @@ -1,5 +1,5 @@ {% for rule in dir.rules %} 
-{% if rule|length %} +{% if rule|length > 0 %} {{ rule }} {% endif %} {% endfor %} diff --git a/roles/debian/nginx/templates/cloudwatch-main.json.j2 b/roles/debian/nginx/templates/cloudwatch-main.json.j2 index 8ba152202..4c17bb09b 100644 --- a/roles/debian/nginx/templates/cloudwatch-main.json.j2 +++ b/roles/debian/nginx/templates/cloudwatch-main.json.j2 @@ -5,7 +5,7 @@ "collect_list": [ { "file_path": "/var/log/nginx/access.log", -{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length %} +{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length > 0 %} "log_group_name": "{{ nginx.log_group_prefix }}nginx-access", {% else %} "log_group_name": "nginx-access", @@ -14,7 +14,7 @@ }, { "file_path": "/var/log/nginx/error.log", -{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length %} +{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length > 0 %} "log_group_name": "{{ nginx.log_group_prefix }}nginx-error", {% else %} "log_group_name": "nginx-error", diff --git a/roles/debian/nginx/templates/cloudwatch-vhost.json.j2 b/roles/debian/nginx/templates/cloudwatch-vhost.json.j2 index 285252767..a278f674d 100644 --- a/roles/debian/nginx/templates/cloudwatch-vhost.json.j2 +++ b/roles/debian/nginx/templates/cloudwatch-vhost.json.j2 @@ -5,12 +5,12 @@ "collect_list": [ { "file_path": "{{ domain.access_log }}", -{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length %} +{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length > 0 %} "log_group_name": "{{ nginx.log_group_prefix }}nginx-access", {% else %} "log_group_name": "nginx-access", {% endif %} -{% if domain.log_stream_name is defined and domain.log_stream_name|length %} +{% if domain.log_stream_name is defined and domain.log_stream_name|length > 0 %} "log_stream_name": "{{ domain.log_stream_name }}" {% else %} "log_stream_name": "{{ nginx.log_stream_name }}" @@ -18,12 +18,12 @@ }, { "file_path": "{{ domain.error_log }}", -{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length %} +{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length > 0 %} "log_group_name": "{{ nginx.log_group_prefix }}nginx-error", {% else %} "log_group_name": "nginx-error", {% endif %} -{% if domain.log_stream_name is defined and domain.log_stream_name|length %} +{% if domain.log_stream_name is defined and domain.log_stream_name|length > 0 %} "log_stream_name": "{{ domain.log_stream_name }}" {% else %} "log_stream_name": "{{ nginx.log_stream_name }}" diff --git a/roles/debian/openvpn/templates/headless-openvpn-install.sh.j2 b/roles/debian/openvpn/templates/headless-openvpn-install.sh.j2 index 2d078a5f9..0b36d94d8 100644 --- a/roles/debian/openvpn/templates/headless-openvpn-install.sh.j2 +++ b/roles/debian/openvpn/templates/headless-openvpn-install.sh.j2 @@ -13,12 +13,12 @@ export COMPRESSION_CHOICE={{ openvpn.compression_choice }} export CUSTOMIZE_ENC=n export CLIENT={{ openvpn.test_username }} export PASS=1 -{% if openvpn.nat_endpoint is defined and openvpn.nat_endpoint | length %} +{% if openvpn.nat_endpoint is defined and openvpn.nat_endpoint | length > 0 %} export ENDPOINT={{ openvpn.nat_endpoint }} {% endif %} {% if openvpn.dns | int == 13 %} export DNS1={{ openvpn.dns1 }} -{% if openvpn.dns2 is defined and openvpn.dns2 | length %} +{% if openvpn.dns2 is defined and openvpn.dns2 | length > 0 %} export DNS2={{ openvpn.dns2 }} {% endif %} {% endif %} diff --git a/roles/debian/php-fpm/templates/cloudwatch-php-fpm-fixedport.json.j2 
b/roles/debian/php-fpm/templates/cloudwatch-php-fpm-fixedport.json.j2 index 74523ecdf..e5d5ba9eb 100644 --- a/roles/debian/php-fpm/templates/cloudwatch-php-fpm-fixedport.json.j2 +++ b/roles/debian/php-fpm/templates/cloudwatch-php-fpm-fixedport.json.j2 @@ -5,12 +5,12 @@ "collect_list": [ { "file_path": "/var/log/php{{ php.version[0] }}-fpm.log", -{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length %} +{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length > 0 %} "log_group_name": "{{ php.fpm.log_group_prefix }}php{{ php.version[0] }}", {% else %} "log_group_name": "php", {% endif %} -{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length %} +{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length > 0 %} "log_stream_name": "{{ php.fpm.log_stream_name }}" {% else %} "log_stream_name": "php-fpm" @@ -18,12 +18,12 @@ }, { "file_path": "{{ php.fpm.slowlog_file_directory }}/php{{ php.version[0] }}-fpm.slow.log", -{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length %} +{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length > 0 %} "log_group_name": "{{ php.fpm.log_group_prefix }}php{{ php.version[0] }}", {% else %} "log_group_name": "php", {% endif %} -{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length %} +{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length > 0 %} "log_stream_name": "{{ php.fpm.log_stream_name }}-slowlog" {% else %} "log_stream_name": "php-fpm-slowlog" diff --git a/roles/debian/php-fpm/templates/cloudwatch-php-fpm.json.j2 b/roles/debian/php-fpm/templates/cloudwatch-php-fpm.json.j2 index 19a848bf3..bfb9efab0 100644 --- a/roles/debian/php-fpm/templates/cloudwatch-php-fpm.json.j2 +++ b/roles/debian/php-fpm/templates/cloudwatch-php-fpm.json.j2 @@ -5,12 +5,12 @@ "collect_list": [ { "file_path": "/var/log/php{{ version }}-fpm.log", -{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length %} +{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length > 0 %} "log_group_name": "{{ php.fpm.log_group_prefix }}php{{ version }}", {% else %} "log_group_name": "php", {% endif %} -{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length %} +{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length > 0 %} "log_stream_name": "{{ php.fpm.log_stream_name }}" {% else %} "log_stream_name": "php-fpm" @@ -18,12 +18,12 @@ }, { "file_path": "{{ php.fpm.slowlog_file_directory }}/php{{ version }}-fpm.slow.log", -{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length %} +{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length > 0 %} "log_group_name": "{{ php.fpm.log_group_prefix }}php{{ version }}", {% else %} "log_group_name": "php", {% endif %} -{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length %} +{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length > 0 %} "log_stream_name": "{{ php.fpm.log_stream_name }}-slowlog" {% else %} "log_stream_name": "php-fpm-slowlog" diff --git a/roles/debian/postfix/templates/transport.j2 b/roles/debian/postfix/templates/transport.j2 index 098bf5265..1f053c8e2 100644 --- a/roles/debian/postfix/templates/transport.j2 +++ b/roles/debian/postfix/templates/transport.j2 @@ -1,7 +1,7 @@ {{ ansible_hostname }} : {{ ansible_fqdn }} : {% for transport in postfix.transport_maps %} -{% if transport|length %} +{% if transport|length > 0 
%} {{ transport }} {% endif %} {% endfor %} diff --git a/roles/debian/ssh_server/templates/sshd_config.j2 b/roles/debian/ssh_server/templates/sshd_config.j2 index 216792bb0..9c832dbd3 100644 --- a/roles/debian/ssh_server/templates/sshd_config.j2 +++ b/roles/debian/ssh_server/templates/sshd_config.j2 @@ -27,7 +27,7 @@ ListenAddress {{ address }} #HostKey /etc/ssh/ssh_host_ecdsa_key #HostKey /etc/ssh/ssh_host_ed25519_key {% for key in sshd.HostKey %} -{% if key|length %} +{% if key|length > 0 %} HostKey {{ key }} {% endif %} {% endfor %} @@ -119,7 +119,7 @@ UsePAM {{ sshd.UsePAM }} AllowAgentForwarding {{ sshd.AllowAgentForwarding }} AllowTcpForwarding {{ sshd.AllowTcpForwarding }} -{% if sshd.AllowGroups|length %} +{% if sshd.AllowGroups|length > 0 %} AllowGroups {{ sshd.AllowGroups }} {% endif %} GatewayPorts {{ sshd.GatewayPorts }} From 725fcf7ba1ee8887869b408305430e11ee465bc0 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 25 Sep 2025 13:03:53 +0200 Subject: [PATCH 14/37] Fixing more LDAP role truthy issues. --- roles/debian/pam_ldap/tasks/main.yml | 43 ++++++++++++++-------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/roles/debian/pam_ldap/tasks/main.yml b/roles/debian/pam_ldap/tasks/main.yml index 2445bed77..8ea7995f1 100644 --- a/roles/debian/pam_ldap/tasks/main.yml +++ b/roles/debian/pam_ldap/tasks/main.yml @@ -14,14 +14,14 @@ ansible.builtin.file: path: /etc/ldap/ssl state: directory - when: pam_ldap.ssl_certificate is defined and pam_ldap.ssl_certificate | length > 0 + when: pam_ldap.ssl_certificate|length > 0 - name: Copy certificate. ansible.builtin.copy: src: "{{ pam_ldap.ssl_certificate }}" dest: "/etc/ldap/ssl/{{ pam_ldap.ssl_certificate | basename }}" mode: "0666" - when: pam_ldap.ssl_certificate is defined and pam_ldap.ssl_certificate | length > 0 + when: pam_ldap.ssl_certificate|length > 0 - name: Copy nslcd config. ansible.builtin.template: @@ -82,25 +82,24 @@ mode: 0555 owner: root -- name: Create LDAP key script passwd file. - ansible.builtin.template: - src: ldap-bindpw.j2 - dest: /etc/ldap/ldap-bindpw - mode: "0600" - owner: root - when: - - ldap_client.binddn is defined and ldap_client.binddn - - ldap_client.bindpw is defined and ldap_client.bindpw - -- name: Create wrapper script for LDAP key script. - ansible.builtin.template: - src: ssh-getkey-ldap-wrapper.sh.j2 - dest: /usr/local/bin/ssh-getkey-ldap-wrapper.sh - mode: "0555" - owner: root +- name: LDAP password handling. when: - - ldap_client.binddn is defined and ldap_client.binddn - - ldap_client.bindpw is defined and ldap_client.bindpw + - ldap_client.binddn|length > 0 + - ldap_client.bindpw|length > 0 + block: + - name: Create LDAP key script passwd file. + ansible.builtin.template: + src: ldap-bindpw.j2 + dest: /etc/ldap/ldap-bindpw + mode: "0600" + owner: root + + - name: Create wrapper script for LDAP key script. + ansible.builtin.template: + src: ssh-getkey-ldap-wrapper.sh.j2 + dest: /usr/local/bin/ssh-getkey-ldap-wrapper.sh + mode: "0555" + owner: root - name: Configure SSH pub key command if there is a binddn set. ansible.builtin.lineinfile: @@ -108,7 +107,7 @@ regexp: "AuthorizedKeysCommand " line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap-wrapper.sh when: - - ldap_client.binddn is defined and ldap_client.binddn + - ldap_client.binddn|length > 0 - name: Configure SSH pub key command if no binddn set. 
ansible.builtin.lineinfile: @@ -116,7 +115,7 @@ regexp: "AuthorizedKeysCommand " line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap when: - - not ldap_client.binddn + - not ldap_client.binddn|length > 0 - name: Configure SSH pub key command user. ansible.builtin.lineinfile: From 612c3c55ce8b6778089a0852956718e4942fcea9 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 25 Sep 2025 13:13:27 +0200 Subject: [PATCH 15/37] Slight block refactor for LDAP. --- roles/debian/pam_ldap/tasks/main.yml | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/roles/debian/pam_ldap/tasks/main.yml b/roles/debian/pam_ldap/tasks/main.yml index 8ea7995f1..6be7670cb 100644 --- a/roles/debian/pam_ldap/tasks/main.yml +++ b/roles/debian/pam_ldap/tasks/main.yml @@ -101,13 +101,12 @@ mode: "0555" owner: root -- name: Configure SSH pub key command if there is a binddn set. - ansible.builtin.lineinfile: - path: /etc/ssh/sshd_config - regexp: "AuthorizedKeysCommand " - line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap-wrapper.sh - when: - - ldap_client.binddn|length > 0 + # We don't support bind DN with no password because if there is no password the necessary script is not created. + - name: Configure SSH pub key command if there is a binddn set. + ansible.builtin.lineinfile: + path: /etc/ssh/sshd_config + regexp: "AuthorizedKeysCommand " + line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap-wrapper.sh - name: Configure SSH pub key command if no binddn set. ansible.builtin.lineinfile: @@ -115,7 +114,7 @@ regexp: "AuthorizedKeysCommand " line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap when: - - not ldap_client.binddn|length > 0 + - not ldap_client.binddn == 0 - name: Configure SSH pub key command user. ansible.builtin.lineinfile: From 91ad6cae4add2b5adadc7945aa1d0a9d39ac5e42 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 25 Sep 2025 13:14:32 +0200 Subject: [PATCH 16/37] DN length check should not be negated. --- roles/debian/pam_ldap/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/debian/pam_ldap/tasks/main.yml b/roles/debian/pam_ldap/tasks/main.yml index 6be7670cb..021fa6c53 100644 --- a/roles/debian/pam_ldap/tasks/main.yml +++ b/roles/debian/pam_ldap/tasks/main.yml @@ -114,7 +114,7 @@ regexp: "AuthorizedKeysCommand " line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap when: - - not ldap_client.binddn == 0 + - ldap_client.binddn == 0 - name: Configure SSH pub key command user. ansible.builtin.lineinfile: From a60f424536fe977f42394c124b774f1b587a14d3 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 25 Sep 2025 13:15:39 +0200 Subject: [PATCH 17/37] Forgot to add the length filter. --- roles/debian/pam_ldap/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/debian/pam_ldap/tasks/main.yml b/roles/debian/pam_ldap/tasks/main.yml index 021fa6c53..9727b78e8 100644 --- a/roles/debian/pam_ldap/tasks/main.yml +++ b/roles/debian/pam_ldap/tasks/main.yml @@ -114,7 +114,7 @@ regexp: "AuthorizedKeysCommand " line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap when: - - ldap_client.binddn == 0 + - ldap_client.binddn|length == 0 - name: Configure SSH pub key command user. ansible.builtin.lineinfile: From 2b98f9f6733f778b861a9444ea556687a57bd480 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 13:00:29 +0200 Subject: [PATCH 18/37] Another boolean Ansible 12 error in AMI role. 
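Here the offending value is a list: under Ansible 12 a list no longer
passes as a boolean in a `when:` condition. Before/after:

    when:
      - ami_base_image.images is defined
      - ami_base_image.images

    when:
      - ami_base_image.images is defined
      - ami_base_image.images|length > 0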
--- roles/aws/aws_ami/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/aws/aws_ami/tasks/main.yml b/roles/aws/aws_ami/tasks/main.yml index 2973ee816..1ce621463 100644 --- a/roles/aws/aws_ami/tasks/main.yml +++ b/roles/aws/aws_ami/tasks/main.yml @@ -17,7 +17,7 @@ ami_base_image_latest: "{{ ami_base_image.images | sort(attribute='creation_date') | last }}" when: - ami_base_image.images is defined - - ami_base_image.images + - ami_base_image.images|length > 0 - name: Delete existing image. ansible.builtin.include_tasks: delete.yml From 52b3ce17fc2e671351a3b1efff70af99a675186b Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 14:31:45 +0200 Subject: [PATCH 19/37] ALB port must be cast as a string for RedirectAction. --- roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index f92b3c0df..e1845c65c 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -380,7 +380,7 @@ Host: "#{host}" Query: "#{query}" Path: "/#{path}" - Port: "{{ aws_ec2_autoscale_cluster.alb_https_port }}" + Port: "{{ aws_ec2_autoscale_cluster.alb_https_port|str }}" StatusCode: HTTP_301 _aws_ec2_autoscale_cluster_listeners_https: Protocol: HTTPS From 318dd420fdb40941a7466e457d4198090efd794f Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 14:34:08 +0200 Subject: [PATCH 20/37] Setting the correct Jinja filter, it's string, not str. --- roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index e1845c65c..bc0ed9271 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -380,7 +380,7 @@ Host: "#{host}" Query: "#{query}" Path: "/#{path}" - Port: "{{ aws_ec2_autoscale_cluster.alb_https_port|str }}" + Port: "{{ aws_ec2_autoscale_cluster.alb_https_port|string }}" StatusCode: HTTP_301 _aws_ec2_autoscale_cluster_listeners_https: Protocol: HTTPS From f0026b77abb7d2c3b91643a3f25f52c8362502b6 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 16:14:40 +0200 Subject: [PATCH 21/37] Fixing more Ansible 12 length issues in autoscale role. 
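Besides converting the remaining bare truthy checks to explicit
`|length > 0` comparisons, the simple and step scaling policy tasks are
regrouped under `block:` so each set of shared conditions is declared
once, roughly (see the diff for the full task bodies):

    - name: Handle simple scaling AutoScale.
      when:
        - aws_ec2_autoscale_cluster.asg_scaling_policies|length > 0
      block:
        - name: Create simple scaling AutoScale policies.
          ...
        - name: Fetch simple scaling policies.
          ...

The long-commented-out 'Copy AMI to backup region' task is dropped at
the same time.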
--- .../aws_ec2_autoscale_cluster/tasks/main.yml | 113 ++++++++---------- 1 file changed, 51 insertions(+), 62 deletions(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index bc0ed9271..901199389 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -460,8 +460,8 @@ cmd: "aws elbv2 add-listener-certificates --region {{ aws_ec2_autoscale_cluster.region }} --profile {{ aws_ec2_autoscale_cluster.aws_profile }} --listener-arn {{ _aws_ec2_autoscale_cluster_alb_listener_ARN }} --certificates CertificateArn={{ item }}" when: - aws_ec2_autoscale_cluster.create_elb - - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs - - _ssl_certificate_ARN | length > 1 + - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs|length > 0 + - _ssl_certificate_ARN|length > 1 with_items: "{{ aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs }}" # EC2 - BUILD ASG @@ -475,7 +475,7 @@ group_names: "{{ aws_ec2_autoscale_cluster.cluster_security_groups }}" return_type: ids when: - - aws_ec2_autoscale_cluster.cluster_security_groups | length > 0 + - aws_ec2_autoscale_cluster.cluster_security_groups|length > 0 - aws_ec2_autoscale_cluster.asg_refresh - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster @@ -542,60 +542,56 @@ - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster -- name: Create step scaling AutoScale policies. - community.aws.autoscaling_policy: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - state: "present" - name: "{{ item.name }}-{{ item.policy_type }}" - adjustment_type: "{{ item.adjustment_type }}" - asg_name: "{{ aws_ec2_autoscale_cluster.name }}" - scaling_adjustment: "{{ item.adjustment }}" - min_adjustment_step: "{{ item.adjustment_step }}" - metric_aggregation: "{{ item.metric_aggregation }}" - step_adjustments: "{{ item.step_adjustments }}" - when: - - aws_ec2_autoscale_cluster.asg_scaling_policies - - item.policy_type == 'StepScaling' - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - register: _aws_ec2_autoscale_cluster_step_scaling_policies - with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" - -- name: Create simple scaling AutoScale policies. - community.aws.autoscaling_policy: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - state: "present" - name: "{{ item.name }}-{{ item.policy_type }}" - adjustment_type: "{{ item.adjustment_type }}" - asg_name: "{{ aws_ec2_autoscale_cluster.name }}" - scaling_adjustment: "{{ item.adjustment }}" - min_adjustment_step: "{{ item.adjustment_step }}" - cooldown: "{{ item.cooldown }}" - when: - - aws_ec2_autoscale_cluster.asg_scaling_policies - - item.policy_type == 'SimpleScaling' - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - register: _aws_ec2_autoscale_cluster_simple_scaling_policies - with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" - -- name: Fetch step scaling policies. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_step_scaling_policies.results }}" +- name: Handle simple scaling AutoScale. 
when: - - _aws_ec2_autoscale_cluster_step_scaling_policies + - aws_ec2_autoscale_cluster.asg_scaling_policies|length > 0 + - item.policy_type == 'SimpleScaling' - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Fetch simple scaling policies. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_scaling_policies + _aws_ec2_autoscale_cluster_simple_scaling_policies.results }}" - when: - - _aws_ec2_autoscale_cluster_simple_scaling_policies + block: + - name: Create simple scaling AutoScale policies. + community.aws.autoscaling_policy: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + state: "present" + name: "{{ item.name }}-{{ item.policy_type }}" + adjustment_type: "{{ item.adjustment_type }}" + asg_name: "{{ aws_ec2_autoscale_cluster.name }}" + scaling_adjustment: "{{ item.adjustment }}" + min_adjustment_step: "{{ item.adjustment_step }}" + cooldown: "{{ item.cooldown }}" + register: _aws_ec2_autoscale_cluster_simple_scaling_policies + with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" + + - name: Fetch simple scaling policies. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_scaling_policies + _aws_ec2_autoscale_cluster_simple_scaling_policies.results }}" + +- name: Handle step scaling AustoScale. + when: + - aws_ec2_autoscale_cluster.asg_scaling_policies|length > 0 + - item.policy_type == 'StepScaling' - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster + block: + - name: Create step scaling AutoScale policies. + community.aws.autoscaling_policy: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + state: "present" + name: "{{ item.name }}-{{ item.policy_type }}" + adjustment_type: "{{ item.adjustment_type }}" + asg_name: "{{ aws_ec2_autoscale_cluster.name }}" + scaling_adjustment: "{{ item.adjustment }}" + min_adjustment_step: "{{ item.adjustment_step }}" + metric_aggregation: "{{ item.metric_aggregation }}" + step_adjustments: "{{ item.step_adjustments }}" + register: _aws_ec2_autoscale_cluster_step_scaling_policies + with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" + + - name: Fetch step scaling policies. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_step_scaling_policies.results }}" - name: Create placeholder ARN variables for scaling policies. ansible.builtin.set_fact: @@ -740,7 +736,7 @@ _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ _aws_ec2_autoscale_cluster_cloudfront_aliases + [item.domain] }}" loop: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" when: - - aws_ec2_autoscale_cluster.acm.extra_domains | length > 0 + - aws_ec2_autoscale_cluster.acm.extra_domains|length > 0 - aws_ec2_autoscale_cluster.create_elb - aws_ec2_autoscale_cluster.cloudfront.create_distribution @@ -761,7 +757,7 @@ when: - aws_ec2_autoscale_cluster.create_elb - aws_ec2_autoscale_cluster.cloudfront.create_distribution - - _cf_certificate_ARN | length > 1 + - _cf_certificate_ARN|length > 1 # @TODO - we can use the aws_acm_obsolete_certificate_arn variable to tidy up previous ACM certs, if it is defined. 
@@ -777,11 +773,4 @@ loop: "{{ _aws_ec2_autoscale_cluster_dns_all_domains }}" when: - aws_ec2_autoscale_cluster.route_53.zone is defined - - aws_ec2_autoscale_cluster.route_53.zone | length > 0 - -#- name: Copy AMI to backup region. -# community.aws.ec2_ami_copy: -# aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" -# source_region: "{{ aws_ec2_autoscale_cluster.region }}" -# region: "{{ aws_backup.copy_vault.region }}" -# source_image_id: "{{ aws_ec2_autoscale_cluster_image_latest.image_id }}" + - aws_ec2_autoscale_cluster.route_53.zone|length > 0 From 26f1e24b7dbad5ccdf8a9fa6397d9f2bd829def0 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 17:45:33 +0200 Subject: [PATCH 22/37] Simplifying ASG role by refactoring into blocks. --- .../aws_ec2_autoscale_cluster/tasks/main.yml | 818 +++++++++--------- 1 file changed, 397 insertions(+), 421 deletions(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index 901199389..af34ada97 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -223,113 +223,98 @@ - aws_ec2_autoscale_cluster.type == "ecs" # EC2 - AMI BUILDING -- name: Add RDS endpoint address to extra vars for AMI building. - ansible.builtin.set_fact: - aws_ec2_autoscale_cluster: - ami_extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default([]) + ['_rds_endpoint: ' + _rds_instance_info.endpoint.address] }}" - when: - - _rds_instance_info.db_instance_identifier is defined - - aws_ec2_autoscale_cluster.rds.rds is defined - - aws_ec2_autoscale_cluster.rds.rds - - aws_ec2_autoscale_cluster.type == "ec2" - -- name: Add Aurora RDS endpoint address to extra vars for AMI building. - ansible.builtin.set_fact: - aws_ec2_autoscale_cluster: - ami_extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default([]) + ['_rds_endpoint: ' + _rds_instance_info_aurora.endpoint.address] }}" - when: - - _rds_instance_info_aurora.db_instance_identifier is defined - - aws_ec2_autoscale_cluster.rds.rds is defined - - aws_ec2_autoscale_cluster.rds.rds - - aws_ec2_autoscale_cluster.type == "ec2" - -- name: Gather running instances information. - amazon.aws.ec2_instance_info: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - filters: - "tag:Name": "{{ aws_ec2_autoscale_cluster.name }}" - instance-state-name: ["running"] - register: aws_ec2_autoscale_cluster_running_instances - when: - - aws_ec2_autoscale_cluster.asg_refresh or aws_ec2_autoscale_cluster.ami_refresh - - aws_ec2_autoscale_cluster.type == "ec2" - -- name: Gather subnet information for temporary EC2 instance if using the 'repack' operation to generate a new AMI. - amazon.aws.ec2_vpc_subnet_info: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - filters: - vpc-id: "{{ _aws_ec2_autoscale_cluster_vpc_id }}" - tag:Name: "{{ aws_ec2_autoscale_cluster.ami_subnet_name }}" - register: _aws_ec2_autoscale_ami_subnet - when: - - aws_ec2_autoscale_cluster.ami_refresh and aws_ec2_autoscale_cluster.ami_operation == 'repack' - - aws_ec2_autoscale_cluster.type == "ec2" - -- name: Create new AMI. 
- ansible.builtin.include_role: - name: aws/aws_ami - vars: - aws_ami: - aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - ami_name: "{{ _aws_ec2_autoscale_cluster_unique_name }}" - encrypt_boot: "{{ aws_ec2_autoscale_cluster.encrypt_boot }}" - name_filter: "{{ aws_ec2_autoscale_cluster.packer_name_filter }}" - repack: - root_volume_type: "{{ aws_ec2_autoscale_cluster.root_volume_type }}" - root_volume_size: "{{ aws_ec2_autoscale_cluster.root_volume_size }}" - cluster_name: "{{ aws_ec2_autoscale_cluster.name }}" - iam_role: "{{ aws_ec2_autoscale_cluster.iam_role_name | default(omit) }}" - vpc_id: "{{ _aws_ec2_autoscale_cluster_vpc_id }}" - vpc_subnet_id: "{{ _aws_ec2_autoscale_ami_subnet.subnets[0].subnet_id | default(omit) }}" - key_name: "{{ aws_ec2_autoscale_cluster.key_name }}" - ebs_optimized: "{{ aws_ec2_autoscale_cluster.ebs_optimized }}" - device_name: "{{ aws_ec2_autoscale_cluster.device_name }}" - playbook_file: "{{ aws_ec2_autoscale_cluster.ami_playbook_file }}" - on_error: "{{ aws_ec2_autoscale_cluster.packer_on_error }}" - vpc_filter: "{{ aws_ec2_autoscale_cluster.packer_vpc_filter }}" - subnet_filter_az: "{{ aws_ec2_autoscale_cluster.packer_subnet_filter_az }}" - force: "{{ aws_ec2_autoscale_cluster.packer_force }}" - operation: "{% if aws_ec2_autoscale_cluster_running_instances.instances | length > 0 %}{{ aws_ec2_autoscale_cluster.ami_operation }}{% else %}create{% endif %}" - tags: "{{ aws_ec2_autoscale_cluster.tags }}" - extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default(omit) }}" - when: - - aws_ec2_autoscale_cluster.ami_refresh - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -# No register in the previous task because we might not repack the AMI so we need to look it up. -- name: Gather AMI image from name. - amazon.aws.ec2_ami_info: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - owners: self - filters: - name: "{{ aws_ec2_autoscale_cluster.name }}*" - register: aws_ec2_autoscale_cluster_image - when: - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Register latest AMI image. - ansible.builtin.set_fact: - aws_ec2_autoscale_cluster_image_latest: "{{ aws_ec2_autoscale_cluster_image.images | sort(attribute='creation_date') | last }}" - when: - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster +- name: Create an AMI for EC2 clusters. + when: aws_ec2_autoscale_cluster.type == "ec2" + block: + - name: Add RDS endpoint address to extra vars for AMI building. + ansible.builtin.set_fact: + aws_ec2_autoscale_cluster: + ami_extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default([]) + ['_rds_endpoint: ' + _rds_instance_info.endpoint.address] }}" + when: + - _rds_instance_info.db_instance_identifier is defined + - aws_ec2_autoscale_cluster.rds.rds is defined + - aws_ec2_autoscale_cluster.rds.rds + + - name: Add Aurora RDS endpoint address to extra vars for AMI building. + ansible.builtin.set_fact: + aws_ec2_autoscale_cluster: + ami_extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default([]) + ['_rds_endpoint: ' + _rds_instance_info_aurora.endpoint.address] }}" + when: + - _rds_instance_info_aurora.db_instance_identifier is defined + - aws_ec2_autoscale_cluster.rds.rds is defined + - aws_ec2_autoscale_cluster.rds.rds + + - name: Gather running instances information. 
+ amazon.aws.ec2_instance_info: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + filters: + "tag:Name": "{{ aws_ec2_autoscale_cluster.name }}" + instance-state-name: ["running"] + register: aws_ec2_autoscale_cluster_running_instances + when: aws_ec2_autoscale_cluster.asg_refresh or aws_ec2_autoscale_cluster.ami_refresh + + - name: Gather subnet information for temporary EC2 instance if using the 'repack' operation to generate a new AMI. + amazon.aws.ec2_vpc_subnet_info: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + filters: + vpc-id: "{{ _aws_ec2_autoscale_cluster_vpc_id }}" + tag:Name: "{{ aws_ec2_autoscale_cluster.ami_subnet_name }}" + register: _aws_ec2_autoscale_ami_subnet + when: aws_ec2_autoscale_cluster.ami_refresh and aws_ec2_autoscale_cluster.ami_operation == 'repack' + + - name: Create new AMI. + ansible.builtin.include_role: + name: aws/aws_ami + vars: + aws_ami: + aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + ami_name: "{{ _aws_ec2_autoscale_cluster_unique_name }}" + encrypt_boot: "{{ aws_ec2_autoscale_cluster.encrypt_boot }}" + name_filter: "{{ aws_ec2_autoscale_cluster.packer_name_filter }}" + repack: + root_volume_type: "{{ aws_ec2_autoscale_cluster.root_volume_type }}" + root_volume_size: "{{ aws_ec2_autoscale_cluster.root_volume_size }}" + cluster_name: "{{ aws_ec2_autoscale_cluster.name }}" + iam_role: "{{ aws_ec2_autoscale_cluster.iam_role_name | default(omit) }}" + vpc_id: "{{ _aws_ec2_autoscale_cluster_vpc_id }}" + vpc_subnet_id: "{{ _aws_ec2_autoscale_ami_subnet.subnets[0].subnet_id | default(omit) }}" + key_name: "{{ aws_ec2_autoscale_cluster.key_name }}" + ebs_optimized: "{{ aws_ec2_autoscale_cluster.ebs_optimized }}" + device_name: "{{ aws_ec2_autoscale_cluster.device_name }}" + playbook_file: "{{ aws_ec2_autoscale_cluster.ami_playbook_file }}" + on_error: "{{ aws_ec2_autoscale_cluster.packer_on_error }}" + vpc_filter: "{{ aws_ec2_autoscale_cluster.packer_vpc_filter }}" + subnet_filter_az: "{{ aws_ec2_autoscale_cluster.packer_subnet_filter_az }}" + force: "{{ aws_ec2_autoscale_cluster.packer_force }}" + operation: "{% if aws_ec2_autoscale_cluster_running_instances.instances | length > 0 %}{{ aws_ec2_autoscale_cluster.ami_operation }}{% else %}create{% endif %}" + tags: "{{ aws_ec2_autoscale_cluster.tags }}" + extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default(omit) }}" + when: + - aws_ec2_autoscale_cluster.ami_refresh + - aws_ec2_autoscale_cluster.deploy_cluster + + # No register in the previous task because we might not repack the AMI so we need to look it up. + - name: Gather AMI image from name. + amazon.aws.ec2_ami_info: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + owners: self + filters: + name: "{{ aws_ec2_autoscale_cluster.name }}*" + register: aws_ec2_autoscale_cluster_image + when: aws_ec2_autoscale_cluster.deploy_cluster -- name: Create ami cleanup function. - ansible.builtin.include_role: - name: aws/aws_ami_asg_cleanup + - name: Register latest AMI image. + ansible.builtin.set_fact: + aws_ec2_autoscale_cluster_image_latest: "{{ aws_ec2_autoscale_cluster_image.images | sort(attribute='creation_date') | last }}" + when: aws_ec2_autoscale_cluster.deploy_cluster -- name: Gather IAM role info. 
- amazon.aws.iam_role_info: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - name: "{{ aws_ec2_autoscale_cluster.iam_role_name }}" - register: _aws_ec2_autoscale_cluster_iam_role_info + - name: Create ami cleanup function. + ansible.builtin.include_role: + name: aws/aws_ami_asg_cleanup # LOAD BALANCING - name: "Create a Target group for port {{ aws_ec2_autoscale_cluster.target_group_http_port }}." @@ -361,186 +346,183 @@ when: - aws_ec2_autoscale_cluster.asg_refresh -- name: Define default ALB listeners. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_listeners_http: - Protocol: HTTP - Port: "{{ aws_ec2_autoscale_cluster.alb_http_port }}" - DefaultActions: - - Type: forward - TargetGroupName: "{{ aws_ec2_autoscale_cluster.name }}" - Rules: "{{ aws_ec2_autoscale_cluster.listeners_http.rules }}" - _aws_ec2_autoscale_cluster_listeners_redirect: - Protocol: HTTP - Port: "{{ aws_ec2_autoscale_cluster.alb_http_port }}" - DefaultActions: - - Type: redirect - RedirectConfig: - Protocol: HTTPS - Host: "#{host}" - Query: "#{query}" - Path: "/#{path}" - Port: "{{ aws_ec2_autoscale_cluster.alb_https_port|string }}" - StatusCode: HTTP_301 - _aws_ec2_autoscale_cluster_listeners_https: - Protocol: HTTPS - Port: "{{ aws_ec2_autoscale_cluster.alb_https_port }}" - SslPolicy: "{{ aws_ec2_autoscale_cluster.alb_ssl_policy }}" - Certificates: - - CertificateArn: "{{ _ssl_certificate_ARN }}" - DefaultActions: - - Type: forward - TargetGroupName: "{{ aws_ec2_autoscale_cluster.name }}" - Rules: "{{ aws_ec2_autoscale_cluster.listeners_https.rules }}" +- name: Build an ALB. when: aws_ec2_autoscale_cluster.create_elb + block: + - name: Define default ALB listeners. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_listeners_http: + Protocol: HTTP + Port: "{{ aws_ec2_autoscale_cluster.alb_http_port }}" + DefaultActions: + - Type: forward + TargetGroupName: "{{ aws_ec2_autoscale_cluster.name }}" + Rules: "{{ aws_ec2_autoscale_cluster.listeners_http.rules }}" + _aws_ec2_autoscale_cluster_listeners_redirect: + Protocol: HTTP + Port: "{{ aws_ec2_autoscale_cluster.alb_http_port }}" + DefaultActions: + - Type: redirect + RedirectConfig: + Protocol: HTTPS + Host: "#{host}" + Query: "#{query}" + Path: "/#{path}" + Port: "{{ aws_ec2_autoscale_cluster.alb_https_port|string }}" + StatusCode: HTTP_301 + _aws_ec2_autoscale_cluster_listeners_https: + Protocol: HTTPS + Port: "{{ aws_ec2_autoscale_cluster.alb_https_port }}" + SslPolicy: "{{ aws_ec2_autoscale_cluster.alb_ssl_policy }}" + Certificates: + - CertificateArn: "{{ _ssl_certificate_ARN }}" + DefaultActions: + - Type: forward + TargetGroupName: "{{ aws_ec2_autoscale_cluster.name }}" + Rules: "{{ aws_ec2_autoscale_cluster.listeners_https.rules }}" + + # @TODO - we can use the aws_acm_obsolete_certificate_arn variable to tidy up previous ACM certs, if it is defined. + + - name: Add HTTP listeners. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_listeners: "{{ [_aws_ec2_autoscale_cluster_listeners_http] }}" + when: + - _ssl_certificate_ARN|length < 1 -# @TODO - we can use the aws_acm_obsolete_certificate_arn variable to tidy up previous ACM certs, if it is defined. - -- name: Add HTTP listeners. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_listeners: "{{ [_aws_ec2_autoscale_cluster_listeners_http] }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - _ssl_certificate_ARN | length < 1 - -- name: Add HTTPS Listener. 
- ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_listeners: "{{ [_aws_ec2_autoscale_cluster_listeners_redirect, _aws_ec2_autoscale_cluster_listeners_https] }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - _ssl_certificate_ARN | length > 1 - -- name: Add custom Listeners. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_listeners: "{{ _aws_ec2_autoscale_cluster_listeners + aws_ec2_autoscale_cluster.listeners }}" - when: - - aws_ec2_autoscale_cluster is defined - - aws_ec2_autoscale_cluster | length > 0 - - aws_ec2_autoscale_cluster.create_elb - -- name: Generate security group information for the ALB. - ansible.builtin.include_role: - name: aws/aws_security_groups - vars: - aws_security_groups: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - group_names: "{{ aws_ec2_autoscale_cluster.alb_security_groups }}" - return_type: ids - when: - - aws_ec2_autoscale_cluster.alb_security_groups | length > 0 - - aws_ec2_autoscale_cluster.create_elb - -- name: Create the ALB. - amazon.aws.elb_application_lb: - name: "{{ aws_ec2_autoscale_cluster.name }}" - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - state: "{{ aws_ec2_autoscale_cluster.state }}" - tags: "{{ aws_ec2_autoscale_cluster.tags }}" - subnets: "{{ _aws_ec2_autoscale_cluster_public_subnets_ids }}" - security_groups: "{{ _aws_security_group_list + [_aws_ec2_autoscale_cluster_security_group.group_id] }}" - listeners: "{{ _aws_ec2_autoscale_cluster_listeners }}" - idle_timeout: "{{ aws_ec2_autoscale_cluster.alb_idle_timeout }}" - register: _aws_ec2_autoscale_cluster_alb - when: aws_ec2_autoscale_cluster.create_elb + - name: Add HTTPS Listener. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_listeners: "{{ [_aws_ec2_autoscale_cluster_listeners_redirect, _aws_ec2_autoscale_cluster_listeners_https] }}" + when: + - _ssl_certificate_ARN|length > 1 -- name: "Get ALB listener ARN for port {{ aws_ec2_autoscale_cluster.alb_https_port }}." - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_alb_listener_ARN: "{{ item.listener_arn }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - item.port == aws_ec2_autoscale_cluster.alb_https_port - - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs - - _ssl_certificate_ARN | length > 1 - with_items: "{{ _aws_ec2_autoscale_cluster_alb.listeners }}" - -- name: Add extra SSL certificates to the ALB. - ansible.builtin.command: - cmd: "aws elbv2 add-listener-certificates --region {{ aws_ec2_autoscale_cluster.region }} --profile {{ aws_ec2_autoscale_cluster.aws_profile }} --listener-arn {{ _aws_ec2_autoscale_cluster_alb_listener_ARN }} --certificates CertificateArn={{ item }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs|length > 0 - - _ssl_certificate_ARN|length > 1 - with_items: "{{ aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs }}" + - name: Add custom Listeners. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_listeners: "{{ _aws_ec2_autoscale_cluster_listeners + aws_ec2_autoscale_cluster.listeners }}" + when: + - aws_ec2_autoscale_cluster is defined + - aws_ec2_autoscale_cluster|length > 0 + + - name: Generate security group information for the ALB. 
+ ansible.builtin.include_role: + name: aws/aws_security_groups + vars: + aws_security_groups: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + group_names: "{{ aws_ec2_autoscale_cluster.alb_security_groups }}" + return_type: ids + when: + - aws_ec2_autoscale_cluster.alb_security_groups|length > 0 + + - name: Create the ALB. + amazon.aws.elb_application_lb: + name: "{{ aws_ec2_autoscale_cluster.name }}" + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + state: "{{ aws_ec2_autoscale_cluster.state }}" + tags: "{{ aws_ec2_autoscale_cluster.tags }}" + subnets: "{{ _aws_ec2_autoscale_cluster_public_subnets_ids }}" + security_groups: "{{ _aws_security_group_list + [_aws_ec2_autoscale_cluster_security_group.group_id] }}" + listeners: "{{ _aws_ec2_autoscale_cluster_listeners }}" + idle_timeout: "{{ aws_ec2_autoscale_cluster.alb_idle_timeout }}" + register: _aws_ec2_autoscale_cluster_alb + + - name: "Get ALB listener ARN for port {{ aws_ec2_autoscale_cluster.alb_https_port }}." + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_alb_listener_ARN: "{{ item.listener_arn }}" + when: + - item.port == aws_ec2_autoscale_cluster.alb_https_port + - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs|length > 0 + - _ssl_certificate_ARN|length > 1 + with_items: "{{ _aws_ec2_autoscale_cluster_alb.listeners }}" + + - name: Add extra SSL certificates to the ALB. + ansible.builtin.command: + cmd: "aws elbv2 add-listener-certificates --region {{ aws_ec2_autoscale_cluster.region }} --profile {{ aws_ec2_autoscale_cluster.aws_profile }} --listener-arn {{ _aws_ec2_autoscale_cluster_alb_listener_ARN }} --certificates CertificateArn={{ item }}" + when: + - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs|length > 0 + - _ssl_certificate_ARN|length > 1 + with_items: "{{ aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs }}" # EC2 - BUILD ASG -- name: Generate security group information for the ASG. - ansible.builtin.include_role: - name: aws/aws_security_groups - vars: - aws_security_groups: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - group_names: "{{ aws_ec2_autoscale_cluster.cluster_security_groups }}" - return_type: ids - when: - - aws_ec2_autoscale_cluster.cluster_security_groups|length > 0 - - aws_ec2_autoscale_cluster.asg_refresh - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Create launch template. 
- amazon.aws.ec2_launch_template: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - name: "{{ aws_ec2_autoscale_cluster.name }}" - image_id: "{{ aws_ec2_autoscale_cluster.image_id if aws_ec2_autoscale_cluster.image_id is defined else aws_ec2_autoscale_cluster_image_latest.image_id }}" - key_name: "{{ aws_ec2_autoscale_cluster.key_name }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - state: "{{ aws_ec2_autoscale_cluster.state }}" - instance_type: "{{ aws_ec2_autoscale_cluster.instance_type }}" - iam_instance_profile: "{{ _aws_ec2_autoscale_cluster_iam_role_info.iam_roles[0].instance_profiles[0].arn }}" - disable_api_termination: "{{ aws_ec2_autoscale_cluster.instance_disable_api_termination }}" - ebs_optimized: "{{ aws_ec2_autoscale_cluster.ebs_optimized }}" - network_interfaces: - - associate_public_ip_address: "{{ aws_ec2_autoscale_cluster.assign_public_ip }}" - delete_on_termination: "{{ aws_ec2_autoscale_cluster.instance_nic_delete_on_termination }}" - subnet_id: "{{ subnet_id }}" # picked randomly from _aws_ec2_autoscale_cluster_subnets_ids, see with_random_choice - device_index: 0 # must be 0 - see https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-template.html#change-network-interface - groups: "{{ _aws_security_group_list + [_aws_ec2_autoscale_cluster_security_group.group_id] }}" - block_device_mappings: - - ebs: - delete_on_termination: "{{ aws_ec2_autoscale_cluster.root_volume_delete_on_termination }}" - encrypted: "{{ aws_ec2_autoscale_cluster.encrypt_boot }}" - volume_size: "{{ aws_ec2_autoscale_cluster.root_volume_size }}" - volume_type: "{{ aws_ec2_autoscale_cluster.root_volume_type }}" - device_name: "{{ aws_ec2_autoscale_cluster.device_name }}" - credit_specification: "{{ aws_ec2_autoscale_cluster.instance_credit_specification | default(omit) }}" - with_random_choice: "{{ _aws_ec2_autoscale_cluster_subnets_ids }}" - loop_control: - loop_var: subnet_id +- name: Build the ASG. when: - - aws_ec2_autoscale_cluster.asg_refresh - - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Create AutoScale group and spin up new instances. 
- amazon.aws.autoscaling_group: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - name: "{{ aws_ec2_autoscale_cluster.name }}" - state: "{{ aws_ec2_autoscale_cluster.state }}" - launch_template: - launch_template_name: "{{ aws_ec2_autoscale_cluster.name }}" - health_check_type: "{% if aws_ec2_autoscale_cluster_running_instances.instances | length > 0 %}{{ aws_ec2_autoscale_cluster.alb_health_check_type }}{% else %}EC2{% endif %}" - health_check_period: "{{ aws_ec2_autoscale_cluster.alb_health_check_period | default(omit) }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - replace_all_instances: true - replace_batch_size: "{{ aws_ec2_autoscale_cluster.min_size if aws_ec2_autoscale_cluster.desired_capacity == 0 else aws_ec2_autoscale_cluster.desired_capacity }}" - wait_for_instances: true - lt_check: true - wait_timeout: 3000 - desired_capacity: "{{ aws_ec2_autoscale_cluster.min_size if aws_ec2_autoscale_cluster.desired_capacity == 0 else aws_ec2_autoscale_cluster.desired_capacity }}" - min_size: "{{ aws_ec2_autoscale_cluster.min_size }}" - max_size: "{{ aws_ec2_autoscale_cluster.max_size }}" - tags: "{{ aws_ec2_autoscale_cluster.tags | simpledict2list }}" - vpc_zone_identifier: "{{ _aws_ec2_autoscale_cluster_subnets_ids }}" - target_group_arns: - - "{{ _aws_ec2_target_group_created.target_group_arn }}" - register: _aws_ec2_asg_created - when: - aws_ec2_autoscale_cluster.asg_refresh - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster + block: + - name: Gather IAM role info. + amazon.aws.iam_role_info: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + name: "{{ aws_ec2_autoscale_cluster.iam_role_name }}" + register: _aws_ec2_autoscale_cluster_iam_role_info + + - name: Generate security group information for the ASG. + ansible.builtin.include_role: + name: aws/aws_security_groups + vars: + aws_security_groups: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + group_names: "{{ aws_ec2_autoscale_cluster.cluster_security_groups }}" + return_type: ids + when: + - aws_ec2_autoscale_cluster.cluster_security_groups|length > 0 + + - name: Create launch template. 
+ amazon.aws.ec2_launch_template: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + name: "{{ aws_ec2_autoscale_cluster.name }}" + image_id: "{{ aws_ec2_autoscale_cluster.image_id if aws_ec2_autoscale_cluster.image_id is defined else aws_ec2_autoscale_cluster_image_latest.image_id }}" + key_name: "{{ aws_ec2_autoscale_cluster.key_name }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + state: "{{ aws_ec2_autoscale_cluster.state }}" + instance_type: "{{ aws_ec2_autoscale_cluster.instance_type }}" + iam_instance_profile: "{{ _aws_ec2_autoscale_cluster_iam_role_info.iam_roles[0].instance_profiles[0].arn }}" + disable_api_termination: "{{ aws_ec2_autoscale_cluster.instance_disable_api_termination }}" + ebs_optimized: "{{ aws_ec2_autoscale_cluster.ebs_optimized }}" + network_interfaces: + - associate_public_ip_address: "{{ aws_ec2_autoscale_cluster.assign_public_ip }}" + delete_on_termination: "{{ aws_ec2_autoscale_cluster.instance_nic_delete_on_termination }}" + subnet_id: "{{ subnet_id }}" # picked randomly from _aws_ec2_autoscale_cluster_subnets_ids, see with_random_choice + device_index: 0 # must be 0 - see https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-template.html#change-network-interface + groups: "{{ _aws_security_group_list + [_aws_ec2_autoscale_cluster_security_group.group_id] }}" + block_device_mappings: + - ebs: + delete_on_termination: "{{ aws_ec2_autoscale_cluster.root_volume_delete_on_termination }}" + encrypted: "{{ aws_ec2_autoscale_cluster.encrypt_boot }}" + volume_size: "{{ aws_ec2_autoscale_cluster.root_volume_size }}" + volume_type: "{{ aws_ec2_autoscale_cluster.root_volume_type }}" + device_name: "{{ aws_ec2_autoscale_cluster.device_name }}" + credit_specification: "{{ aws_ec2_autoscale_cluster.instance_credit_specification | default(omit) }}" + with_random_choice: "{{ _aws_ec2_autoscale_cluster_subnets_ids }}" + loop_control: + loop_var: subnet_id + + - name: Create AutoScale group and spin up new instances. + amazon.aws.autoscaling_group: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + name: "{{ aws_ec2_autoscale_cluster.name }}" + state: "{{ aws_ec2_autoscale_cluster.state }}" + launch_template: + launch_template_name: "{{ aws_ec2_autoscale_cluster.name }}" + health_check_type: "{% if aws_ec2_autoscale_cluster_running_instances.instances | length > 0 %}{{ aws_ec2_autoscale_cluster.alb_health_check_type }}{% else %}EC2{% endif %}" + health_check_period: "{{ aws_ec2_autoscale_cluster.alb_health_check_period | default(omit) }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + replace_all_instances: true + replace_batch_size: "{{ aws_ec2_autoscale_cluster.min_size if aws_ec2_autoscale_cluster.desired_capacity == 0 else aws_ec2_autoscale_cluster.desired_capacity }}" + wait_for_instances: true + lt_check: true + wait_timeout: 3000 + desired_capacity: "{{ aws_ec2_autoscale_cluster.min_size if aws_ec2_autoscale_cluster.desired_capacity == 0 else aws_ec2_autoscale_cluster.desired_capacity }}" + min_size: "{{ aws_ec2_autoscale_cluster.min_size }}" + max_size: "{{ aws_ec2_autoscale_cluster.max_size }}" + tags: "{{ aws_ec2_autoscale_cluster.tags | simpledict2list }}" + vpc_zone_identifier: "{{ _aws_ec2_autoscale_cluster_subnets_ids }}" + target_group_arns: + - "{{ _aws_ec2_target_group_created.target_group_arn }}" + register: _aws_ec2_asg_created - name: Handle simple scaling AutoScale. 
when: @@ -593,124 +575,92 @@ ansible.builtin.set_fact: _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_step_scaling_policies.results }}" -- name: Create placeholder ARN variables for scaling policies. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_up_policy_ARN: "" - _aws_ec2_autoscale_cluster_scaling_down_policy_ARN: "" - when: - - _aws_ec2_autoscale_cluster_scaling_policies is defined - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -# @todo We should support multiple policies. If this built a list -# then we could potentially loop over it after. -- name: Set scaling up policy ARN. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_up_policy_ARN: "{{ item.arn }}" - loop: "{{ _aws_ec2_autoscale_cluster_scaling_policies }}" - when: - - _aws_ec2_autoscale_cluster_scaling_policies is defined - - item.item.name == aws_ec2_autoscale_cluster.asg_cloudwatch_policy_scale_up_name - - item.arn is defined - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -# @todo As above. -- name: Set scaling down policy ARN. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_down_policy_ARN: "{{ item.arn }}" - loop: "{{ _aws_ec2_autoscale_cluster_scaling_policies }}" +- name: Create scaling policies and alarms. when: - _aws_ec2_autoscale_cluster_scaling_policies is defined - - item.item.name == aws_ec2_autoscale_cluster.asg_cloudwatch_policy_scale_down_name - - item.arn is defined - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Create alarm in CloudWatch for auto scaling up. - ansible.builtin.include_role: - name: aws/aws_ec2_metric_alarm - vars: - aws_ec2_metric_alarm: - aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - name: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarm_scale_up_name }}" - description: "{{ item.description }}" - metric: "{{ item.metric }}" - namespace: "{{ item.namespace }}" - statistic: "{{ item.statistic }}" - comparison: "{{ item.comparison }}" - threshold: "{{ item.threshold }}" - unit: "{{ item.unit }}" - period: "{{ item.period }}" - evaluation_periods: "{{ item.evaluation_periods }}" - alarm_actions: - - "{{ _aws_ec2_autoscale_cluster_scaling_up_policy_ARN }}" - dimensions: - "AutoScalingGroupName": "{{ aws_ec2_autoscale_cluster.name }}" - with_items: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarms }}" - when: - - _aws_ec2_autoscale_cluster_scaling_up_policy_ARN is defined - - item.scale_direction == 'up' - - aws_ec2_autoscale_cluster.type == "ec2" - -- name: Create alarm in CloudWatch for auto scaling down. 
- ansible.builtin.include_role: - name: aws/aws_ec2_metric_alarm - vars: - aws_ec2_metric_alarm: - aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - name: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarm_scale_down_name }}" - description: "{{ item.description }}" - metric: "{{ item.metric }}" - namespace: "{{ item.namespace }}" - statistic: "{{ item.statistic }}" - comparison: "{{ item.comparison }}" - threshold: "{{ item.threshold }}" - unit: "{{ item.unit }}" - period: "{{ item.period }}" - evaluation_periods: "{{ item.evaluation_periods }}" - alarm_actions: - - "{{ _aws_ec2_autoscale_cluster_scaling_down_policy_ARN }}" - dimensions: - "AutoScalingGroupName": "{{ aws_ec2_autoscale_cluster.name }}" - with_items: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarms }}" - when: - - _aws_ec2_autoscale_cluster_scaling_down_policy_ARN is defined - - item.scale_direction == 'down' - aws_ec2_autoscale_cluster.type == "ec2" + block: + - name: Create placeholder ARN variables for scaling policies. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_up_policy_ARN: "" + _aws_ec2_autoscale_cluster_scaling_down_policy_ARN: "" + when: + - aws_ec2_autoscale_cluster.deploy_cluster + + # @todo We should support multiple policies. If this built a list + # then we could potentially loop over it after. + - name: Set scaling up policy ARN. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_up_policy_ARN: "{{ item.arn }}" + loop: "{{ _aws_ec2_autoscale_cluster_scaling_policies }}" + when: + - item.item.name == aws_ec2_autoscale_cluster.asg_cloudwatch_policy_scale_up_name + - item.arn is defined + - aws_ec2_autoscale_cluster.deploy_cluster + + # @todo As above. + - name: Set scaling down policy ARN. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_down_policy_ARN: "{{ item.arn }}" + loop: "{{ _aws_ec2_autoscale_cluster_scaling_policies }}" + when: + - item.item.name == aws_ec2_autoscale_cluster.asg_cloudwatch_policy_scale_down_name + - item.arn is defined + - aws_ec2_autoscale_cluster.deploy_cluster + + - name: Create alarm in CloudWatch for auto scaling up. + ansible.builtin.include_role: + name: aws/aws_ec2_metric_alarm + vars: + aws_ec2_metric_alarm: + aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + name: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarm_scale_up_name }}" + description: "{{ item.description }}" + metric: "{{ item.metric }}" + namespace: "{{ item.namespace }}" + statistic: "{{ item.statistic }}" + comparison: "{{ item.comparison }}" + threshold: "{{ item.threshold }}" + unit: "{{ item.unit }}" + period: "{{ item.period }}" + evaluation_periods: "{{ item.evaluation_periods }}" + alarm_actions: + - "{{ _aws_ec2_autoscale_cluster_scaling_up_policy_ARN }}" + dimensions: + "AutoScalingGroupName": "{{ aws_ec2_autoscale_cluster.name }}" + with_items: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarms }}" + when: + - _aws_ec2_autoscale_cluster_scaling_up_policy_ARN is defined + - item.scale_direction == 'up' + + - name: Create alarm in CloudWatch for auto scaling down. 
+ ansible.builtin.include_role: + name: aws/aws_ec2_metric_alarm + vars: + aws_ec2_metric_alarm: + aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + name: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarm_scale_down_name }}" + description: "{{ item.description }}" + metric: "{{ item.metric }}" + namespace: "{{ item.namespace }}" + statistic: "{{ item.statistic }}" + comparison: "{{ item.comparison }}" + threshold: "{{ item.threshold }}" + unit: "{{ item.unit }}" + period: "{{ item.period }}" + evaluation_periods: "{{ item.evaluation_periods }}" + alarm_actions: + - "{{ _aws_ec2_autoscale_cluster_scaling_down_policy_ARN }}" + dimensions: + "AutoScalingGroupName": "{{ aws_ec2_autoscale_cluster.name }}" + with_items: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarms }}" + when: + - _aws_ec2_autoscale_cluster_scaling_down_policy_ARN is defined + - item.scale_direction == 'down' # CLOUDFRONT -- name: Create SSL certificate for CloudFront. - ansible.builtin.include_role: - name: aws/aws_acm - vars: - aws_acm: - export: false - region: us-east-1 # Certificate must be in us-east-1 for CloudFront. - domain_name: "{{ aws_ec2_autoscale_cluster.route_53.record }}" - extra_domains: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" - route_53: - aws_profile: "{{ aws_ec2_autoscale_cluster.acm.route_53.aws_profile }}" - zone: "{{ aws_ec2_autoscale_cluster.acm.route_53.zone }}" - when: - - aws_ec2_autoscale_cluster.cloudfront.create_cert - - aws_ec2_autoscale_cluster.region != 'us-east-1' - - aws_ec2_autoscale_cluster.cloudfront.create_distribution - -- name: Default to provided CloudFront SSL certificate ARN. - ansible.builtin.set_fact: - _cf_certificate_ARN: "{{ aws_ec2_autoscale_cluster.cloudfront.cf_certificate_ARN }}" - when: aws_ec2_autoscale_cluster.cloudfront.create_distribution - -- name: If provided, override CloudFront SSL certificate ARN with the one received from ACM. - ansible.builtin.set_fact: - _cf_certificate_ARN: "{{ aws_acm_certificate_arn }}" - when: - - aws_ec2_autoscale_cluster.cloudfront.create_cert - - aws_ec2_autoscale_cluster.cloudfront.create_distribution - - name: Initialise the domains loop var with main domain entry DNS settings. ansible.builtin.set_fact: _aws_ec2_autoscale_cluster_dns_all_domains: @@ -724,40 +674,66 @@ loop: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" when: aws_ec2_autoscale_cluster.acm.extra_domains | length > 0 -- name: Initialise a list of CloudFront aliases with main domain name. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ [_domain_name] }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - aws_ec2_autoscale_cluster.cloudfront.create_distribution +- name: Handle CloudFront. + when: aws_ec2_autoscale_cluster.cloudfront.create_distribution + block: + - name: Create SSL certificate for CloudFront. + ansible.builtin.include_role: + name: aws/aws_acm + vars: + aws_acm: + export: false + region: us-east-1 # Certificate must be in us-east-1 for CloudFront. + domain_name: "{{ aws_ec2_autoscale_cluster.route_53.record }}" + extra_domains: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" + route_53: + aws_profile: "{{ aws_ec2_autoscale_cluster.acm.route_53.aws_profile }}" + zone: "{{ aws_ec2_autoscale_cluster.acm.route_53.zone }}" + when: + - aws_ec2_autoscale_cluster.cloudfront.create_cert + - aws_ec2_autoscale_cluster.region != 'us-east-1' + + - name: Default to provided CloudFront SSL certificate ARN. 
+ ansible.builtin.set_fact: + _cf_certificate_ARN: "{{ aws_ec2_autoscale_cluster.cloudfront.cf_certificate_ARN }}" -- name: Add extra_domains so we can set up additional CloudFront aliases. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ _aws_ec2_autoscale_cluster_cloudfront_aliases + [item.domain] }}" - loop: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" - when: - - aws_ec2_autoscale_cluster.acm.extra_domains|length > 0 - - aws_ec2_autoscale_cluster.create_elb - - aws_ec2_autoscale_cluster.cloudfront.create_distribution + - name: If provided, override CloudFront SSL certificate ARN with the one received from ACM. + ansible.builtin.set_fact: + _cf_certificate_ARN: "{{ aws_acm_certificate_arn }}" + when: + - aws_ec2_autoscale_cluster.cloudfront.create_cert -- name: Create a CloudFront distribution. - ansible.builtin.include_role: - name: aws/aws_cloudfront_distribution - vars: - aws_cloudfront_distribution: - tags: "{{ aws_ec2_autoscale_cluster.tags | combine({'Name': aws_ec2_autoscale_cluster.name}) }}" - aliases: "{{ _aws_ec2_autoscale_cluster_cloudfront_aliases }}" - viewer_certificate: - acm_certificate_arn: "{{ _cf_certificate_ARN }}" - origins: - - domain_name: "{{ _aws_ec2_autoscale_cluster_alb.dns_name }}" - id: "ELB-{{ aws_ec2_autoscale_cluster.name }}" - default_cache_behavior: - target_origin_id: "ELB-{{ aws_ec2_autoscale_cluster.name }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - aws_ec2_autoscale_cluster.cloudfront.create_distribution - - _cf_certificate_ARN|length > 1 + - name: Initialise a list of CloudFront aliases with main domain name. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ [_domain_name] }}" + when: + - aws_ec2_autoscale_cluster.create_elb + + - name: Add extra_domains so we can set up additional CloudFront aliases. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ _aws_ec2_autoscale_cluster_cloudfront_aliases + [item.domain] }}" + loop: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" + when: + - aws_ec2_autoscale_cluster.acm.extra_domains|length > 0 + - aws_ec2_autoscale_cluster.create_elb + + - name: Create a CloudFront distribution. + ansible.builtin.include_role: + name: aws/aws_cloudfront_distribution + vars: + aws_cloudfront_distribution: + tags: "{{ aws_ec2_autoscale_cluster.tags | combine({'Name': aws_ec2_autoscale_cluster.name}) }}" + aliases: "{{ _aws_ec2_autoscale_cluster_cloudfront_aliases }}" + viewer_certificate: + acm_certificate_arn: "{{ _cf_certificate_ARN }}" + origins: + - domain_name: "{{ _aws_ec2_autoscale_cluster_alb.dns_name }}" + id: "ELB-{{ aws_ec2_autoscale_cluster.name }}" + default_cache_behavior: + target_origin_id: "ELB-{{ aws_ec2_autoscale_cluster.name }}" + when: + - aws_ec2_autoscale_cluster.create_elb + - _cf_certificate_ARN|length > 1 # @TODO - we can use the aws_acm_obsolete_certificate_arn variable to tidy up previous ACM certs, if it is defined. From 05f260071b33cc33de3381fac0031cc357c01f02 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 18:20:54 +0200 Subject: [PATCH 23/37] Further simplifying ASG CloudFront block. 
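
Conditions shared by every task in a block belong on the block itself;
the per-task `create_elb` and `create_distribution` checks below are
dropped in favour of two block-level conditions, which Ansible applies
to each task inside the block. A minimal sketch of the pattern (the
debug task is illustrative, not taken from the role):

    - name: Handle CloudFront.
      when:
        - aws_ec2_autoscale_cluster.cloudfront.create_distribution
        - aws_ec2_autoscale_cluster.create_elb
      block:
        - name: Example task inheriting the block conditions.
          ansible.builtin.debug:
            msg: "Runs only when both block conditions are true."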
--- roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index af34ada97..a4115baa7 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -675,7 +675,9 @@ when: aws_ec2_autoscale_cluster.acm.extra_domains | length > 0 - name: Handle CloudFront. - when: aws_ec2_autoscale_cluster.cloudfront.create_distribution + when: + - aws_ec2_autoscale_cluster.cloudfront.create_distribution + - aws_ec2_autoscale_cluster.create_elb block: - name: Create SSL certificate for CloudFront. ansible.builtin.include_role: @@ -706,8 +708,6 @@ - name: Initialise a list of CloudFront aliases with main domain name. ansible.builtin.set_fact: _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ [_domain_name] }}" - when: - - aws_ec2_autoscale_cluster.create_elb - name: Add extra_domains so we can set up additional CloudFront aliases. ansible.builtin.set_fact: @@ -715,7 +715,6 @@ loop: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" when: - aws_ec2_autoscale_cluster.acm.extra_domains|length > 0 - - aws_ec2_autoscale_cluster.create_elb - name: Create a CloudFront distribution. ansible.builtin.include_role: @@ -732,7 +731,6 @@ default_cache_behavior: target_origin_id: "ELB-{{ aws_ec2_autoscale_cluster.name }}" when: - - aws_ec2_autoscale_cluster.create_elb - _cf_certificate_ARN|length > 1 # @TODO - we can use the aws_acm_obsolete_certificate_arn variable to tidy up previous ACM certs, if it is defined. From 32cfd678475f5a0751497e7eec03b0b94ebc2c90 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 19:01:11 +0200 Subject: [PATCH 24/37] Scaling rules refactor needs work. --- roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index a4115baa7..410e9d199 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -527,7 +527,6 @@ - name: Handle simple scaling AutoScale. when: - aws_ec2_autoscale_cluster.asg_scaling_policies|length > 0 - - item.policy_type == 'SimpleScaling' - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster block: @@ -544,18 +543,13 @@ cooldown: "{{ item.cooldown }}" register: _aws_ec2_autoscale_cluster_simple_scaling_policies with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" + when: item.policy_type == 'SimpleScaling' - name: Fetch simple scaling policies. ansible.builtin.set_fact: _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_scaling_policies + _aws_ec2_autoscale_cluster_simple_scaling_policies.results }}" + when: _aws_ec2_autoscale_cluster_simple_scaling_policies.results is defined -- name: Handle step scaling AustoScale. - when: - - aws_ec2_autoscale_cluster.asg_scaling_policies|length > 0 - - item.policy_type == 'StepScaling' - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - block: - name: Create step scaling AutoScale policies. 
community.aws.autoscaling_policy: profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" @@ -570,10 +564,12 @@ step_adjustments: "{{ item.step_adjustments }}" register: _aws_ec2_autoscale_cluster_step_scaling_policies with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" + when: item.policy_type == 'StepScaling' - name: Fetch step scaling policies. ansible.builtin.set_fact: _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_step_scaling_policies.results }}" + when: _aws_ec2_autoscale_cluster_step_scaling_policies.results is defined - name: Create scaling policies and alarms. when: From 1d1f82086efc9d6b14226a5654bf2b3dbb57acb4 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 19:21:12 +0200 Subject: [PATCH 25/37] Scaling policies list needs to be defined in case it is empty and we try to concatenate. --- roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index 410e9d199..42e1898d4 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -524,12 +524,16 @@ - "{{ _aws_ec2_target_group_created.target_group_arn }}" register: _aws_ec2_asg_created -- name: Handle simple scaling AutoScale. +- name: Handle AutoScale policies and alarms. when: - aws_ec2_autoscale_cluster.asg_scaling_policies|length > 0 - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster block: + - name: Set empty scaling policies fact. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_policies: [] + - name: Create simple scaling AutoScale policies. community.aws.autoscaling_policy: profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" @@ -545,9 +549,9 @@ with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" when: item.policy_type == 'SimpleScaling' - - name: Fetch simple scaling policies. + - name: Add simple scaling policies to scaling policies list. ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_scaling_policies + _aws_ec2_autoscale_cluster_simple_scaling_policies.results }}" + _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_simple_scaling_policies.results }}" when: _aws_ec2_autoscale_cluster_simple_scaling_policies.results is defined - name: Create step scaling AutoScale policies. @@ -566,9 +570,9 @@ with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" when: item.policy_type == 'StepScaling' - - name: Fetch step scaling policies. + - name: Add step scaling policies to scaling policies list. ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_step_scaling_policies.results }}" + _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_scaling_policies + _aws_ec2_autoscale_cluster_step_scaling_policies.results }}" when: _aws_ec2_autoscale_cluster_step_scaling_policies.results is defined - name: Create scaling policies and alarms. From 540f6d105b84f411f3275872a930c547a54f6222 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 19:30:14 +0200 Subject: [PATCH 26/37] Enhancing installer to accept an Ansible version and putting Ansible 12 back into GitHub Actions containers. 
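
ANSIBLE_VERSION now defaults to empty, meaning latest, instead of the
hard-coded "<12", and a pip-style specifier can be passed at install
time. An illustrative invocation (the version string is an example;
the other flags are existing installer options):

    sudo ./install.sh --ansible-version "<12" --docker --no-firewall

Presumably any specifier pip accepts (e.g. "<12" or "==11.1.0") should
work, since the string is handed through to the Ansible installation.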
--- install.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/install.sh b/install.sh index c11219cbd..3a3be01d8 100755 --- a/install.sh +++ b/install.sh @@ -18,6 +18,7 @@ usage(){ /usr/bin/echo '--hostname: the server hostname to set (default: depends on system or provider)' /usr/bin/echo '--no-firewall: skip installing iptables with ports 22, 80 and 443 open' /usr/bin/echo '--gitlab: install GitLab CE on this server (default: no, set to desired GitLab address to install, e.g. gitlab.example.com)' + /usr/bin/echo '--ansible-version: pass an Ansible version string such as <12 for less than version 12 (default: latest)' /usr/bin/echo '--letsencrypt: try to create an SSL certificate with LetsEncrypt (requires DNS pointing at this server for provided GitLab URL)' /usr/bin/echo '--aws: enable AWS support' /usr/bin/echo '--docker: script is running in a Docker container' @@ -52,6 +53,10 @@ parse_options(){ shift GITLAB_URL="$1" ;; + "--ansible-version") + shift + ANSIBLE_VERSION="$1" + ;; "--letsencrypt") LE_SUPPORT="yes" ;; @@ -84,7 +89,7 @@ FIREWALL="true" AWS_SUPPORT="false" IS_LOCAL="false" SERVER_HOSTNAME=$(hostname) -ANSIBLE_VERSION="<12" +ANSIBLE_VERSION="" # Parse options. parse_options "$@" From 8593d7583387c9619cefea9d9f7d4b2a034e5438 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 19:38:58 +0200 Subject: [PATCH 27/37] Trying a different approach to defaulting the venv username. --- roles/debian/python_pip_packages/defaults/main.yml | 2 +- roles/debian/python_pip_packages/tasks/main.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/debian/python_pip_packages/defaults/main.yml b/roles/debian/python_pip_packages/defaults/main.yml index 67d6d0120..56a4f77e9 100644 --- a/roles/debian/python_pip_packages/defaults/main.yml +++ b/roles/debian/python_pip_packages/defaults/main.yml @@ -3,7 +3,7 @@ python_pip_packages: # These are usually set in the _init role using _venv_path, _venv_command and _venv_install_username but can be overridden. #venv_path: /path/to/venv #venv_command: /usr/bin/python3.11 -m venv - #install_username: deploy # user to become when creating venv + install_username: "{{ _venv_install_username }}" # _venv_install_username is set in _init packages: [] # - name: pip diff --git a/roles/debian/python_pip_packages/tasks/main.yml b/roles/debian/python_pip_packages/tasks/main.yml index 50c038d25..ad855d12e 100644 --- a/roles/debian/python_pip_packages/tasks/main.yml +++ b/roles/debian/python_pip_packages/tasks/main.yml @@ -12,5 +12,5 @@ path: "{{ python_pip_packages.venv_path | default(_venv_path) }}" state: directory recurse: true - owner: "{{ python_pip_packages.install_username | default(_venv_install_username) }}" - group: "{{ python_pip_packages.install_username | default(_venv_install_username) }}" + owner: "{{ python_pip_packages.install_username }}" + group: "{{ python_pip_packages.install_username }}" From 4448fa6833ab2f8ce0e257de7efcdec1ee3a4dbd Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 20:05:20 +0200 Subject: [PATCH 28/37] Removing default() filter from python_pip_packages role. 
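
The role defaults now resolve the _init facts directly, so the tasks
can read the python_pip_packages dictionary without default()
fallbacks. Consumers can still override the whole set; a sketch with
illustrative values (the venv path is invented, the other values
mirror the examples already in the role comments):

    python_pip_packages:
      venv_path: /opt/ansible-venv
      venv_command: /usr/bin/python3.11 -m venv
      install_username: deploy
      packages:
        - name: pip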
--- roles/debian/python_pip_packages/defaults/main.yml | 8 ++++---- roles/debian/python_pip_packages/tasks/main.yml | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/roles/debian/python_pip_packages/defaults/main.yml b/roles/debian/python_pip_packages/defaults/main.yml index 56a4f77e9..c2e179208 100644 --- a/roles/debian/python_pip_packages/defaults/main.yml +++ b/roles/debian/python_pip_packages/defaults/main.yml @@ -1,9 +1,9 @@ --- python_pip_packages: - # These are usually set in the _init role using _venv_path, _venv_command and _venv_install_username but can be overridden. - #venv_path: /path/to/venv - #venv_command: /usr/bin/python3.11 -m venv - install_username: "{{ _venv_install_username }}" # _venv_install_username is set in _init + # These are usually set in the _init role but can be overridden here. + venv_path: "{{ _venv_path }}" + venv_command: "{{ _venv_command }}" + install_username: "{{ _venv_install_username }}" packages: [] # - name: pip diff --git a/roles/debian/python_pip_packages/tasks/main.yml b/roles/debian/python_pip_packages/tasks/main.yml index ad855d12e..0bdbcd85b 100644 --- a/roles/debian/python_pip_packages/tasks/main.yml +++ b/roles/debian/python_pip_packages/tasks/main.yml @@ -2,14 +2,14 @@ - name: Install packages. ansible.builtin.pip: name: "{{ item.name }}" - state: "{{ item.state | default(omit) }}" - virtualenv: "{{ python_pip_packages.venv_path | default(_venv_path) }}" - virtualenv_command: "{{ python_pip_packages.venv_command | default(_venv_command) }}" + state: "{{ item.state|default(omit) }}" + virtualenv: "{{ python_pip_packages.venv_path }}" + virtualenv_command: "{{ python_pip_packages.venv_command }}" with_items: "{{ python_pip_packages.packages }}" - name: Ensure venv permissions. ansible.builtin.file: - path: "{{ python_pip_packages.venv_path | default(_venv_path) }}" + path: "{{ python_pip_packages.venv_path }}" state: directory recurse: true owner: "{{ python_pip_packages.install_username }}" From 08ea87e13ea9eab383013d6fa8b24ac46bd08166 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 20:29:38 +0200 Subject: [PATCH 29/37] Fixing up the ce_ansible role for Ansible 12. --- roles/debian/ansible/defaults/main.yml | 16 ++++++++-------- roles/debian/ansible/tasks/main.yml | 8 +------- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/roles/debian/ansible/defaults/main.yml b/roles/debian/ansible/defaults/main.yml index 47707d7d0..bacce1d11 100644 --- a/roles/debian/ansible/defaults/main.yml +++ b/roles/debian/ansible/defaults/main.yml @@ -1,14 +1,14 @@ --- ce_ansible: - # These are usually set in the _init role using _venv_path, _venv_command and _venv_install_username but can be overridden. - #venv_path: "/home/{{ ce_provision.username }}/ansible" - #venv_command: /usr/bin/python3.11 -m venv - #venv_install_username: ansible # user to become when creating venv - ansible_version: "<12.0" # also check install.sh script in the repo root and set the version there accordingly. + # These are usually set in the _init role but can be overridden here. + venv_path: "{{ _venv_path }}" + venv_command: "{{ _venv_command }}" + venv_install_username: "{{ _venv_install_username }}" + ansible_version: "" # if used with the install.sh script in the repo root, version strings should match upgrade: - enabled: false # create systemd timer to auto-upgrade Ansible. Temporary disabled due to ansible 2.19 breaking changes. + enabled: false # create systemd timer to auto-upgrade Ansible. 
Temporarily disabled due to Ansible 2.19 breaking changes
+    command: "{{ _venv_path }}/bin/python3 -m pip install --upgrade ansible" # if you set venv_path above then set it here too
-    on_calendar: "*-*-* 01:30:00" # see systemd.time documentation - https://www.freedesktop.org/software/systemd/man/latest/systemd.time.html#Calendar%20Events
+    on_calendar: "*-*-* 01:30:00" # see systemd.time documentation - https://www.freedesktop.org/software/systemd/man/latest/systemd.time.html#Calendar%20Events
     #timer_name: upgrade_ansible
   linters:
-    enabled: true # will not install linters if false, installing linters breaks cloud-init
+    enabled: true # will not install linters if false, installing linters breaks cloud-init
diff --git a/roles/debian/ansible/tasks/main.yml b/roles/debian/ansible/tasks/main.yml
index cdf6d0862..146c62e8d 100644
--- a/roles/debian/ansible/tasks/main.yml
+++ b/roles/debian/ansible/tasks/main.yml
@@ -21,20 +21,14 @@
 - name: Override Python venv path if provided.
   ansible.builtin.set_fact:
     _venv_path: "{{ ce_ansible.venv_path }}"
-  when:
-    - ce_ansible.venv_path is defined

 - name: Override Python venv command if provided.
   ansible.builtin.set_fact:
     _venv_command: "{{ ce_ansible.venv_command }}"
-  when:
-    - ce_ansible.venv_command is defined

 - name: Override Python user if provided.
   ansible.builtin.set_fact:
     _venv_install_username: "{{ ce_ansible.venv_install_username }}"
-  when:
-    - ce_ansible.venv_install_username is defined

 - name: Set up Python packages.
   ansible.builtin.include_role:
@@ -75,7 +69,7 @@

 - name: Add the venv to $PATH using profile.d.
   ansible.builtin.copy:
-    content: "export PATH=$PATH:{{ ce_ansible.venv_path | default(_venv_path) }}/bin"
+    content: "export PATH=$PATH:{{ ce_ansible.venv_path }}/bin"
     dest: "/etc/profile.d/ansible-path.sh"
     mode: '0644'

From 01623ebb5894f87474db67a5f520b98dfe4a837e Mon Sep 17 00:00:00 2001
From: Greg Harvey
Date: Wed, 1 Oct 2025 11:52:15 +0200
Subject: [PATCH 30/37] Removing unnecessary from_json filter from CloudFront acc ID lookup.

---
 roles/aws/aws_cloudfront_distribution/tasks/add_cf_function.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/roles/aws/aws_cloudfront_distribution/tasks/add_cf_function.yml b/roles/aws/aws_cloudfront_distribution/tasks/add_cf_function.yml
index a91c48477..f87ec0f7c 100644
--- a/roles/aws/aws_cloudfront_distribution/tasks/add_cf_function.yml
+++ b/roles/aws/aws_cloudfront_distribution/tasks/add_cf_function.yml
@@ -7,7 +7,7 @@

 - name: Setting previous command output into variable.
   ansible.builtin.set_fact:
-    _acc_id: "{{ _acc_id.stdout | from_json }}"
+    _acc_id: "{{ _acc_id.stdout }}"

 - name: Get CloudFront info.
   ansible.builtin.shell: "aws cloudfront get-distribution-config --id {{ _aws_cloudfront_distribution.id }} --output json > /tmp/dist-config.json"

From c5446f9800cc8e51323c0da6e6ce908b751b8f07 Mon Sep 17 00:00:00 2001
From: Greg Harvey
Date: Wed, 1 Oct 2025 14:03:37 +0200
Subject: [PATCH 31/37] Trying to fix AWS standalone builds.
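
The inventory lookup now searches for the sanitised hostgroup name,
presumably because the dynamic inventory publishes groups with hyphens
flattened to underscores and a leading underscore prefix. A worked
example with an invented hostname, showing the two forms:

    hostname in config:  web-01.example.com
    hostgroup searched:  _web_01.example.com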
--- roles/aws/aws_ec2_with_eip/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/aws/aws_ec2_with_eip/tasks/main.yml b/roles/aws/aws_ec2_with_eip/tasks/main.yml index 7f13d4d1f..d03ddcc5c 100644 --- a/roles/aws/aws_ec2_with_eip/tasks/main.yml +++ b/roles/aws/aws_ec2_with_eip/tasks/main.yml @@ -15,7 +15,7 @@ ansible.builtin.set_fact: _aws_hostname: "{{ item }}" with_inventory_hostnames: - - "{{ aws_ec2_with_eip.hostname }}" + - "_{{ aws_ec2_with_eip.hostname|regex_replace('-', '_') }}" # Subnet ID is stored in ce-provision's data directory - name: Ensure server data directory exists. @@ -133,7 +133,7 @@ volume_type: "{{ aws_ec2_with_eip.root_volume_type }}" encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}" register: _aws_ec2_with_eip_instances - when: (_aws_hostname | length == 0) or (_aws_hostname == aws_ec2_with_eip.hostname) or aws_ec2_with_eip.force + when: (_aws_hostname|length == 0) or aws_ec2_with_eip.force # This task deliberately omits `image_id` so it cannot create a new instance, only refresh the state of an existing one. - name: Refresh EC2 instance. @@ -159,7 +159,7 @@ volume_type: "{{ aws_ec2_with_eip.root_volume_type }}" encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}" register: _aws_ec2_with_eip_instances - when: (_aws_hostname | length > 0) or (_aws_hostname != aws_ec2_with_eip.hostname) or not aws_ec2_with_eip.force + when: (_aws_hostname|length > 0) or not aws_ec2_with_eip.force - name: Check if we have an existing EIP. amazon.aws.ec2_eip_info: From f62404c586fc54eab3c0e3f34de22055da135950 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 1 Oct 2025 15:33:29 +0200 Subject: [PATCH 32/37] Fixing standalone EC2 playbooks. --- plays/aws_ec2_standalone/ec2.yml | 2 +- plays/aws_ec2_standalone/launch.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plays/aws_ec2_standalone/ec2.yml b/plays/aws_ec2_standalone/ec2.yml index e64a9c7c7..bab077c83 100644 --- a/plays/aws_ec2_standalone/ec2.yml +++ b/plays/aws_ec2_standalone/ec2.yml @@ -1,6 +1,6 @@ --- # First step. Spin up a "blank" instance and add the controller user and Ansible via user-data. -- hosts: "{{ _aws_resource_name }}" +- hosts: "_{{ _aws_resource_name | regex_replace('-', '_') }}" connection: local become: false diff --git a/plays/aws_ec2_standalone/launch.yml b/plays/aws_ec2_standalone/launch.yml index 5f207ca44..cb1ed7373 100644 --- a/plays/aws_ec2_standalone/launch.yml +++ b/plays/aws_ec2_standalone/launch.yml @@ -25,7 +25,7 @@ - "_{{ _aws_resource_name | regex_replace('-', '_') }}" - name: If an Ansible host is not found, create it so we can execute EC2 orchestration. ansible.builtin.add_host: - name: "{{ _aws_resource_name }}" + name: "_{{ _aws_resource_name | regex_replace('-', '_') }}" groups: "_new_servers" when: _aws_hostname | length == 0 - ansible.builtin.import_role: From 2d885f00313a65db1480ba9b996503bfab47db3e Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 1 Oct 2025 15:43:56 +0200 Subject: [PATCH 33/37] Adding hostname print out for debug. --- roles/aws/aws_ec2_with_eip/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/roles/aws/aws_ec2_with_eip/tasks/main.yml b/roles/aws/aws_ec2_with_eip/tasks/main.yml index d03ddcc5c..5a50c15b0 100644 --- a/roles/aws/aws_ec2_with_eip/tasks/main.yml +++ b/roles/aws/aws_ec2_with_eip/tasks/main.yml @@ -17,6 +17,10 @@ with_inventory_hostnames: - "_{{ aws_ec2_with_eip.hostname|regex_replace('-', '_') }}" +- name: Check the hostname. 
+ ansible.builtin.debug: + msg: "Ansible hostname set to: {{ _aws_hostname }}" + # Subnet ID is stored in ce-provision's data directory - name: Ensure server data directory exists. ansible.builtin.file: From fecad193fa871216fd9c48a438eb2c9e83dc03bd Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 1 Oct 2025 16:11:02 +0200 Subject: [PATCH 34/37] Adding back in the hostname check. --- roles/aws/aws_ec2_with_eip/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/aws/aws_ec2_with_eip/tasks/main.yml b/roles/aws/aws_ec2_with_eip/tasks/main.yml index 5a50c15b0..224d4b5c6 100644 --- a/roles/aws/aws_ec2_with_eip/tasks/main.yml +++ b/roles/aws/aws_ec2_with_eip/tasks/main.yml @@ -137,7 +137,7 @@ volume_type: "{{ aws_ec2_with_eip.root_volume_type }}" encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}" register: _aws_ec2_with_eip_instances - when: (_aws_hostname|length == 0) or aws_ec2_with_eip.force + when: (_aws_hostname|length == 0) or (_aws_hostname == aws_ec2_with_eip.hostname|regex_replace('-', '_')) or aws_ec2_with_eip.force # This task deliberately omits `image_id` so it cannot create a new instance, only refresh the state of an existing one. - name: Refresh EC2 instance. @@ -163,7 +163,7 @@ volume_type: "{{ aws_ec2_with_eip.root_volume_type }}" encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}" register: _aws_ec2_with_eip_instances - when: (_aws_hostname|length > 0) or not aws_ec2_with_eip.force + when: (_aws_hostname|length > 0) or (_aws_hostname != aws_ec2_with_eip.hostname|regex_replace('-', '_')) or not aws_ec2_with_eip.force - name: Check if we have an existing EIP. amazon.aws.ec2_eip_info: From 7f7afdaf81bc5bd6f8593ccab3e9eb14e094c367 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 1 Oct 2025 16:17:53 +0200 Subject: [PATCH 35/37] Fixing AWS hostname variable in comparisons. --- roles/aws/aws_ec2_with_eip/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/aws/aws_ec2_with_eip/tasks/main.yml b/roles/aws/aws_ec2_with_eip/tasks/main.yml index 224d4b5c6..6ac8dee5a 100644 --- a/roles/aws/aws_ec2_with_eip/tasks/main.yml +++ b/roles/aws/aws_ec2_with_eip/tasks/main.yml @@ -137,7 +137,7 @@ volume_type: "{{ aws_ec2_with_eip.root_volume_type }}" encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}" register: _aws_ec2_with_eip_instances - when: (_aws_hostname|length == 0) or (_aws_hostname == aws_ec2_with_eip.hostname|regex_replace('-', '_')) or aws_ec2_with_eip.force + when: (_aws_hostname|length == 0) or (_aws_hostname == '_' + aws_ec2_with_eip.hostname|regex_replace('-', '_')) or aws_ec2_with_eip.force # This task deliberately omits `image_id` so it cannot create a new instance, only refresh the state of an existing one. - name: Refresh EC2 instance. @@ -163,7 +163,7 @@ volume_type: "{{ aws_ec2_with_eip.root_volume_type }}" encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}" register: _aws_ec2_with_eip_instances - when: (_aws_hostname|length > 0) or (_aws_hostname != aws_ec2_with_eip.hostname|regex_replace('-', '_')) or not aws_ec2_with_eip.force + when: (_aws_hostname|length > 0) or (_aws_hostname != '_' + aws_ec2_with_eip.hostname|regex_replace('-', '_')) or not aws_ec2_with_eip.force - name: Check if we have an existing EIP. amazon.aws.ec2_eip_info: From 7cb856cb9dedff7bbb541777cd2b85a51a8736e7 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 1 Oct 2025 16:50:03 +0200 Subject: [PATCH 36/37] Trying to find a hostname variation that meets all requirements. 
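
The compromise: keep searching the inventory for the sanitised group
name, but register brand-new hosts under the raw resource name, and
drop the sanitised-name comparisons from the create and refresh
conditions. For a hypothetical resource named my-server the two forms
in play are:

    group searched in inventory:  _my_server
    name passed to add_host:      my-server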
---
 plays/aws_ec2_standalone/ec2.yml | 2 +-
 plays/aws_ec2_standalone/launch.yml | 4 ++--
 roles/aws/aws_ec2_with_eip/tasks/main.yml | 6 +++---
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/plays/aws_ec2_standalone/ec2.yml b/plays/aws_ec2_standalone/ec2.yml
index bab077c83..e64a9c7c7 100644
--- a/plays/aws_ec2_standalone/ec2.yml
+++ b/plays/aws_ec2_standalone/ec2.yml
@@ -1,6 +1,6 @@
 ---
 # First step. Spin up a "blank" instance and add the controller user and Ansible via user-data.
-- hosts: "_{{ _aws_resource_name | regex_replace('-', '_') }}"
+- hosts: "{{ _aws_resource_name }}"

   connection: local
   become: false
diff --git a/plays/aws_ec2_standalone/launch.yml b/plays/aws_ec2_standalone/launch.yml
index cb1ed7373..c10de7bc7 100644
--- a/plays/aws_ec2_standalone/launch.yml
+++ b/plays/aws_ec2_standalone/launch.yml
@@ -25,8 +25,8 @@
         - "_{{ _aws_resource_name | regex_replace('-', '_') }}"
     - name: If an Ansible host is not found, create it so we can execute EC2 orchestration.
       ansible.builtin.add_host:
-        name: "_{{ _aws_resource_name | regex_replace('-', '_') }}"
+        name: "{{ _aws_resource_name }}"
         groups: "_new_servers"
-      when: _aws_hostname | length == 0
+      when: _aws_hostname|length == 0
    - ansible.builtin.import_role:
        name: _exit
diff --git a/roles/aws/aws_ec2_with_eip/tasks/main.yml b/roles/aws/aws_ec2_with_eip/tasks/main.yml
index 6ac8dee5a..4739abf52 100644
--- a/roles/aws/aws_ec2_with_eip/tasks/main.yml
+++ b/roles/aws/aws_ec2_with_eip/tasks/main.yml
@@ -109,7 +109,7 @@
     msg: "No AMI found using the provided filters, exiting!"
   when: _aws_ec2_with_eip_ami_images.images | length == 0

-# Do not create an instance if _aws_hostname is not an EC2 generated address unless `force: true`
+# Only create an instance if _aws_hostname is empty (it can only be set when there is already an EC2 generated address in a tagged hostgroup) or if `force: true` is used
 - name: Create new EC2 instance.
   amazon.aws.ec2_instance:
     profile: "{{ aws_ec2_with_eip.aws_profile }}"
@@ -137,7 +137,7 @@
     volume_type: "{{ aws_ec2_with_eip.root_volume_type }}"
     encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}"
   register: _aws_ec2_with_eip_instances
-  when: (_aws_hostname|length == 0) or (_aws_hostname == '_' + aws_ec2_with_eip.hostname|regex_replace('-', '_')) or aws_ec2_with_eip.force
+  when: (_aws_hostname|length == 0) or aws_ec2_with_eip.force

 # This task deliberately omits `image_id` so it cannot create a new instance, only refresh the state of an existing one.
 - name: Refresh EC2 instance.
@@ -163,7 +163,7 @@
     volume_type: "{{ aws_ec2_with_eip.root_volume_type }}"
     encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}"
   register: _aws_ec2_with_eip_instances
-  when: (_aws_hostname|length > 0) or (_aws_hostname != '_' + aws_ec2_with_eip.hostname|regex_replace('-', '_')) or not aws_ec2_with_eip.force
+  when: (_aws_hostname|length > 0) or not aws_ec2_with_eip.force

 - name: Check if we have an existing EIP.
   amazon.aws.ec2_eip_info:
From 3de1120be4fc432b7835371148e4ec89276c77d1 Mon Sep 17 00:00:00 2001
From: Greg Harvey
Date: Wed, 1 Oct 2025 16:57:03 +0200
Subject: [PATCH 37/37] Adding both hostnames to ec2.yml.

---
 plays/aws_ec2_standalone/ec2.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/plays/aws_ec2_standalone/ec2.yml b/plays/aws_ec2_standalone/ec2.yml
index e64a9c7c7..be2bd41e7 100644
--- a/plays/aws_ec2_standalone/ec2.yml
+++ b/plays/aws_ec2_standalone/ec2.yml
@@ -1,6 +1,6 @@
 ---
 # First step. Spin up a "blank" instance and add the controller user and Ansible via user-data.
-- hosts: "{{ _aws_resource_name }}" +- hosts: "{{ _aws_resource_name }}, _{{ _aws_resource_name | regex_replace('-', '_') }}" connection: local become: false