From a8ba8e44e5dc7146ab22aa424c67f46a23299af1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matej=20=C5=A0tajduhar?= <30931414+matej5@users.noreply.github.com> Date: Thu, 4 Sep 2025 16:49:48 +0200 Subject: [PATCH 01/61] Fixing-email-title-for-backup-validation (#2657) Co-authored-by: Matej Stajduhar --- .../aws/aws_backup_validation/templates/validation_report.py.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/aws/aws_backup_validation/templates/validation_report.py.j2 b/roles/aws/aws_backup_validation/templates/validation_report.py.j2 index bc80f32a7..7501db7ca 100644 --- a/roles/aws/aws_backup_validation/templates/validation_report.py.j2 +++ b/roles/aws/aws_backup_validation/templates/validation_report.py.j2 @@ -126,7 +126,7 @@ failed_job = backup_cli.list_restore_jobs( }, 'Subject': { 'Charset': 'UTF-8', - 'Data': 'Restore testing - {{ _aws_profile }}: ' + mail_title, + 'Data': 'Restore testing - {{ _infra_name }}: ' + mail_title, }, }, Source='Lambda Backup Validation ', From 057138d196ffc97cbdf7a821362cea2d2d0a74c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matej=20=C5=A0tajduhar?= <30931414+matej5@users.noreply.github.com> Date: Mon, 8 Sep 2025 13:56:58 +0200 Subject: [PATCH 02/61] Adding-task-to-create-aurora-cluster (#2659) * Adding-task-to-create-aurora-cluster * Adding-region-profile-and-tags-to-aurora-cluster * Updating-engine-for-aurora-cluster * Updating-parameter-group-engine * Updating-engine-version * Updating-engine-version-2 * Disabling-automated-backups * Disabling-automated-backups-2 * Disabling-automated-backups-3 * Disabling-automated-backups-4 * Skipping-task-if-not-aurora * Adding-subnet-group-to-instances * Adding-subnet-group-to-instances * Updating-SG-return-values * Updating-SG-return-values-2 * Updating-SG-return-values-3 * Updating-SG-return-values-4 * Updating-SG-return-value-debug * Updating-SG-return-value-debug-2 * Updating-SG-return-value-debug-3 * Removing-debug-tasks * Removing-init-var-for-SG-list * Adding-character-set-option --------- Co-authored-by: Matej Stajduhar --- roles/aws/aws_rds/tasks/main.yml | 41 +++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 11 deletions(-) diff --git a/roles/aws/aws_rds/tasks/main.yml b/roles/aws/aws_rds/tasks/main.yml index 977e9959c..28aff345a 100644 --- a/roles/aws/aws_rds/tasks/main.yml +++ b/roles/aws/aws_rds/tasks/main.yml @@ -23,12 +23,40 @@ - aws_rds.db_parameters is defined - aws_rds.db_parameters | length > 0 +- name: Generate security group information. + ansible.builtin.include_role: + name: aws/aws_security_groups + vars: + aws_security_groups: + profile: "{{ aws_rds.aws_profile }}" + region: "{{ aws_rds.region }}" + group_names: "{{ aws_rds.security_groups }}" + return_type: ids + when: aws_rds.security_groups | length > 0 + +- name: Create Aurora cluster. + amazon.aws.rds_cluster: + profile: "{{ aws_rds.aws_profile }}" + region: "{{ aws_rds.region }}" + cluster_id: "{{ aws_rds.name }}" + engine: "{{ aws_rds.engine }}" + engine_version: "{{ aws_rds.engine_version }}" + username: "{{ aws_rds.master_username }}" + password: "{{ aws_rds.master_user_password }}" + db_subnet_group_name: "{{ aws_rds.name }}" + vpc_security_group_ids: "{{ _aws_security_group_list }}" + backup_retention_period: "{{ aws_rds.backup_retention_period | default(35) }}" + character_set_name: "{{ aws_rds.character_set_name | default(omit) }}" + tags: "{{ aws_rds.tags | combine({'Name': aws_rds.name}) }}" + when: "'aurora' in aws_rds.engine" + - name: Create Aurora RDS instance. 
amazon.aws.rds_instance: db_instance_identifier: "{{ aws_rds.name }}-{{ aws_rds.aurora_suffix }}" db_instance_class: "{{ aws_rds.db_instance_class }}" db_cluster_identifier: "{{ aws_rds.db_cluster_identifier | default(aws_rds.name) }}" db_parameter_group_name: "{{ aws_rds.db_parameter_group_name | default(omit) }}" + db_subnet_group_name: "{{ aws_rds.name }}" state: "{{ aws_rds.state }}" engine: "{{ aws_rds.engine }}" copy_tags_to_snapshot: true @@ -49,6 +77,7 @@ db_cluster_identifier: "{{ aws_rds.db_cluster_identifier | default(aws_rds.name) }}" db_instance_class: "{{ aws_rds.db_instance_class }}" db_parameter_group_name: "{{ aws_rds.db_parameter_group_name | default(omit) }}" + db_subnet_group_name: "{{ aws_rds.name }}" state: "{{ aws_rds.state }}" engine: "{{ aws_rds.engine }}" copy_tags_to_snapshot: true @@ -64,17 +93,6 @@ - "'aurora' in aws_rds.engine" - aws_rds.aurora_reader -- name: Generate security group information. - ansible.builtin.include_role: - name: aws/aws_security_groups - vars: - aws_security_groups: - profile: "{{ aws_rds.aws_profile }}" - region: "{{ aws_rds.region }}" - group_names: "{{ aws_rds.security_groups }}" - return_type: ids - when: aws_rds.security_groups | length > 0 - - name: Create RDS instance. amazon.aws.rds_instance: profile: "{{ aws_rds.aws_profile }}" @@ -214,3 +232,4 @@ when: - aws_rds.backup is defined - aws_rds.backup | length > 0 + - "'aurora' not in aws_rds.engine" From 17115af44e2d926c84e1e8a0c797620380ee91af Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 10:55:22 +0200 Subject: [PATCH 03/61] Fixing installer variable bug. --- install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/install.sh b/install.sh index b74950a87..c8c8b8db4 100755 --- a/install.sh +++ b/install.sh @@ -210,7 +210,7 @@ ce_provision: venv_install_username: ${CONTROLLER_USER} upgrade_timer_name: upgrade_ce_provision_ansible aws_support: ${AWS_SUPPORT} - new_user: ${CONTROLLER_USER} + new_user: true username: ${CONTROLLER_USER} ssh_key_bits: "521" ssh_key_type: ed25519 From 176df44f01cc27a353dc28eae8d5f4da2521421f Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 11:11:21 +0200 Subject: [PATCH 04/61] Fixing tests for external PRs. 
--- .github/workflows/ce-provision-test-gitlab.yml | 2 +- .github/workflows/ce-provision-test-web.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ce-provision-test-gitlab.yml b/.github/workflows/ce-provision-test-gitlab.yml index 9e5b46a30..566ea1377 100644 --- a/.github/workflows/ce-provision-test-gitlab.yml +++ b/.github/workflows/ce-provision-test-gitlab.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/curl -LO https://raw.githubusercontent.com/codeenigma/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh + /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.owner.login }}/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall diff --git a/.github/workflows/ce-provision-test-web.yml b/.github/workflows/ce-provision-test-web.yml index 595905064..c8ae20bbe 100644 --- a/.github/workflows/ce-provision-test-web.yml +++ b/.github/workflows/ce-provision-test-web.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/curl -LO https://raw.githubusercontent.com/codeenigma/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh + /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.owner.login }}/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall From 24845320411170dc5ac23d90c05837de38183dbe Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 11:15:22 +0200 Subject: [PATCH 05/61] Testing with a fork. 
--- .github/workflows/ce-provision-test-gitlab.yml | 2 +- .github/workflows/ce-provision-test-web.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ce-provision-test-gitlab.yml b/.github/workflows/ce-provision-test-gitlab.yml index 566ea1377..9d6670617 100644 --- a/.github/workflows/ce-provision-test-gitlab.yml +++ b/.github/workflows/ce-provision-test-gitlab.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.owner.login }}/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh + /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.name }}/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall diff --git a/.github/workflows/ce-provision-test-web.yml b/.github/workflows/ce-provision-test-web.yml index c8ae20bbe..509d0e2d3 100644 --- a/.github/workflows/ce-provision-test-web.yml +++ b/.github/workflows/ce-provision-test-web.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.owner.login }}/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh + /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.name }}/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall From cde9a6037638f76b2e2615146fb2550eaaf4d820 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 11:18:41 +0200 Subject: [PATCH 06/61] Adding repo owner's username into installer string. 
--- .github/workflows/ce-provision-test-gitlab.yml | 2 +- .github/workflows/ce-provision-test-web.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ce-provision-test-gitlab.yml b/.github/workflows/ce-provision-test-gitlab.yml index 9d6670617..6da7b5a4b 100644 --- a/.github/workflows/ce-provision-test-gitlab.yml +++ b/.github/workflows/ce-provision-test-gitlab.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.name }}/${{ github.event.pull_request.head.ref }}/install.sh + /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.owner.login }}/${{ github.event.pull_request.head.repo.name }}/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall diff --git a/.github/workflows/ce-provision-test-web.yml b/.github/workflows/ce-provision-test-web.yml index 509d0e2d3..e95bf6337 100644 --- a/.github/workflows/ce-provision-test-web.yml +++ b/.github/workflows/ce-provision-test-web.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.name }}/${{ github.event.pull_request.head.ref }}/install.sh + /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.owner.login }}/${{ github.event.pull_request.head.repo.name }}/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall From a2c4bac692aa51a4eee16f268988fc7cdfe42a4c Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 11:35:23 +0200 Subject: [PATCH 07/61] Refactoring config repo detection to simplify. --- roles/debian/ce_provision/tasks/main.yml | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/roles/debian/ce_provision/tasks/main.yml b/roles/debian/ce_provision/tasks/main.yml index 792eea3d6..0ccd6e680 100644 --- a/roles/debian/ce_provision/tasks/main.yml +++ b/roles/debian/ce_provision/tasks/main.yml @@ -57,11 +57,6 @@ filename: "{{ ce_provision.username }}" when: _ce_provision_username != ce_provision.username -# This prevent the original var to be re-evaluated when we move things around. -- name: Register config repository. - ansible.builtin.set_fact: - ce_provision_has_config_repo: "{{ 'yes' if ce_provision.config_repository else 'no' }}" - - name: Ensure APT dependencies are installed. ansible.builtin.apt: pkg: ["git", "parallel"] @@ -102,7 +97,7 @@ become: true become_user: "{{ ce_provision.username }}" when: - - ce_provision_has_config_repo + - ce_provision.config_repository | length > 0 - not ce_provision.config_repository_skip_checkout - name: Create defaults folders. @@ -111,13 +106,13 @@ state: directory with_items: - hosts - when: not ce_provision_has_config_repo + when: not ce_provision.config_repository | length > 0 - name: Create default config. ansible.builtin.copy: src: ansible.cfg dest: "{{ ce_provision.local_dir }}/ansible.cfg" - when: not ce_provision_has_config_repo + when: not ce_provision.config_repository | length > 0 - name: Symlink config folders to /etc/ansible. 
ansible.builtin.file: @@ -129,7 +124,7 @@ - files - templates - ansible.cfg - when: ce_provision_has_config_repo + when: ce_provision.config_repository | length > 0 - name: Create data dir. ansible.builtin.file: From 7ce204b56cfe7039f4a462fa97be46a824cd7fad Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 11:48:58 +0200 Subject: [PATCH 08/61] No longer permitted to use an integer as a truthy value. --- roles/debian/user_ansible/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/debian/user_ansible/tasks/main.yml b/roles/debian/user_ansible/tasks/main.yml index 93290f410..5f138f26a 100644 --- a/roles/debian/user_ansible/tasks/main.yml +++ b/roles/debian/user_ansible/tasks/main.yml @@ -13,7 +13,7 @@ with_items: "{{ user_ansible.groups }}" loop_control: loop_var: group - when: user_ansible.groups | length + when: user_ansible.groups | length > 0 - name: Create the system user. ansible.builtin.user: @@ -74,7 +74,7 @@ owner: "{{ user_ansible.username }}" group: "{{ user_ansible.username }}" mode: '0600' - when: user_ansible.known_hosts | length + when: user_ansible.known_hosts | length > 0 - name: Add public keys to known_hosts. ansible.builtin.known_hosts: From c7ae00387857c07e59e8ef8a9e74d0c3a1dee172 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 11:56:53 +0200 Subject: [PATCH 09/61] No longer permitted to use existence check as a truthy value. --- roles/_init/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/_init/tasks/main.yml b/roles/_init/tasks/main.yml index c401fefb3..82668f566 100644 --- a/roles/_init/tasks/main.yml +++ b/roles/_init/tasks/main.yml @@ -121,9 +121,9 @@ - name: Load custom vars file. ansible.builtin.include_tasks: allowed_vars.yml when: - - _init.ce_provision_extra_repository - - _init.ce_provision_extra_repository_vars_file - - _init.ce_provision_extra_repository_allowed_vars + - _init.ce_provision_extra_repository | length > 0 + - _init.ce_provision_extra_repository_vars_file | length > 0 + - _init.ce_provision_extra_repository_allowed_vars | length > 0 # Install Ansible under the controller user for all servers # Ensure ansible_connection == 'ssh' (i.e. we are connecting to a server) before executing From 6379b2e39df24121df90b8a6fae8e7a1faf9f22e Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 12:27:22 +0200 Subject: [PATCH 10/61] Can't see a reason why linotp var shouldn't be a boolean. --- roles/debian/apt_unattended_upgrades/defaults/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/debian/apt_unattended_upgrades/defaults/main.yml b/roles/debian/apt_unattended_upgrades/defaults/main.yml index 855c7f924..a63f3e140 100644 --- a/roles/debian/apt_unattended_upgrades/defaults/main.yml +++ b/roles/debian/apt_unattended_upgrades/defaults/main.yml @@ -4,7 +4,7 @@ _apt_unattended_upgrades_default_origins: - "origin=Debian,codename=${distro_codename}-security,label=Debian-Security" apt_unattended_upgrades: enable: true - linotp: "false" + linotp: false # unattended-upgrades template vars. # booleans must be strings to avoid Jinja2 interpretting. origins: "{{ _apt_unattended_upgrades_default_origins }}" From 318f532d6e145c044d901f90b3f3b5572df22de5 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 12:38:29 +0200 Subject: [PATCH 11/61] No longer permitted to use existence check as a truthy value. 
--- roles/_exit/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/_exit/tasks/main.yml b/roles/_exit/tasks/main.yml index 51d676278..b9dce908d 100644 --- a/roles/_exit/tasks/main.yml +++ b/roles/_exit/tasks/main.yml @@ -3,8 +3,8 @@ - name: Generate/Update custom vars file. ansible.builtin.include_tasks: allowed_vars.yml when: - - _init.ce_provision_extra_repository - - _init.ce_provision_extra_repository_vars_file + - _init.ce_provision_extra_repository | length > 0 + - _init.ce_provision_extra_repository_vars_file | length > 0 - _init.ce_provision_extra_repository_push - name: Store current playbook md5. From 1466d24f87123b195af6e77b2f4ea755a9e68704 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 12:47:27 +0200 Subject: [PATCH 12/61] Fixing truthy errors in ce_deploy role. --- roles/debian/ce_deploy/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/debian/ce_deploy/tasks/main.yml b/roles/debian/ce_deploy/tasks/main.yml index 15f2265dd..526d2bf86 100644 --- a/roles/debian/ce_deploy/tasks/main.yml +++ b/roles/debian/ce_deploy/tasks/main.yml @@ -62,7 +62,7 @@ version: "{{ ce_deploy.config_repository_branch | default('main') }}" become: false delegate_to: localhost - when: ce_deploy.config_repository is defined and ce_deploy.config_repository + when: ce_deploy.config_repository is defined and ce_deploy.config_repository | length > 0 - name: Synchronize config directory. ansible.posix.synchronize: @@ -71,7 +71,7 @@ delete: true rsync_opts: - "--chown={{ ce_deploy.username }}:{{ ce_deploy.username }}" - when: ce_deploy.config_repository is defined and ce_deploy.config_repository + when: ce_deploy.config_repository is defined and ce_deploy.config_repository | length > 0 - name: Check if we have a config directory. ansible.builtin.stat: @@ -81,7 +81,7 @@ - name: Register config repository. ansible.builtin.set_fact: key_value: ce_deploy_has_config_repo - ce_deploy_has_config_repo: "{{ 'yes' if ce_deploy_config_repo.stat.isdir is defined and ce_deploy_config_repo.stat.isdir else 'no' }}" + ce_deploy_has_config_repo: "{{ true if ce_deploy_config_repo.stat.isdir is defined and ce_deploy_config_repo.stat.isdir else false }}" - name: Create defaults folders. ansible.builtin.file: From 3c14dfa005a2ece232f3ea65574d8666e93d2fef Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 12:58:45 +0200 Subject: [PATCH 13/61] No longer permitted to use an integer as a truthy value. --- roles/debian/ssh_server/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/debian/ssh_server/tasks/main.yml b/roles/debian/ssh_server/tasks/main.yml index 8d52d8eee..47c07ed41 100644 --- a/roles/debian/ssh_server/tasks/main.yml +++ b/roles/debian/ssh_server/tasks/main.yml @@ -18,7 +18,7 @@ with_items: "{{ sshd.groups }}" loop_control: loop_var: group - when: sshd.groups | length + when: sshd.groups | length > 0 - name: Generate group section of the sshd_config file. ansible.builtin.blockinfile: @@ -29,7 +29,7 @@ with_items: "{{ sshd.groups }}" loop_control: loop_var: group - when: sshd.groups | length + when: sshd.groups | length > 0 - name: Generate user section of the sshd_config file. 
ansible.builtin.blockinfile: @@ -40,7 +40,7 @@ with_items: "{{ sshd.users }}" loop_control: loop_var: users - when: sshd.users | length + when: sshd.users | length > 0 # - name: Trigger overrides # include_role: From 2b30a7848829b2eae2e82c9871888e97741520ea Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 10 Sep 2025 13:23:01 +0200 Subject: [PATCH 14/61] Fixing truthy variable mistakes. (#2662) * Fixing installer variable bug. * Fixing tests for external PRs. * Testing with a fork. * Adding repo owner's username into installer string. * Refactoring config repo detection to simplify. * No longer permitted to use an integer as a truthy value. * No longer permitted to use existence check as a truthy value. * Can't see a reason why linotp var shouldn't be a boolean. * No longer permitted to use existence check as a truthy value. * Fixing truthy errors in ce_deploy role. * No longer permitted to use an integer as a truthy value. --- .github/workflows/ce-provision-test-gitlab.yml | 2 +- .github/workflows/ce-provision-test-web.yml | 2 +- install.sh | 2 +- roles/_exit/tasks/main.yml | 4 ++-- roles/_init/tasks/main.yml | 6 +++--- .../apt_unattended_upgrades/defaults/main.yml | 2 +- roles/debian/ce_deploy/tasks/main.yml | 6 +++--- roles/debian/ce_provision/tasks/main.yml | 13 ++++--------- roles/debian/ssh_server/tasks/main.yml | 6 +++--- roles/debian/user_ansible/tasks/main.yml | 4 ++-- 10 files changed, 21 insertions(+), 26 deletions(-) diff --git a/.github/workflows/ce-provision-test-gitlab.yml b/.github/workflows/ce-provision-test-gitlab.yml index 9e5b46a30..6da7b5a4b 100644 --- a/.github/workflows/ce-provision-test-gitlab.yml +++ b/.github/workflows/ce-provision-test-gitlab.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/curl -LO https://raw.githubusercontent.com/codeenigma/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh + /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.owner.login }}/${{ github.event.pull_request.head.repo.name }}/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall diff --git a/.github/workflows/ce-provision-test-web.yml b/.github/workflows/ce-provision-test-web.yml index 595905064..e95bf6337 100644 --- a/.github/workflows/ce-provision-test-web.yml +++ b/.github/workflows/ce-provision-test-web.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Install ce-provision run: | - /usr/bin/curl -LO https://raw.githubusercontent.com/codeenigma/ce-provision/${{ github.event.pull_request.head.ref }}/install.sh + /usr/bin/curl -LO https://raw.githubusercontent.com/${{ github.event.pull_request.head.repo.owner.login }}/${{ github.event.pull_request.head.repo.name }}/${{ github.event.pull_request.head.ref }}/install.sh /usr/bin/chmod +x ./install.sh /usr/bin/sudo ./install.sh --version ${{ github.event.pull_request.head.ref }} --config-branch ${{ github.event.pull_request.base.ref }} --docker --no-firewall diff --git a/install.sh b/install.sh index b74950a87..c8c8b8db4 100755 --- a/install.sh +++ b/install.sh @@ -210,7 +210,7 @@ ce_provision: venv_install_username: ${CONTROLLER_USER} upgrade_timer_name: upgrade_ce_provision_ansible aws_support: ${AWS_SUPPORT} - new_user: ${CONTROLLER_USER} + new_user: true username: ${CONTROLLER_USER} ssh_key_bits: "521" ssh_key_type: ed25519 diff --git 
a/roles/_exit/tasks/main.yml b/roles/_exit/tasks/main.yml index 51d676278..b9dce908d 100644 --- a/roles/_exit/tasks/main.yml +++ b/roles/_exit/tasks/main.yml @@ -3,8 +3,8 @@ - name: Generate/Update custom vars file. ansible.builtin.include_tasks: allowed_vars.yml when: - - _init.ce_provision_extra_repository - - _init.ce_provision_extra_repository_vars_file + - _init.ce_provision_extra_repository | length > 0 + - _init.ce_provision_extra_repository_vars_file | length > 0 - _init.ce_provision_extra_repository_push - name: Store current playbook md5. diff --git a/roles/_init/tasks/main.yml b/roles/_init/tasks/main.yml index c401fefb3..82668f566 100644 --- a/roles/_init/tasks/main.yml +++ b/roles/_init/tasks/main.yml @@ -121,9 +121,9 @@ - name: Load custom vars file. ansible.builtin.include_tasks: allowed_vars.yml when: - - _init.ce_provision_extra_repository - - _init.ce_provision_extra_repository_vars_file - - _init.ce_provision_extra_repository_allowed_vars + - _init.ce_provision_extra_repository | length > 0 + - _init.ce_provision_extra_repository_vars_file | length > 0 + - _init.ce_provision_extra_repository_allowed_vars | length > 0 # Install Ansible under the controller user for all servers # Ensure ansible_connection == 'ssh' (i.e. we are connecting to a server) before executing diff --git a/roles/debian/apt_unattended_upgrades/defaults/main.yml b/roles/debian/apt_unattended_upgrades/defaults/main.yml index 855c7f924..a63f3e140 100644 --- a/roles/debian/apt_unattended_upgrades/defaults/main.yml +++ b/roles/debian/apt_unattended_upgrades/defaults/main.yml @@ -4,7 +4,7 @@ _apt_unattended_upgrades_default_origins: - "origin=Debian,codename=${distro_codename}-security,label=Debian-Security" apt_unattended_upgrades: enable: true - linotp: "false" + linotp: false # unattended-upgrades template vars. # booleans must be strings to avoid Jinja2 interpretting. origins: "{{ _apt_unattended_upgrades_default_origins }}" diff --git a/roles/debian/ce_deploy/tasks/main.yml b/roles/debian/ce_deploy/tasks/main.yml index 15f2265dd..526d2bf86 100644 --- a/roles/debian/ce_deploy/tasks/main.yml +++ b/roles/debian/ce_deploy/tasks/main.yml @@ -62,7 +62,7 @@ version: "{{ ce_deploy.config_repository_branch | default('main') }}" become: false delegate_to: localhost - when: ce_deploy.config_repository is defined and ce_deploy.config_repository + when: ce_deploy.config_repository is defined and ce_deploy.config_repository | length > 0 - name: Synchronize config directory. ansible.posix.synchronize: @@ -71,7 +71,7 @@ delete: true rsync_opts: - "--chown={{ ce_deploy.username }}:{{ ce_deploy.username }}" - when: ce_deploy.config_repository is defined and ce_deploy.config_repository + when: ce_deploy.config_repository is defined and ce_deploy.config_repository | length > 0 - name: Check if we have a config directory. ansible.builtin.stat: @@ -81,7 +81,7 @@ - name: Register config repository. ansible.builtin.set_fact: key_value: ce_deploy_has_config_repo - ce_deploy_has_config_repo: "{{ 'yes' if ce_deploy_config_repo.stat.isdir is defined and ce_deploy_config_repo.stat.isdir else 'no' }}" + ce_deploy_has_config_repo: "{{ true if ce_deploy_config_repo.stat.isdir is defined and ce_deploy_config_repo.stat.isdir else false }}" - name: Create defaults folders. 
ansible.builtin.file: diff --git a/roles/debian/ce_provision/tasks/main.yml b/roles/debian/ce_provision/tasks/main.yml index 792eea3d6..0ccd6e680 100644 --- a/roles/debian/ce_provision/tasks/main.yml +++ b/roles/debian/ce_provision/tasks/main.yml @@ -57,11 +57,6 @@ filename: "{{ ce_provision.username }}" when: _ce_provision_username != ce_provision.username -# This prevent the original var to be re-evaluated when we move things around. -- name: Register config repository. - ansible.builtin.set_fact: - ce_provision_has_config_repo: "{{ 'yes' if ce_provision.config_repository else 'no' }}" - - name: Ensure APT dependencies are installed. ansible.builtin.apt: pkg: ["git", "parallel"] @@ -102,7 +97,7 @@ become: true become_user: "{{ ce_provision.username }}" when: - - ce_provision_has_config_repo + - ce_provision.config_repository | length > 0 - not ce_provision.config_repository_skip_checkout - name: Create defaults folders. @@ -111,13 +106,13 @@ state: directory with_items: - hosts - when: not ce_provision_has_config_repo + when: not ce_provision.config_repository | length > 0 - name: Create default config. ansible.builtin.copy: src: ansible.cfg dest: "{{ ce_provision.local_dir }}/ansible.cfg" - when: not ce_provision_has_config_repo + when: not ce_provision.config_repository | length > 0 - name: Symlink config folders to /etc/ansible. ansible.builtin.file: @@ -129,7 +124,7 @@ - files - templates - ansible.cfg - when: ce_provision_has_config_repo + when: ce_provision.config_repository | length > 0 - name: Create data dir. ansible.builtin.file: diff --git a/roles/debian/ssh_server/tasks/main.yml b/roles/debian/ssh_server/tasks/main.yml index 8d52d8eee..47c07ed41 100644 --- a/roles/debian/ssh_server/tasks/main.yml +++ b/roles/debian/ssh_server/tasks/main.yml @@ -18,7 +18,7 @@ with_items: "{{ sshd.groups }}" loop_control: loop_var: group - when: sshd.groups | length + when: sshd.groups | length > 0 - name: Generate group section of the sshd_config file. ansible.builtin.blockinfile: @@ -29,7 +29,7 @@ with_items: "{{ sshd.groups }}" loop_control: loop_var: group - when: sshd.groups | length + when: sshd.groups | length > 0 - name: Generate user section of the sshd_config file. ansible.builtin.blockinfile: @@ -40,7 +40,7 @@ with_items: "{{ sshd.users }}" loop_control: loop_var: users - when: sshd.users | length + when: sshd.users | length > 0 # - name: Trigger overrides # include_role: diff --git a/roles/debian/user_ansible/tasks/main.yml b/roles/debian/user_ansible/tasks/main.yml index 93290f410..5f138f26a 100644 --- a/roles/debian/user_ansible/tasks/main.yml +++ b/roles/debian/user_ansible/tasks/main.yml @@ -13,7 +13,7 @@ with_items: "{{ user_ansible.groups }}" loop_control: loop_var: group - when: user_ansible.groups | length + when: user_ansible.groups | length > 0 - name: Create the system user. ansible.builtin.user: @@ -74,7 +74,7 @@ owner: "{{ user_ansible.username }}" group: "{{ user_ansible.username }}" mode: '0600' - when: user_ansible.known_hosts | length + when: user_ansible.known_hosts | length > 0 - name: Add public keys to known_hosts. 
ansible.builtin.known_hosts: From cb636682cd8b8a28d4081948cf1bbe7e1dcf0312 Mon Sep 17 00:00:00 2001 From: Klaus Purer Date: Wed, 10 Sep 2025 13:55:24 +0200 Subject: [PATCH 15/61] feat(php): Add FPM slow logrotate (#2625) * feat(php): Support removal of APCU, add FPM slow logrotate * simplify condition * revert apcu installed setting, not needed From 9f05b904fd7c1e12a7d0a9ec6c457d8faa592946 Mon Sep 17 00:00:00 2001 From: nfawbert <62660788+nfawbert@users.noreply.github.com> Date: Wed, 10 Sep 2025 12:57:51 +0100 Subject: [PATCH 16/61] r73458-install-php-gmp-by-default2 (#2667) * r73458-install-php-gmp-by-default2 * re-add required packages --- roles/debian/php-common/tasks/main.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/roles/debian/php-common/tasks/main.yml b/roles/debian/php-common/tasks/main.yml index 15ff896a6..d9f5ac786 100644 --- a/roles/debian/php-common/tasks/main.yml +++ b/roles/debian/php-common/tasks/main.yml @@ -41,9 +41,12 @@ - name: Install PHP packages. ansible.builtin.apt: pkg: + - "php{{ version }}-apcu" + - "php{{ version }}-bcmath" - "php{{ version }}-curl" - "php{{ version }}-dev" - "php{{ version }}-gd" + - "php{{ version }}-gmp" - "php{{ version }}-imap" - "php{{ version }}-ldap" - "php{{ version }}-mbstring" @@ -54,8 +57,6 @@ - "php{{ version }}-soap" - "php{{ version }}-xml" - "php{{ version }}-zip" - - "php{{ version }}-bcmath" - - "php{{ version }}-apcu" state: present with_items: "{{ php.version }}" loop_control: From a89ae4537c013302ee6b1868d05e814c04c6ac98 Mon Sep 17 00:00:00 2001 From: drazenCE <140631110+drazenCE@users.noreply.github.com> Date: Thu, 11 Sep 2025 07:53:22 +0200 Subject: [PATCH 17/61] Wazuh-mitre-report-setup (#2588) * Wazuh-mitre-report-setup * Wazuh-mitre-shellshock-longurl-block * Fixing-vars * Wazuh-mitre-report-setup-PR-2.x --- roles/debian/wazuh/defaults/main.yml | 2 +- roles/debian/wazuh/tasks/main.yml | 52 ++++++++++++++++--- .../templates/generate_weekly_report.sh.j2 | 46 ++++++++++++++++ 3 files changed, 92 insertions(+), 8 deletions(-) create mode 100644 roles/debian/wazuh/templates/generate_weekly_report.sh.j2 diff --git a/roles/debian/wazuh/defaults/main.yml b/roles/debian/wazuh/defaults/main.yml index c98a57e03..fd28d0ffe 100644 --- a/roles/debian/wazuh/defaults/main.yml +++ b/roles/debian/wazuh/defaults/main.yml @@ -92,7 +92,7 @@ wazuh: active_responses: - command: "firewall-drop" location: "all" - rules_id: "31151,5712,104130,101071,101132,101238,101251,103011" + rules_id: "31115,31151,31168,5712,104130,101071,101132,101238,101251,103011" repeated_offenders: "30,60,120" timeout: 600 - command: "firewall-drop" diff --git a/roles/debian/wazuh/tasks/main.yml b/roles/debian/wazuh/tasks/main.yml index 808b9b77d..e37e70b29 100644 --- a/roles/debian/wazuh/tasks/main.yml +++ b/roles/debian/wazuh/tasks/main.yml @@ -139,6 +139,10 @@ ignore_errors: true changed_when: false +- name: Set fact if wazuh-manager service exists + ansible.builtin.set_fact: + wazuh_manager_exists: "{{ 'wazuh-manager.service' in wazuh_service.stdout }}" + - name: Deploy custom Wazuh local rules ansible.builtin.copy: src: custom_wazuh_rules.xml @@ -149,7 +153,7 @@ notify: restart wazuh-manager tags: - rules - when: "'wazuh-manager.service' in wazuh_service.stdout" + when: wazuh_manager_exists - name: Write the password to /var/ossec/etc/authd.pass ansible.builtin.copy: @@ -158,16 +162,50 @@ mode: '0640' owner: root group: wazuh - when: "'wazuh-manager.service' in wazuh_service.stdout or 'wazuh-agent.service' in wazuh_service.stdout" + 
when: wazuh_manager_exists or 'wazuh-agent.service' in wazuh_service.stdout -- name: Restart wazuh-manager to apply changes - ansible.builtin.systemd_service: +- name: Restart wazuh-manager to apply changes. + ansible.builtin.systemd: name: wazuh-manager state: restarted - when: "'wazuh-manager.service' in wazuh_service.stdout" + when: wazuh_manager_exists -- name: Restart wazuh-agent to apply changes - ansible.builtin.systemd_service: +- name: Restart wazuh-agent to apply changes. + ansible.builtin.systemd: name: wazuh-agent state: restarted when: "'wazuh-agent.service' in wazuh_service.stdout" + +- name: Read filebeat.yml content. + ansible.builtin.shell: | + set -o pipefail && awk -F'"' '/password:/ {print $2}' {{ wazuh.mitre_report.password_file }} + register: _wazuh_filebeat_password + no_log: true + args: + executable: /bin/bash + when: wazuh_manager_exists + +- name: Set password fact. + ansible.builtin.set_fact: + filebeat_password: "{{ _wazuh_filebeat_password.stdout }}" + no_log: true + when: wazuh_manager_exists + +- name: Deploy the weekly report script. + ansible.builtin.template: + src: generate_weekly_report.sh.j2 + dest: /usr/local/bin/generate_weekly_report.sh + owner: root + group: root + mode: '0755' + when: wazuh_manager_exists + +- name: Ensure weekly report cron job is present. + ansible.builtin.cron: + name: "Weekly OpenSearch report generation" + user: root + minute: 0 + hour: 2 + weekday: 1 # Monday + job: "/usr/local/bin/generate_weekly_report.sh >> /var/log/opensearch-reports.log 2>&1" + when: wazuh_manager_exists diff --git a/roles/debian/wazuh/templates/generate_weekly_report.sh.j2 b/roles/debian/wazuh/templates/generate_weekly_report.sh.j2 new file mode 100644 index 000000000..eb93662e1 --- /dev/null +++ b/roles/debian/wazuh/templates/generate_weekly_report.sh.j2 @@ -0,0 +1,46 @@ +#!/bin/bash + +# This script generates a PDF report from wazuh-dashboard visualization and emails it + +# Set variables +REPORT_DATE=$(date +"%Y-%m-%d") +REPORT_NAME="weekly-report-${REPORT_DATE}" +LOG_FILE="/var/log/opensearch-reports.log" +USERNAME= {{ wazuh.mitre_report.username }} +PASSWORD= {{ _wazuh_filebeat_password }} + +# Function to log messages +log_message() { + echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "$LOG_FILE" +} + +log_message "Starting weekly report generation" + +# Generate and send the report +opensearch-reporting-cli \ + -u "{{ wazuh.mitre_report.visualization_url }}" \ + -a basic \ + -c "$USERNAME:$PASSWORD" \ + --selfsignedcerts true \ + -f pdf \ + -n "$REPORT_NAME" \ + -e smtp \ + -s "{{ wazuh.mitre_report.e-mail_from }}" \ + -r "{{ wazuh.manager.wazuh_manager_mailto}}" \ + --subject "Weekly OpenSearch Report - $(date '+%B %d, %Y')" \ + --note "Hi,\n\nPlease find attached the weekly Wazuh Mitre report covering the last 7 days.\n\nReport generated on: $(date '+%Y-%m-%d %H:%M:%S')\n\nBest regards,\nAutomated Reporting System" \ + --smtphost localhost \ + --smtpport 25 + +# Check if the command was successful +if [ $? 
-eq 0 ]; then + log_message "Weekly report generated and sent successfully" +else + log_message "ERROR: Failed to generate or send weekly report" + exit 1 +fi + +# Optional: Clean up old report files (keep last 2 weeks) +find /tmp -name "weekly-report-*.pdf" -mtime +14 -delete 2>/dev/null + +log_message "Weekly report process completed" From 71278e9146b6238bc19ee274f5c8ceb9430d087b Mon Sep 17 00:00:00 2001 From: drazenCE <140631110+drazenCE@users.noreply.github.com> Date: Thu, 11 Sep 2025 09:26:35 +0200 Subject: [PATCH 18/61] Wazuh mitre report setup pr 2.x (#2669) * Wazuh-mitre-report-setup * Wazuh-mitre-shellshock-longurl-block * Fixing-vars * Wazuh-mitre-report-setup-PR-2.x * Wazuh-mitre-report-setup-PR-2.x --- roles/debian/wazuh/templates/generate_weekly_report.sh.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/debian/wazuh/templates/generate_weekly_report.sh.j2 b/roles/debian/wazuh/templates/generate_weekly_report.sh.j2 index eb93662e1..de717209a 100644 --- a/roles/debian/wazuh/templates/generate_weekly_report.sh.j2 +++ b/roles/debian/wazuh/templates/generate_weekly_report.sh.j2 @@ -25,7 +25,7 @@ opensearch-reporting-cli \ -f pdf \ -n "$REPORT_NAME" \ -e smtp \ - -s "{{ wazuh.mitre_report.e-mail_from }}" \ + -s "{{ wazuh.mitre_report.e_mail_from }}" \ -r "{{ wazuh.manager.wazuh_manager_mailto}}" \ --subject "Weekly OpenSearch Report - $(date '+%B %d, %Y')" \ --note "Hi,\n\nPlease find attached the weekly Wazuh Mitre report covering the last 7 days.\n\nReport generated on: $(date '+%Y-%m-%d %H:%M:%S')\n\nBest regards,\nAutomated Reporting System" \ From 65a6a0dd23f3512f3b16deead555ea0504553b13 Mon Sep 17 00:00:00 2001 From: tymofiisobchenko <104431720+tymofiisobchenko@users.noreply.github.com> Date: Fri, 12 Sep 2025 16:55:54 +0300 Subject: [PATCH 19/61] pin_ansible_version (#2671) * pin_ansible_version * pin_ansible_version * pin_ansible_version * pin_ansible_version * pin_ansible_version_fix_upgrade_timer * pin_ansible_version_fix_upgrade_timer * pin_ansible_version_fix_upgrade_timer * pin_ansible_version_disable_upgrade_timer * pin_ansible_version_disable_upgrade_timer * pin_ansible_version_disable_upgrade_timer * pin_ansible_version_disable_upgrade_timer --- install.sh | 3 ++- roles/debian/ansible/defaults/main.yml | 3 ++- roles/debian/ansible/tasks/main.yml | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/install.sh b/install.sh index c8c8b8db4..c11219cbd 100755 --- a/install.sh +++ b/install.sh @@ -84,6 +84,7 @@ FIREWALL="true" AWS_SUPPORT="false" IS_LOCAL="false" SERVER_HOSTNAME=$(hostname) +ANSIBLE_VERSION="<12" # Parse options. 
parse_options "$@" @@ -146,7 +147,7 @@ fi /usr/bin/echo "-------------------------------------------------" /usr/bin/su - "$CONTROLLER_USER" -c "/usr/bin/python3 -m venv /home/$CONTROLLER_USER/ce-python" /usr/bin/su - "$CONTROLLER_USER" -c "/home/$CONTROLLER_USER/ce-python/bin/python3 -m pip install --upgrade pip" -/usr/bin/su - "$CONTROLLER_USER" -c "/home/$CONTROLLER_USER/ce-python/bin/pip install ansible netaddr python-debian" +/usr/bin/su - "$CONTROLLER_USER" -c "/home/$CONTROLLER_USER/ce-python/bin/pip install 'ansible$ANSIBLE_VERSION' netaddr python-debian" if [ "$AWS_SUPPORT" = "true" ]; then /usr/bin/su - "$CONTROLLER_USER" -c "/home/$CONTROLLER_USER/ce-python/bin/pip install boto3" fi diff --git a/roles/debian/ansible/defaults/main.yml b/roles/debian/ansible/defaults/main.yml index e7256e5f8..47707d7d0 100644 --- a/roles/debian/ansible/defaults/main.yml +++ b/roles/debian/ansible/defaults/main.yml @@ -4,8 +4,9 @@ ce_ansible: #venv_path: "/home/{{ ce_provision.username }}/ansible" #venv_command: /usr/bin/python3.11 -m venv #venv_install_username: ansible # user to become when creating venv + ansible_version: "<12.0" # also check install.sh script in the repo root and set the version there accordingly. upgrade: - enabled: true # create systemd timer to auto-upgrade Ansible + enabled: false # create systemd timer to auto-upgrade Ansible. Temporary disabled due to ansible 2.19 breaking changes. command: "{{ _venv_path }}/bin/python3 -m pip install --upgrade ansible" # if you set venv_path above then set it here too on_calendar: "*-*-* 01:30:00" # see systemd.time documentation - https://www.freedesktop.org/software/systemd/man/latest/systemd.time.html#Calendar%20Events #timer_name: upgrade_ansible diff --git a/roles/debian/ansible/tasks/main.yml b/roles/debian/ansible/tasks/main.yml index 57af8cbf0..cdf6d0862 100644 --- a/roles/debian/ansible/tasks/main.yml +++ b/roles/debian/ansible/tasks/main.yml @@ -60,7 +60,7 @@ packages: - name: pip state: latest - - name: ansible + - name: "ansible{{ ce_ansible.ansible_version }}" - name: python-debian - name: Install linters. From 854a245a4c7b47690316cc1060c7432be75d8311 Mon Sep 17 00:00:00 2001 From: drazenCE <140631110+drazenCE@users.noreply.github.com> Date: Tue, 16 Sep 2025 10:11:46 +0200 Subject: [PATCH 20/61] Fixing-ce-provision-vars (#2678) --- roles/debian/ce_provision/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/debian/ce_provision/tasks/main.yml b/roles/debian/ce_provision/tasks/main.yml index 0ccd6e680..6d65d25f9 100644 --- a/roles/debian/ce_provision/tasks/main.yml +++ b/roles/debian/ce_provision/tasks/main.yml @@ -16,7 +16,7 @@ with_items: "{{ ce_provision.groups }}" loop_control: loop_var: group - when: ce_provision.groups | length + when: ce_provision.groups is defined and ce_provision.groups | length > 0 # User normally created already in the _init role. - name: Generate SSH key for the controller user for provisioning. 
From 18502308d09a499d5a22fae282648963c2c54dd5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matej=20=C5=A0tajduhar?= <30931414+matej5@users.noreply.github.com> Date: Wed, 17 Sep 2025 10:43:12 +0200 Subject: [PATCH 21/61] Updating-string (#2507) * Updating-string * Updating-string-3 --------- Co-authored-by: Matej Stajduhar --- roles/aws/aws_admin_tools/tasks/create_methods.yml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/roles/aws/aws_admin_tools/tasks/create_methods.yml b/roles/aws/aws_admin_tools/tasks/create_methods.yml index e06acf07a..7dfff713e 100644 --- a/roles/aws/aws_admin_tools/tasks/create_methods.yml +++ b/roles/aws/aws_admin_tools/tasks/create_methods.yml @@ -95,14 +95,18 @@ {{ '--credentials "arn:aws:iam::' + _acc_id + ':role/api_get_s3"' if item.resource == 's3' else '' }} when: item.url_params is not defined or item.url_params | length == 0 -- name: Generate template parts for each param - set_fact: - template_parts: "{{ item.url_params | map('regex_replace', '^(.*)$', '\\\"\\1\\\": \\\"$input.params(''\\1'')\\\"') | list }}" +- name: Generate URL parameters string + ansible.builtin.set_fact: + url_params_string: >- + {% for _url in item.url_params %} + {{ '' if loop.first else ',' }} + \"{{ _url }}\": \"$input.params('{{ _url }}')\" + {% endfor %} when: item.url_params is defined and item.url_params | length > 0 - name: Create final template string set_fact: - template_string: "{ \"application/json\": \"{ {{ template_parts | join(',') }} }\" }" + template_string: "{ \"application/json\": \"{ {{ url_params_string }} }\" }" when: item.url_params is defined and item.url_params | length > 0 - name: Write template to file From d62d4e6b5e2023118839a57461b20df464543226 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matej=20=C5=A0tajduhar?= <30931414+matej5@users.noreply.github.com> Date: Thu, 18 Sep 2025 13:33:18 +0200 Subject: [PATCH 22/61] Added-tasks-to-backup-Aurora-and-copy-AMI-to-safe-region (#2682) * Added-tasks-to-backup-Aurora-and-copy-AMI-to-safe-region * Fixing-aurora-backup-tasks * Fixing-aurora-backup-tasks-2 * Fixing-aurora-backup-tasks-3 * Fixing-aurora-backup-tasks-5 * Adding-aurora-template * Updating-aurora-vars * Adding-handler-to-defaults-for-CF --------- Co-authored-by: Matej Stajduhar --- roles/aws/aws_backup/tasks/resource.yml | 3 +- .../tasks/testing_resources.yml | 47 +++++++++++-------- .../templates/AURORA_restore_testing.j2 | 16 +++++++ .../defaults/main.yml | 1 + .../aws_ec2_autoscale_cluster/tasks/main.yml | 7 +++ roles/aws/aws_rds/tasks/main.yml | 15 ++++++ 6 files changed, 68 insertions(+), 21 deletions(-) create mode 100644 roles/aws/aws_backup_validation/templates/AURORA_restore_testing.j2 diff --git a/roles/aws/aws_backup/tasks/resource.yml b/roles/aws/aws_backup/tasks/resource.yml index 44924f6ff..c939a5805 100644 --- a/roles/aws/aws_backup/tasks/resource.yml +++ b/roles/aws/aws_backup/tasks/resource.yml @@ -52,8 +52,9 @@ instance: "ec2" file-system: "elasticfilesystem" db: "rds" + cluster: "rds" ansible.builtin.set_fact: - _resource_arn: "arn:aws:{{ arn_construct[backup.resource_type] }}:{{ _aws_region }}:{{ caller_info.account }}:{{ backup.resource_type }}{% if backup.resource_type == 'db' %}:{% else %}/{% endif %}{{ backup.resource_id }}" + _resource_arn: "arn:aws:{{ arn_construct[backup.resource_type] }}:{{ _aws_region }}:{{ caller_info.account }}:{{ backup.resource_type }}{% if backup.resource_type == 'db' or backup.resource_type == 'cluster' %}:{% else %}/{% endif %}{{ backup.resource_id }}" - name: Check if the 
resource selection exists. ansible.builtin.command: > diff --git a/roles/aws/aws_backup_validation/tasks/testing_resources.yml b/roles/aws/aws_backup_validation/tasks/testing_resources.yml index 56fd50eda..63c79421d 100644 --- a/roles/aws/aws_backup_validation/tasks/testing_resources.yml +++ b/roles/aws/aws_backup_validation/tasks/testing_resources.yml @@ -30,26 +30,32 @@ register: _main_subnets_info - name: Create SG for restored instances. - amazon.aws.ec2_security_group: - name: Restore_testing - description: This SG is used to allow SSM and SSH access to the server - region: "{{ _aws_region }}" - vpc_id: "{{ _main_vpc_info.vpcs[0].vpc_id }}" - rules: - - proto: tcp - from_port: 80 - to_port: 80 - cidr_ip: 0.0.0.0/0 - - proto: tcp - from_port: 443 - to_port: 443 - cidr_ip: 0.0.0.0/0 - - proto: tcp - from_port: 22 - to_port: 22 - cidr_ip: 0.0.0.0/0 - rules_egress: [] - register: _restore_testing_sg + ansible.builtin.include_role: + name: aws/aws_vpc + tasks_from: security_group + vars: + aws_vpc: + name: "Restore_testing" + region: "{{ aws_ec2_autoscale_cluster.region }}" + id: "{{ _main_vpc_info.vpcs[0].vpc_id }}" + description: "This SG is used to allow SSM and SSH access to the server" + rules: + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 443 + to_port: 443 + cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: 0.0.0.0/0 + +- name: Construct AWS instance type dict. + ansible.builtin.set_fact: + _restore_testing_sg: "{{ aws_vpc._result['Restore_testing'] }}" - name: Remove restore testing query file. ansible.builtin.file: @@ -62,6 +68,7 @@ instance: "EC2" file-system: "EFS" db: "RDS" + cluster: "AURORA" - name: Set instance type for template. ansible.builtin.set_fact: diff --git a/roles/aws/aws_backup_validation/templates/AURORA_restore_testing.j2 b/roles/aws/aws_backup_validation/templates/AURORA_restore_testing.j2 new file mode 100644 index 000000000..1cb7e06f7 --- /dev/null +++ b/roles/aws/aws_backup_validation/templates/AURORA_restore_testing.j2 @@ -0,0 +1,16 @@ +{ + "RestoreTestingPlanName": "{{ _testing_plan_info.stdout | from_json | json_query("RestoreTestingPlanName") }}", + "RestoreTestingSelection": { + "IamRoleArn": "{{ _default_backup_role_arn.iam_roles[0].arn }}", + "ProtectedResourceArns": [ + "{{ _resource_arn }}" + ], + "ProtectedResourceType": "{{ _instance_type_restore }}", + "RestoreMetadataOverrides": { + "vpcSecurityGroupIds": "[\"{{ _restore_testing_sg.group_id }}\"]", + "dbsubnetgroupname": "{{ aws_rds.name }}" + }, + "RestoreTestingSelectionName": "{{ backup.selection_name | replace("-", "_") }}", + "ValidationWindowHours": 1 + } +} diff --git a/roles/aws/aws_cloudfront_distribution/defaults/main.yml b/roles/aws/aws_cloudfront_distribution/defaults/main.yml index f264cac35..66b17fd9d 100644 --- a/roles/aws/aws_cloudfront_distribution/defaults/main.yml +++ b/roles/aws/aws_cloudfront_distribution/defaults/main.yml @@ -12,6 +12,7 @@ aws_cloudfront_distribution: # description: "This is example function." 
# runtime: "nodejs22.x" # lambda runtimes can be found here https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html # code: "lambda-function-example.zip" # Name of the function file in files folder next to plays and vars, can handle git URLs +# handler: "main_file.main_function" aws_profile: "{{ _aws_profile }}" region: "{{ _aws_region }}" tags: {} diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index e8f63de73..36ed43672 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -772,3 +772,10 @@ when: - aws_ec2_autoscale_cluster.route_53.zone is defined - aws_ec2_autoscale_cluster.route_53.zone | length > 0 + +- name: Copy AMI to backup region. + community.aws.ec2_ami_copy: + aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + source_region: "{{ aws_ec2_autoscale_cluster.region }}" + region: "{{ aws_backup.copy_vault.region }}" + source_image_id: "{{ aws_ec2_autoscale_cluster_image_latest }}" diff --git a/roles/aws/aws_rds/tasks/main.yml b/roles/aws/aws_rds/tasks/main.yml index 28aff345a..602f13439 100644 --- a/roles/aws/aws_rds/tasks/main.yml +++ b/roles/aws/aws_rds/tasks/main.yml @@ -233,3 +233,18 @@ - aws_rds.backup is defined - aws_rds.backup | length > 0 - "'aurora' not in aws_rds.engine" + +- name: Assign Aurora resource to backup plan. + ansible.builtin.include_role: + name: aws/aws_backup + tasks_from: resource + vars: + backup: + backup_plan_name: "{{ aws_rds.backup }}" + selection_name: "AURORA-{{ aws_rds.name }}-{{ _env_type }}" + resource_id: "{{ aws_rds.name }}" + resource_type: "cluster" + when: + - aws_rds.backup is defined + - aws_rds.backup | length > 0 + - "'aurora' in aws_rds.engine" From 6717286e0747165947d5ccf9b9cfb8b05c3133d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matej=20=C5=A0tajduhar?= <30931414+matej5@users.noreply.github.com> Date: Thu, 18 Sep 2025 15:10:52 +0200 Subject: [PATCH 23/61] SG-creation-update (#2605) * SG-creation-update * Updating-lambda-tasks-to-handle-various-file-options * Updating-lambda-tasks-for-url-handling * Updating-aws_admin_tools-for-aws_lambda * Updating-aws_admin_tools-for-aws_lambda * Setting-loop-item * Setting-loop-item-2 * Updating-vpc-sec-group-vars * Removing-extra-vars-for-git-module * Adding-default-for-git_url * Cleaning-up-tasks * Updating-ansible-lint * Updating-ansible-lint * Ommiting-name-if-no-sec_group-name-defined * Removing-loop-var --------- Co-authored-by: Matej Stajduhar --- .ansible-lint | 3 + roles/aws/aws_admin_tools/defaults/main.yml | 1 + .../tasks/lambda_functions.yml | 12 +++- .../templates/api_change_asg_scaling.py.j2 | 30 --------- .../templates/api_get_acl_list.py.j2 | 63 ------------------- .../templates/api_get_forecasted_costs.py.j2 | 39 ------------ .../templates/api_get_ip_set.py.j2 | 21 ------- .../templates/api_update_ip_set.py.j2 | 19 ------ roles/aws/aws_ami/tasks/repack.yml | 34 +++++----- .../aws_ec2_autoscale_cluster/tasks/main.yml | 32 ++++++---- roles/aws/aws_lambda/tasks/handle_single.yml | 15 +++++ roles/aws/aws_lambda/tasks/handle_url.yml | 33 ++++++++++ roles/aws/aws_lambda/tasks/handle_zip.yml | 7 +++ roles/aws/aws_lambda/tasks/main.yml | 19 +++--- roles/aws/aws_vpc/tasks/main.yml | 21 +++---- roles/aws/aws_vpc/tasks/security_group.yml | 23 +++++-- roles/aws/aws_vpc_subnet/tasks/subnet.yml | 27 ++++---- 17 files changed, 159 insertions(+), 240 deletions(-) delete mode 100644 
roles/aws/aws_admin_tools/templates/api_change_asg_scaling.py.j2 delete mode 100644 roles/aws/aws_admin_tools/templates/api_get_acl_list.py.j2 delete mode 100644 roles/aws/aws_admin_tools/templates/api_get_forecasted_costs.py.j2 delete mode 100644 roles/aws/aws_admin_tools/templates/api_get_ip_set.py.j2 delete mode 100644 roles/aws/aws_admin_tools/templates/api_update_ip_set.py.j2 create mode 100644 roles/aws/aws_lambda/tasks/handle_single.yml create mode 100644 roles/aws/aws_lambda/tasks/handle_url.yml create mode 100644 roles/aws/aws_lambda/tasks/handle_zip.yml diff --git a/.ansible-lint b/.ansible-lint index 02d2d1c31..040449dff 100644 --- a/.ansible-lint +++ b/.ansible-lint @@ -14,5 +14,8 @@ skip_list: - template-instead-of-copy # to skip over roles/ssl/tasks/copy.yml errors, temporarily. - name[template] # it doesn't like Jinja templates being in the middle of a task name, which seems silly to me. - name[casing] # sometimes included Galaxy roles break linting rules and cause failures + - args[module] # causing odd issue with ACL role + - jinja[spacing] # pendantic! we get these from GitHub Actions anyway + - latest[git] # Breaks if there is no version control in task exclude_paths: - roles/contrib/ # we don't control these roles diff --git a/roles/aws/aws_admin_tools/defaults/main.yml b/roles/aws/aws_admin_tools/defaults/main.yml index b13c2aff1..40b8c7ddb 100644 --- a/roles/aws/aws_admin_tools/defaults/main.yml +++ b/roles/aws/aws_admin_tools/defaults/main.yml @@ -7,6 +7,7 @@ aws_admin_tools: - name: "change_asg_scaling" # Name used for creating API Gateway and Lambda functions resource: api # Refers to type of resource for needed function (can be api, schedule and s3) type: POST # Type of HTTP method, can be GET and POST + git_url: "git@codeenigma.net:functions/example_function.git" # If git_url is provided it will be used to download code from gitlab/github policies: [] # List of policies to use for lambda function inline_policies: # Inline policies to allow defining least privilages access name: "change_asg_scaling" # Name of inline policies diff --git a/roles/aws/aws_admin_tools/tasks/lambda_functions.yml b/roles/aws/aws_admin_tools/tasks/lambda_functions.yml index e5be0b24f..0c04382b5 100644 --- a/roles/aws/aws_admin_tools/tasks/lambda_functions.yml +++ b/roles/aws/aws_admin_tools/tasks/lambda_functions.yml @@ -3,6 +3,16 @@ name: "{{ item.resource }}_{{ item.name }}" register: _iam_lambda +- name: Set previous command output into variable. + ansible.builtin.set_fact: + _function_file: "{{ lookup('template', item.resource + '_' + item.name + '.py.j2') }}" + when: item.git_url is not defined + +- name: Set previous command output into variable. + ansible.builtin.set_fact: + _function_file: "{{ item.git_url }}" + when: item.git_url is defined + - name: Create Lambda function. 
ansible.builtin.include_role: name: aws/aws_lambda @@ -13,7 +23,7 @@ timeout: "{{ item.timeout | default(aws_admin_tools.timeout) }}" role: "{{ aws_iam_role._result[item.resource + '_' + item.name] }}" runtime: "{{ aws_admin_tools.runtime }}" - function_file: "{{ lookup('template', item.resource + '_' + item.name + '.py.j2') }}" + function_file: "{{ _function_file }}" s3_bucket: "{{ _general_bucket }}" s3_bucket_prefix: "lambda-functions" tags: diff --git a/roles/aws/aws_admin_tools/templates/api_change_asg_scaling.py.j2 b/roles/aws/aws_admin_tools/templates/api_change_asg_scaling.py.j2 deleted file mode 100644 index 2cfc32e09..000000000 --- a/roles/aws/aws_admin_tools/templates/api_change_asg_scaling.py.j2 +++ /dev/null @@ -1,30 +0,0 @@ -import json -import boto3 - -asg_cli = boto3.client('autoscaling', region_name="{{ _aws_region }}") - -def lambda_handler(event, context): - - policies = asg_cli.describe_policies( - AutoScalingGroupName=event['asg_name'] - ) - - if policies['ScalingPolicies'][0]['Enabled']: - enable = False - else: - enable = True - - for inst in policies['ScalingPolicies']: - put_result = asg_cli.put_scaling_policy( - AutoScalingGroupName=inst['AutoScalingGroupName'], - PolicyName=inst['PolicyName'], - PolicyType=inst['PolicyType'], - AdjustmentType=inst['AdjustmentType'], - ScalingAdjustment=inst['ScalingAdjustment'], - Enabled=enable - ) - - return { - 'statusCode': 200, - 'body': event - } diff --git a/roles/aws/aws_admin_tools/templates/api_get_acl_list.py.j2 b/roles/aws/aws_admin_tools/templates/api_get_acl_list.py.j2 deleted file mode 100644 index 6271f2acf..000000000 --- a/roles/aws/aws_admin_tools/templates/api_get_acl_list.py.j2 +++ /dev/null @@ -1,63 +0,0 @@ -import json -import boto3 - -waf_regional = boto3.client("wafv2", region_name="{{ _aws_region }}") -waf_cf = boto3.client("wafv2", region_name="us-east-1") -cf_client = boto3.client('cloudfront', region_name="us-east-1") - -def get_rules(waf_client, acl_name, acl_id, scope): - rule_details = waf_client.get_web_acl(Name=acl_name, Scope=scope, Id=acl_id) - return [ - { - 'Name': rule['Name'], - 'Priority': rule['Priority'] - } - for rule in rule_details['WebACL']['Rules'] - ] - -def get_cf_associations(cf_client, web_acl_arn): - dist_list = cf_client.list_distributions_by_web_acl_id(WebACLId=web_acl_arn) - return [item['DomainName'] for item in dist_list.get('DistributionList', {}).get('Items', [])] - -def get_regional_associations(waf_client, web_acl_arn): - associations = [] - for res_type in ['APPLICATION_LOAD_BALANCER', 'API_GATEWAY']: - res_list = waf_client.list_resources_for_web_acl(WebACLArn=web_acl_arn, ResourceType=res_type) - if res_list.get('ResourceArns'): - associations.append({res_type: res_list['ResourceArns']}) - return associations - -def get_web_acls(waf_client, scope, include_cf_associations=False, cf_client=None): - response = waf_client.list_web_acls(Scope=scope) - web_acls = [] - - for acl in response['WebACLs']: - rules = get_rules(waf_client, acl['Name'], acl['Id'], scope) - associations = ( - get_cf_associations(cf_client, acl['ARN']) if include_cf_associations - else get_regional_associations(waf_client, acl['ARN']) - ) - web_acls.append({ - 'Name': acl['Name'], - 'Id': acl['Id'], - 'Rules': rules, - 'Association': associations - }) - return web_acls - -def lambda_handler(event, context): - # CloudFront ACLs (Global Scope) - cf_acls = get_web_acls(waf_cf, scope='CLOUDFRONT', include_cf_associations=True, cf_client=cf_client) - - # Regional ACLs (EU-West-1) - regional_acls = 
get_web_acls(waf_regional, scope='REGIONAL') - - return { - 'statusCode': 200, - 'ACLs': { - 'CloudFront': cf_acls, - 'Regional': { - "{{ _aws_region }}": regional_acls - } - } - } diff --git a/roles/aws/aws_admin_tools/templates/api_get_forecasted_costs.py.j2 b/roles/aws/aws_admin_tools/templates/api_get_forecasted_costs.py.j2 deleted file mode 100644 index 6bed7668b..000000000 --- a/roles/aws/aws_admin_tools/templates/api_get_forecasted_costs.py.j2 +++ /dev/null @@ -1,39 +0,0 @@ -import json -import calendar -from datetime import datetime -import boto3 - -costExpl = boto3.client('ce') - -def lambda_handler(event, context): - currDay=datetime.now().day - currMonth=datetime.now().month - print(currMonth) - currYear=datetime.now().year - print(currYear) - lastDay=calendar.monthrange(currYear, currMonth) - - if currMonth < 10: - currMonth = '0' + str(currMonth) - nextDay = currDay + 1 - if currDay < 10: - currDay = '0' + str(currDay) - if nextDay < 10: - nextDay = '0' + str(nextDay) - - startDate=str(currYear) + '-' + str(currMonth) + '-' + str(currDay) - endDate=str(currYear) + '-' + str(currMonth) + '-' + str(nextDay) - - estimatedCost = costExpl.get_cost_forecast( - TimePeriod={ - 'Start': startDate, - 'End': endDate - }, - Granularity='MONTHLY', - Metric='BLENDED_COST' - ) - return { - 'statusCode': 200, - 'Amount': estimatedCost['Total']['Amount'] + ' ' + estimatedCost['Total']['Unit'], - 'Between': estimatedCost['ForecastResultsByTime'][0]['TimePeriod']['Start'] + ' - ' + estimatedCost['ForecastResultsByTime'][0]['TimePeriod']['End'] - } diff --git a/roles/aws/aws_admin_tools/templates/api_get_ip_set.py.j2 b/roles/aws/aws_admin_tools/templates/api_get_ip_set.py.j2 deleted file mode 100644 index c44843bda..000000000 --- a/roles/aws/aws_admin_tools/templates/api_get_ip_set.py.j2 +++ /dev/null @@ -1,21 +0,0 @@ -import json -import boto3 - -waf_cli = boto3.client("wafv2") - -def lambda_handler(event, context): - - print("Gathering instance details.") - ip_set=waf_cli.get_ip_set( - Name=event['set_name'], - Scope='REGIONAL', - Id=event['id'] - ) - - return { - 'statusCode': 200, - 'name': ip_set['IPSet']['Name'], - 'id': ip_set['IPSet']['Id'], - 'addresses': ip_set['IPSet']['Addresses'], - 'lock_token': ip_set['LockToken'], - } diff --git a/roles/aws/aws_admin_tools/templates/api_update_ip_set.py.j2 b/roles/aws/aws_admin_tools/templates/api_update_ip_set.py.j2 deleted file mode 100644 index 08781fb2b..000000000 --- a/roles/aws/aws_admin_tools/templates/api_update_ip_set.py.j2 +++ /dev/null @@ -1,19 +0,0 @@ -import json -import boto3 - -waf_cli = boto3.client("wafv2") - -def lambda_handler(event, context): - - response = waf_cli.update_ip_set( - Name=event['name'], - Scope=event['scope'], - Id=event['id'], - Addresses=event['addresses'], - LockToken=event['lock_token'] -) - - return { - 'statusCode': 200, - 'body': response - } diff --git a/roles/aws/aws_ami/tasks/repack.yml b/roles/aws/aws_ami/tasks/repack.yml index 47ba4904d..9a9c899ca 100644 --- a/roles/aws/aws_ami/tasks/repack.yml +++ b/roles/aws/aws_ami/tasks/repack.yml @@ -9,21 +9,25 @@ register: aws_ami_running_instances - name: Create a Security Group to access the controller. 
- amazon.aws.ec2_security_group: - profile: "{{ aws_ami.aws_profile }}" - region: "{{ aws_ami.region }}" - name: "{{ aws_ami.repack.cluster_name }}-repacker" - tags: "{{ aws_ami.tags }}" - state: present - vpc_id: "{{ aws_ami.repack.vpc_id }}" - description: "Allow controller to access the {{ aws_ami.ami_name }}-repacking instance" - rules: - - proto: tcp - ports: - - 22 - cidr_ip: "{{ aws_ami.repack.controller_cidr }}" - rule_desc: "Allow controller to access the {{ aws_ami.ami_name }}-repacking instance" - rules_egress: [] + ansible.builtin.include_role: + name: aws/aws_vpc + tasks_from: security_group + vars: + aws_vpc: + profile: "{{ aws_ami.aws_profile }}" + region: "{{ aws_ami.region }}" + name: "{{ aws_ami.repack.cluster_name }}-repacker" + tags: "{{ aws_ami.tags }}" + state: present + id: "{{ aws_ami.repack.vpc_id }}" + description: "Allow controller to access the {{ aws_ami.ami_name }}-repacking instance" + rules: + - proto: tcp + ports: + - 22 + cidr_ip: "{{ aws_ami.repack.controller_cidr }}" + rule_desc: "Allow controller to access the {{ aws_ami.ami_name }}-repacking instance" + rules_egress: [] - name: Create an AMI with an existing EC2 instance. amazon.aws.ec2_ami: diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index 36ed43672..9ea852e3c 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -30,23 +30,29 @@ when: aws_ec2_autoscale_cluster.vpc_name is not defined or (aws_ec2_autoscale_cluster.vpc_name | length) == 0 - name: Create matching Security Group. - amazon.aws.ec2_security_group: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - name: "{{ aws_ec2_autoscale_cluster.name }}" - tags: "{{ aws_ec2_autoscale_cluster.tags | combine({'Name': aws_ec2_autoscale_cluster.name}) }}" - state: "{{ aws_ec2_autoscale_cluster.state }}" - vpc_id: "{{ _aws_ec2_autoscale_cluster_vpc_id }}" - description: "Allow internal traffic for cluster {{ aws_ec2_autoscale_cluster.name }}" - rules: - - proto: all - group_name: "{{ aws_ec2_autoscale_cluster.name }}" - rule_desc: "Allow internal traffic for cluster {{ aws_ec2_autoscale_cluster.name }}" + ansible.builtin.include_role: + name: aws/aws_vpc + tasks_from: security_group + vars: + aws_vpc: + name: "{{ aws_ec2_autoscale_cluster.name }}" + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + tags: "{{ aws_ec2_autoscale_cluster.tags | combine({'Name': aws_ec2_autoscale_cluster.name}) }}" + state: "{{ aws_ec2_autoscale_cluster.state }}" + id: "{{ _aws_ec2_autoscale_cluster_vpc_id }}" + description: "Allow internal traffic for cluster {{ aws_ec2_autoscale_cluster.name }}" + rules: + - proto: all + group_name: "{{ aws_ec2_autoscale_cluster.name }}" rules_egress: - proto: all group_name: "{{ aws_ec2_autoscale_cluster.name }}" rule_desc: "Allow internal traffic for cluster {{ aws_ec2_autoscale_cluster.name }}" - register: _aws_ec2_autoscale_cluster_security_group + +- name: Set _aws_ec2_autoscale_cluster_security_group variable. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_security_group: "{{ aws_vpc._result[aws_ec2_autoscale_cluster.name] }}" - name: Reset subnets lists. 
ansible.builtin.set_fact: diff --git a/roles/aws/aws_lambda/tasks/handle_single.yml b/roles/aws/aws_lambda/tasks/handle_single.yml new file mode 100644 index 000000000..45afa3fab --- /dev/null +++ b/roles/aws/aws_lambda/tasks/handle_single.yml @@ -0,0 +1,15 @@ +- name: Check and clean previous Lambda function. + ansible.builtin.file: + path: "{{ _ce_provision_build_dir }}/{{ aws_lambda.name }}.py" + state: absent + +- name: Write Lambda function. + ansible.builtin.copy: + content: "{{ aws_lambda.function_file }}" + dest: "{{ _ce_provision_build_dir }}/{{ aws_lambda.name }}.py" + +- name: Create a zip archive of Lambda function. + community.general.archive: + path: "{{ _ce_provision_build_dir }}/{{ aws_lambda.name }}.py" + dest: "{{ _ce_provision_build_dir }}/{{ aws_lambda.name }}.zip" + format: zip diff --git a/roles/aws/aws_lambda/tasks/handle_url.yml b/roles/aws/aws_lambda/tasks/handle_url.yml new file mode 100644 index 000000000..fa5ca0202 --- /dev/null +++ b/roles/aws/aws_lambda/tasks/handle_url.yml @@ -0,0 +1,33 @@ +- name: Clone git repo. + ansible.builtin.git: + repo: "{{ aws_lambda.function_file }}" + dest: /tmp/funct + +- name: Find all .j2 template files. + ansible.builtin.find: + paths: "{{ work_dir }}/{{ repo_name }}" + patterns: "*.j2" + recurse: true + register: _j2_files + +- name: Template all .j2 files. + ansible.builtin.template: + src: "{{ item.path }}" + dest: "{{ item.path | regex_replace('\\.j2$', '') }}" + loop: "{{ _j2_files.files }}" + loop_control: + label: "{{ item.path }}" + +- name: Remove original .j2 files after templating. + ansible.builtin.file: + path: "{{ item.path }}" + state: absent + loop: "{{ _j2_files.files }}" + loop_control: + label: "{{ item.path }}" + +- name: Copy a zip archive of Lambda function. + community.general.archive: + path: "/tmp/funct" + dest: "{{ _ce_provision_build_dir }}/{{ aws_lambda.name }}.zip" + format: zip diff --git a/roles/aws/aws_lambda/tasks/handle_zip.yml b/roles/aws/aws_lambda/tasks/handle_zip.yml new file mode 100644 index 000000000..47be33206 --- /dev/null +++ b/roles/aws/aws_lambda/tasks/handle_zip.yml @@ -0,0 +1,7 @@ +- name: Copy a zip archive of Lambda function. + ansible.builtin.copy: + src: "{{ aws_lambda.function_file }}" + dest: "{{ _ce_provision_build_dir }}/{{ aws_lambda.name }}.zip" + owner: controller + group: controller + mode: '0644' diff --git a/roles/aws/aws_lambda/tasks/main.yml b/roles/aws/aws_lambda/tasks/main.yml index 0ad4d4876..d0b5d6e8f 100644 --- a/roles/aws/aws_lambda/tasks/main.yml +++ b/roles/aws/aws_lambda/tasks/main.yml @@ -22,16 +22,17 @@ vars: input_string: "{{ aws_lambda.function_file }}" -- name: Write Lambda function. - ansible.builtin.copy: - content: "{{ aws_lambda.function_file }}" - dest: "{{ _ce_provision_build_dir }}/{{ aws_lambda.name }}.py" +- name: Handle single file. + ansible.builtin.include_tasks: handle_single.yml + when: _string_type == 'single' -- name: Create a zip archive of Lambda function. - community.general.archive: - path: "{{ _ce_provision_build_dir }}/{{ aws_lambda.name }}.py" - dest: "{{ _ce_provision_build_dir }}/{{ aws_lambda.name }}.zip" - format: zip +- name: Handle zip file. + ansible.builtin.include_tasks: handle_zip.yml + when: _string_type == 'zip' + +- name: Handle url. + ansible.builtin.include_tasks: handle_url.yml + when: _string_type == 'url' - name: Place Lambda function in S3 bucket. 
amazon.aws.s3_object: diff --git a/roles/aws/aws_vpc/tasks/main.yml b/roles/aws/aws_vpc/tasks/main.yml index 9901abd85..ad461150c 100644 --- a/roles/aws/aws_vpc/tasks/main.yml +++ b/roles/aws/aws_vpc/tasks/main.yml @@ -10,22 +10,19 @@ register: _aws_vpc_vpc - name: Ensure default Security group is tagged. - amazon.aws.ec2_security_group: - name: "default" - profile: "{{ aws_vpc.aws_profile }}" - region: "{{ aws_vpc.region }}" - tags: "{{ aws_vpc.tags }}" - state: "{{ aws_vpc.state }}" - vpc_id: "{{ _aws_vpc_vpc.vpc.id }}" - description: "default VPC security group" - purge_rules: false + ansible.builtin.include_tasks: "security_group.yml" + vars: + aws_vpc: + name: "default" + id: "{{ _aws_vpc_vpc.vpc.id }}" + description: "default VPC security group" + purge_rules: false - name: Create VPC Security groups. ansible.builtin.include_tasks: "security_group.yml" - with_items: "{{ aws_vpc.security_groups }}" + loop: "{{ aws_vpc.security_groups | list }}" loop_control: - loop_var: security_group - label: "{{ security_group.name }}" + loop_var: _sec_group - name: Create IGW. amazon.aws.ec2_vpc_igw: diff --git a/roles/aws/aws_vpc/tasks/security_group.yml b/roles/aws/aws_vpc/tasks/security_group.yml index 4e737b3cd..9af4121dd 100644 --- a/roles/aws/aws_vpc/tasks/security_group.yml +++ b/roles/aws/aws_vpc/tasks/security_group.yml @@ -1,14 +1,25 @@ +- name: Configure vars if looping over list. + ansible.builtin.set_fact: + aws_vpc: + name: "{{ _sec_group.name | default('') }}" + tags: "{{ _aws_vpc_vpc.vpc.tags | combine({'Name': _sec_group.name}) }}" + id: "{{ _aws_vpc_vpc.vpc.id }}" + description: "{{ _sec_group.description }}" + rules: "{{ _sec_group.rules | default(omit) }}" + rules_egress: "{{ _sec_group.rules_egress | default(omit) }}" + when: _sec_group is defined + - name: Create Security Group. amazon.aws.ec2_security_group: - name: "{{ security_group.name }}" + name: "{{ aws_vpc.name }}" profile: "{{ aws_vpc.aws_profile }}" region: "{{ aws_vpc.region }}" - tags: "{{ aws_vpc.tags | combine({'Name': security_group.name}) }}" + tags: "{{ aws_vpc.tags }}" state: "{{ aws_vpc.state }}" - vpc_id: "{{ _aws_vpc_vpc.vpc.id }}" - description: "{{ security_group.description | default('') }}" - rules: "{{ security_group.rules | default(omit) }}" - rules_egress: "{{ security_group.rules_egress | default(omit) }}" + vpc_id: "{{ aws_vpc.id }}" + description: "{{ aws_vpc.description | default('') }}" + rules: "{{ aws_vpc.rules | default(omit) }}" + rules_egress: "{{ aws_vpc.rules_egress | default(omit) }}" purge_rules: "{{ aws_vpc.purge_rules | default(omit) }}" register: _aws_vpc_result diff --git a/roles/aws/aws_vpc_subnet/tasks/subnet.yml b/roles/aws/aws_vpc_subnet/tasks/subnet.yml index 3d89ccc5e..52ceaa6af 100644 --- a/roles/aws/aws_vpc_subnet/tasks/subnet.yml +++ b/roles/aws/aws_vpc_subnet/tasks/subnet.yml @@ -23,18 +23,21 @@ when: subnet.nat_ipv4 is defined and subnet.nat_ipv4 - name: Create matching Security Group. 
- amazon.aws.ec2_security_group: - name: "{{ subnet.name }}" - profile: "{{ aws_vpc_subnet.aws_profile }}" - region: "{{ aws_vpc_subnet.region }}" - tags: "{{ aws_vpc_subnet.tags | combine({'Name': subnet.name}) }}" - state: "{{ aws_vpc_subnet.state }}" - vpc_id: "{{ _aws_vpc_subnet_vpc_id }}" - description: "Allow internal traffic for subnet {{ subnet.name }}" - rules: - - proto: all - group_name: "{{ subnet.name }}" - rule_desc: "Allow internal traffic for subnet {{ subnet.name }}" + ansible.builtin.include_role: + name: aws/aws_vpc + tasks_from: security_group + vars: + aws_vpc: + name: "{{ subnet.name }}" + profile: "{{ aws_vpc_subnet.aws_profile }}" + region: "{{ aws_vpc_subnet.region }}" + tags: "{{ aws_vpc_subnet.tags | combine({'Name': subnet.name}) }}" + state: "{{ aws_vpc_subnet.state }}" + id: "{{ _aws_vpc_subnet_vpc_id }}" + description: "Allow internal traffic for subnet {{ subnet.name }}" + rules: + - proto: all + group_name: "{{ subnet.name }}" rules_egress: - proto: all group_name: "{{ subnet.name }}" From 3bcee174aa4a8be678c2362a1d2fe847740e0ccf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matej=20=C5=A0tajduhar?= <30931414+matej5@users.noreply.github.com> Date: Thu, 18 Sep 2025 17:42:40 +0200 Subject: [PATCH 24/61] Fixing-copy-AMI-to-backup-region (#2684) Co-authored-by: Matej Stajduhar --- roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index 9ea852e3c..3dcf5766b 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -784,4 +784,4 @@ aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" source_region: "{{ aws_ec2_autoscale_cluster.region }}" region: "{{ aws_backup.copy_vault.region }}" - source_image_id: "{{ aws_ec2_autoscale_cluster_image_latest }}" + source_image_id: "{{ aws_ec2_autoscale_cluster_image_latest.image_id }}" From 7bd773f756cf15d2fbf9e85b7ba2694d20915573 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matej=20=C5=A0tajduhar?= <30931414+matej5@users.noreply.github.com> Date: Fri, 19 Sep 2025 11:04:17 +0200 Subject: [PATCH 25/61] Fixing-ami-copy-task (#2686) Co-authored-by: Matej Stajduhar --- roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index 3dcf5766b..db6eea26f 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -779,9 +779,9 @@ - aws_ec2_autoscale_cluster.route_53.zone is defined - aws_ec2_autoscale_cluster.route_53.zone | length > 0 -- name: Copy AMI to backup region. - community.aws.ec2_ami_copy: - aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - source_region: "{{ aws_ec2_autoscale_cluster.region }}" - region: "{{ aws_backup.copy_vault.region }}" - source_image_id: "{{ aws_ec2_autoscale_cluster_image_latest.image_id }}" +#- name: Copy AMI to backup region. 
+# community.aws.ec2_ami_copy: +# aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" +# source_region: "{{ aws_ec2_autoscale_cluster.region }}" +# region: "{{ aws_backup.copy_vault.region }}" +# source_image_id: "{{ aws_ec2_autoscale_cluster_image_latest.image_id }}" From 9472416296a10db67de2321dc1092ca5f55a991a Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Mon, 22 Sep 2025 13:27:41 +0200 Subject: [PATCH 26/61] Updating clamav command to use flock avoiding duplicate processes running. --- roles/debian/clamav/defaults/main.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/roles/debian/clamav/defaults/main.yml b/roles/debian/clamav/defaults/main.yml index 4706b665f..de2ee3029 100644 --- a/roles/debian/clamav/defaults/main.yml +++ b/roles/debian/clamav/defaults/main.yml @@ -12,12 +12,12 @@ clamav: # scheduled scans, set to an empty list for no timers timers: - clamscan_daily: - timer_command: /usr/local/clamav/script/clamscan_daily # path to clamscan wrapper script, ensure it is defined in clamav.scripts - timer_OnCalendar: "*-*-* 02:30:00" # see systemd.time documentation - https://www.freedesktop.org/software/systemd/man/latest/systemd.time.html#Calendar%20Events - server_name: "{{ inventory_hostname }}" # for identification via email, defaults to Ansible inventory name. + timer_command: /usr/bin/flock -n /var/run/clamscan.lock -c /usr/local/clamav/script/clamscan_daily # command to run clamscan wrapper script, ensure script location is defined in clamav.scripts + timer_OnCalendar: "*-*-* 02:30:00" # see systemd.time documentation - https://www.freedesktop.org/software/systemd/man/latest/systemd.time.html#Calendar%20Events + server_name: "{{ inventory_hostname }}" # for identification via email, defaults to Ansible inventory name. log_location: /var/log/clamav - send_mail: false # Important - will not send any emails by default. - send_on_fail: true # Only sends emails on scan failure, will not email for successful scans. + send_mail: false # Important - will not send any emails by default. + send_on_fail: true # Only sends emails on scan failure, will not email for successful scans. report_recipient_email: mail@example.com report_sender_email: admin@server.example.com - install_clamdscan: false # flag to install additional 'clamdscan' package + install_clamdscan: false # flag to install additional 'clamdscan' package From 88851cee107bb00fbf46fa855dd1d92d98f93a86 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Mon, 22 Sep 2025 13:41:19 +0200 Subject: [PATCH 27/61] Bug fixes pr 2.x (#2690) * Fixing installer variable bug. * Fixing tests for external PRs. * Testing with a fork. * Adding repo owner's username into installer string. * Refactoring config repo detection to simplify. * No longer permitted to use an integer as a truthy value. * No longer permitted to use existence check as a truthy value. * Can't see a reason why linotp var shouldn't be a boolean. * No longer permitted to use existence check as a truthy value. * Fixing truthy errors in ce_deploy role. * No longer permitted to use an integer as a truthy value. * Updating clamav command to use flock avoiding duplicate processes running. 
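For context on the flock change in the last item above: the scheduled scan command is now wrapped in a non-blocking lock so a second timer run exits immediately if a scan is already in progress. A minimal sketch of the resulting invocation, using the lock file path set in the role defaults:

    /usr/bin/flock -n /var/run/clamscan.lock -c /usr/local/clamav/script/clamscan_daily

With -n, flock returns non-zero instead of waiting when /var/run/clamscan.lock is already held, so overlapping scheduled scans cannot pile up.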
--- roles/debian/clamav/defaults/main.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/roles/debian/clamav/defaults/main.yml b/roles/debian/clamav/defaults/main.yml index 4706b665f..de2ee3029 100644 --- a/roles/debian/clamav/defaults/main.yml +++ b/roles/debian/clamav/defaults/main.yml @@ -12,12 +12,12 @@ clamav: # scheduled scans, set to an empty list for no timers timers: - clamscan_daily: - timer_command: /usr/local/clamav/script/clamscan_daily # path to clamscan wrapper script, ensure it is defined in clamav.scripts - timer_OnCalendar: "*-*-* 02:30:00" # see systemd.time documentation - https://www.freedesktop.org/software/systemd/man/latest/systemd.time.html#Calendar%20Events - server_name: "{{ inventory_hostname }}" # for identification via email, defaults to Ansible inventory name. + timer_command: /usr/bin/flock -n /var/run/clamscan.lock -c /usr/local/clamav/script/clamscan_daily # command to run clamscan wrapper script, ensure script location is defined in clamav.scripts + timer_OnCalendar: "*-*-* 02:30:00" # see systemd.time documentation - https://www.freedesktop.org/software/systemd/man/latest/systemd.time.html#Calendar%20Events + server_name: "{{ inventory_hostname }}" # for identification via email, defaults to Ansible inventory name. log_location: /var/log/clamav - send_mail: false # Important - will not send any emails by default. - send_on_fail: true # Only sends emails on scan failure, will not email for successful scans. + send_mail: false # Important - will not send any emails by default. + send_on_fail: true # Only sends emails on scan failure, will not email for successful scans. report_recipient_email: mail@example.com report_sender_email: admin@server.example.com - install_clamdscan: false # flag to install additional 'clamdscan' package + install_clamdscan: false # flag to install additional 'clamdscan' package From cf6129bfdd6ab485d58b2e153612c4d7545f3508 Mon Sep 17 00:00:00 2001 From: Filip Rupic <123341158+filiprupic@users.noreply.github.com> Date: Mon, 22 Sep 2025 14:26:17 +0200 Subject: [PATCH 28/61] 73569 allowing webp nginx pr 2.x (#2692) * allowing webp extension * adding webp mime type --------- Co-authored-by: filip --- roles/debian/nginx/defaults/main.yml | 1 + roles/debian/nginx/templates/drupal10.j2 | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/roles/debian/nginx/defaults/main.yml b/roles/debian/nginx/defaults/main.yml index 0d5568b55..e79e7fe36 100644 --- a/roles/debian/nginx/defaults/main.yml +++ b/roles/debian/nginx/defaults/main.yml @@ -41,6 +41,7 @@ nginx: text/xml: ["xml", "rss"] image/gif: ["gif"] image/jpeg: ["jpeg", "jpg"] + image/webp: ["webp"] application/x-javascript: ["js"] application/atom+xml: ["atom"] text/mathml: ["mml"] diff --git a/roles/debian/nginx/templates/drupal10.j2 b/roles/debian/nginx/templates/drupal10.j2 index e02ff44ee..f02519c3d 100644 --- a/roles/debian/nginx/templates/drupal10.j2 +++ b/roles/debian/nginx/templates/drupal10.j2 @@ -124,7 +124,7 @@ location ~ ^/sites/.*/files/.* { # This Nginx config is DENY FIRST, so only these file extensions are permitted. # Core and contrib assets can be pretty much anywhere. 
-location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|eot|woff2|ttf|otf|webm)$ { +location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|eot|woff2|ttf|otf|webm|webp)$ { try_files $uri @rewrite =404; {% if nginx.http.cache_behavior_public %} {{ nginx.http.cache_behavior_public }}; From 72021afcd7ed11d13d499b010587980d29fa82ec Mon Sep 17 00:00:00 2001 From: Filip Rupic <123341158+filiprupic@users.noreply.github.com> Date: Wed, 24 Sep 2025 12:35:24 +0200 Subject: [PATCH 29/61] extending provision.sh to support tags in plays (#2431) Co-authored-by: filip --- scripts/_common.sh | 8 ++++++++ scripts/provision.sh | 1 + 2 files changed, 9 insertions(+) diff --git a/scripts/_common.sh b/scripts/_common.sh index 2eb8d4197..c95560935 100755 --- a/scripts/_common.sh +++ b/scripts/_common.sh @@ -24,6 +24,7 @@ LINT="no" ABSOLUTE_PLAYBOOK_PATH="no" PARALLEL_RUN="no" BOTO_PROFILE="" +TAGS="" # Ensure build workspace exists. if [ ! -d "$BUILD_WORKSPACE_BASE" ]; then mkdir "$BUILD_WORKSPACE_BASE" @@ -80,6 +81,10 @@ parse_options(){ "--list-tasks") LIST_TASKS="yes" ;; + "--tags") + shift + TAGS="$1" + ;; "--verbose") VERBOSE="yes" ;; @@ -196,6 +201,9 @@ ansible_play(){ if [ "$LIST_TASKS" = "yes" ]; then ANSIBLE_CMD="$ANSIBLE_CMD --list-tasks" fi + if [ -n "$TAGS" ]; then + ANSIBLE_CMD="$ANSIBLE_CMD --tags $TAGS" + fi if [ "$VERBOSE" = "yes" ]; then ANSIBLE_CMD="$ANSIBLE_CMD -vvvv" fi diff --git a/scripts/provision.sh b/scripts/provision.sh index fda465e05..76dd21a40 100755 --- a/scripts/provision.sh +++ b/scripts/provision.sh @@ -27,6 +27,7 @@ usage(){ echo '--boto-profile: Name of a profile to export as AWS_PROFILE before calling Ansible.' echo '--parallel: Run all playbooks in the --playbook directory in parallel (using ansible-parallel).' echo '--lint: Run ansible-lint against the playbooks instead of executing them.' + echo '--tags: Only tagged parts of the playbooks. Can be used in combo with --list tasks' } # Common processing. From e783690f0f3072f84d7a6046291f6b8e63468e49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matej=20=C5=A0tajduhar?= <30931414+matej5@users.noreply.github.com> Date: Wed, 24 Sep 2025 12:41:05 +0200 Subject: [PATCH 30/61] Adding-option-for-Aurora-RDS-for-backup-validation (#2635) Co-authored-by: Matej Stajduhar --- .../tasks/testing_resources.yml | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/roles/aws/aws_backup_validation/tasks/testing_resources.yml b/roles/aws/aws_backup_validation/tasks/testing_resources.yml index 63c79421d..551888dba 100644 --- a/roles/aws/aws_backup_validation/tasks/testing_resources.yml +++ b/roles/aws/aws_backup_validation/tasks/testing_resources.yml @@ -73,21 +73,29 @@ - name: Set instance type for template. ansible.builtin.set_fact: _instance_type_restore: "{{ instance_type[backup.resource_type] }}" + _template_prefix: "{{ instance_type[backup.resource_type] }}" when: backup.resource_type != 'file-system' +- name: Set instance type to Aurora if defined. + ansible.builtin.set_fact: + _instance_type_restore: "Aurora" + when: + - backup.resource_type == 'db' + - "'aurora' in aws_rds.engine" + - name: Create restore testing query file. ansible.builtin.template: - src: "{{ _instance_type_restore }}_restore_testing.j2" + src: "{{ _template_prefix }}_restore_testing.j2" dest: /tmp/restore_testing.json register: _restore_testing_query - when: _instance_type_restore is defined + when: _template_prefix is defined - name: Check if protected reource exist. 
ansible.builtin.command: > aws backup list-protected-resources --query "Results[?ResourceArn=='{{ _resource_arn }}']" --region {{ _aws_region }} register: _protected_res -- name: Assign {{ _instance_type_restore }} resource to AWS restore testing plan. +- name: Assign {{ _template_prefix }} resource to AWS restore testing plan. ansible.builtin.command: > aws backup create-restore-testing-selection --cli-input-json file:///tmp/restore_testing.json --region {{ _aws_region }} - when: _instance_type_restore is defined and _testing_plan_info.stdout != "null" and _testing_selection_exists.stdout | length == 0 and _protected_res.stdout | length != 0 + when: _template_prefix is defined and _testing_plan_info.stdout != "null" and _testing_selection_exists.stdout | length == 0 and _protected_res.stdout | length != 0 From 572f4b0d1119d102090ba58f6efd88d135683e44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matej=20=C5=A0tajduhar?= <30931414+matej5@users.noreply.github.com> Date: Wed, 24 Sep 2025 12:43:38 +0200 Subject: [PATCH 31/61] Fixing-aws_vpc-override (#2688) * Fixing-aws_vpc-override * Adding-defaults * Fixing-register-command * Defaulting-tags * Defaulting-tags-2 * Updating-region * Updating-iam_role-vars * Updating-iam_role-vars-2 * Updating-when-statement * Updating-when-statement-for-backups * Updating-when-statement-for-iam-policy * Updating-when-statement-for-iam-policy * Updating-vars-for-SG-creation * Updating-when-statement-for-iam-role * Updating-handle-git-url * Updating-handle-git-url-2 * Updating-handle-git-url-3 * Updating-handle-git-url-4 * Updating-handle-git-url-5 * Updating-handle-git-url-6 * Updating-handle-git-url-7 * Fixing-indentation --------- Co-authored-by: Matej Stajduhar --- roles/aws/aws_ami/tasks/repack.yml | 2 +- roles/aws/aws_backup/tasks/main.yml | 2 +- .../tasks/testing_resources.yml | 4 ++-- .../aws_ec2_autoscale_cluster/tasks/main.yml | 12 +++++----- roles/aws/aws_iam_role/tasks/main.yml | 12 ++++++---- roles/aws/aws_lambda/tasks/handle_url.yml | 13 ++++++++-- roles/aws/aws_vpc/defaults/main.yml | 6 +++++ roles/aws/aws_vpc/tasks/main.yml | 2 +- roles/aws/aws_vpc/tasks/security_group.yml | 24 +++++++++---------- roles/aws/aws_vpc_subnet/tasks/subnet.yml | 2 +- 10 files changed, 49 insertions(+), 30 deletions(-) diff --git a/roles/aws/aws_ami/tasks/repack.yml b/roles/aws/aws_ami/tasks/repack.yml index 9a9c899ca..ba74e0aec 100644 --- a/roles/aws/aws_ami/tasks/repack.yml +++ b/roles/aws/aws_ami/tasks/repack.yml @@ -13,7 +13,7 @@ name: aws/aws_vpc tasks_from: security_group vars: - aws_vpc: + aws_vpc_sg: profile: "{{ aws_ami.aws_profile }}" region: "{{ aws_ami.region }}" name: "{{ aws_ami.repack.cluster_name }}-repacker" diff --git a/roles/aws/aws_backup/tasks/main.yml b/roles/aws/aws_backup/tasks/main.yml index 5da2d0a14..10dc5889a 100644 --- a/roles/aws/aws_backup/tasks/main.yml +++ b/roles/aws/aws_backup/tasks/main.yml @@ -64,7 +64,7 @@ with_items: "{{ aws_backup.plans }}" loop_control: loop_var: plan - when: aws_backup.plans | length + when: aws_backup.plans | length > 0 - name: Include aws backup validation role. 
ansible.builtin.include_role: diff --git a/roles/aws/aws_backup_validation/tasks/testing_resources.yml b/roles/aws/aws_backup_validation/tasks/testing_resources.yml index 551888dba..3e996ec22 100644 --- a/roles/aws/aws_backup_validation/tasks/testing_resources.yml +++ b/roles/aws/aws_backup_validation/tasks/testing_resources.yml @@ -34,7 +34,7 @@ name: aws/aws_vpc tasks_from: security_group vars: - aws_vpc: + aws_vpc_sg: name: "Restore_testing" region: "{{ aws_ec2_autoscale_cluster.region }}" id: "{{ _main_vpc_info.vpcs[0].vpc_id }}" @@ -55,7 +55,7 @@ - name: Construct AWS instance type dict. ansible.builtin.set_fact: - _restore_testing_sg: "{{ aws_vpc._result['Restore_testing'] }}" + _restore_testing_sg: "{{ aws_vpc_sg._result['Restore_testing'] }}" - name: Remove restore testing query file. ansible.builtin.file: diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index db6eea26f..2bb19d861 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -34,7 +34,7 @@ name: aws/aws_vpc tasks_from: security_group vars: - aws_vpc: + aws_vpc_sg: name: "{{ aws_ec2_autoscale_cluster.name }}" profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" region: "{{ aws_ec2_autoscale_cluster.region }}" @@ -45,14 +45,14 @@ rules: - proto: all group_name: "{{ aws_ec2_autoscale_cluster.name }}" - rules_egress: - - proto: all - group_name: "{{ aws_ec2_autoscale_cluster.name }}" - rule_desc: "Allow internal traffic for cluster {{ aws_ec2_autoscale_cluster.name }}" + rules_egress: + - proto: all + group_name: "{{ aws_ec2_autoscale_cluster.name }}" + rule_desc: "Allow internal traffic for cluster {{ aws_ec2_autoscale_cluster.name }}" - name: Set _aws_ec2_autoscale_cluster_security_group variable. ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_security_group: "{{ aws_vpc._result[aws_ec2_autoscale_cluster.name] }}" + _aws_ec2_autoscale_cluster_security_group: "{{ aws_vpc_sg._result[aws_ec2_autoscale_cluster.name] }}" - name: Reset subnets lists. ansible.builtin.set_fact: diff --git a/roles/aws/aws_iam_role/tasks/main.yml b/roles/aws/aws_iam_role/tasks/main.yml index f51b0aecd..fe75c1c50 100644 --- a/roles/aws/aws_iam_role/tasks/main.yml +++ b/roles/aws/aws_iam_role/tasks/main.yml @@ -21,15 +21,19 @@ _combined_policies: "{{ aws_iam_role.managed_policies }}" when: aws_iam_role.inline_policies.action is not defined or aws_iam_role.inline_policies.action | length == 0 +- name: Create list of strings for predefined policies. + ansible.builtin.set_fact: + allowed_strings: ["ec2", "ecs", "backup"] + - name: Create assume role policy document if predefined string is passed. ansible.builtin.set_fact: _assume_role_policy: "{{ lookup('file', aws_iam_role.policy_document + '_document_policy.json') }}" - when: aws_iam_role.policy_document | type_debug == 'AnsibleUnicode' + when: aws_iam_role.policy_document in allowed_strings - name: Create assume role policy document if template is provided. ansible.builtin.set_fact: _assume_role_policy: "{{ aws_iam_role.policy_document }}" - when: aws_iam_role.policy_document | type_debug != 'AnsibleUnicode' + when: aws_iam_role.policy_document not in allowed_strings - name: Create an IAM role. 
amazon.aws.iam_role: @@ -37,8 +41,8 @@ name: "{{ aws_iam_role.name }}" assume_role_policy_document: "{{ _assume_role_policy }}" managed_policies: "{{ _combined_policies }}" - purge_policies: "{{ aws_iam_role.purge_policies }}" - tags: "{{ aws_iam_role.tags }}" + purge_policies: "{{ aws_iam_role.purge_policies | default(true) }}" + tags: "{{ aws_iam_role.tags | default({}) }}" create_instance_profile: "{% if aws_iam_role.policy_document == 'ec2' %}true{% else %}false{% endif %}" wait: true register: _aws_iam_role_result diff --git a/roles/aws/aws_lambda/tasks/handle_url.yml b/roles/aws/aws_lambda/tasks/handle_url.yml index fa5ca0202..36a2eec22 100644 --- a/roles/aws/aws_lambda/tasks/handle_url.yml +++ b/roles/aws/aws_lambda/tasks/handle_url.yml @@ -2,10 +2,14 @@ ansible.builtin.git: repo: "{{ aws_lambda.function_file }}" dest: /tmp/funct + update: true + accept_hostkey: true + become: true + become_user: "{{ ce_provision.username }}" - name: Find all .j2 template files. ansible.builtin.find: - paths: "{{ work_dir }}/{{ repo_name }}" + paths: "/tmp/funct" patterns: "*.j2" recurse: true register: _j2_files @@ -28,6 +32,11 @@ - name: Copy a zip archive of Lambda function. community.general.archive: - path: "/tmp/funct" + path: "/tmp/funct/" dest: "{{ _ce_provision_build_dir }}/{{ aws_lambda.name }}.zip" format: zip + +- name: Remove function directory + ansible.builtin.file: + path: /tmp/funct + state: absent diff --git a/roles/aws/aws_vpc/defaults/main.yml b/roles/aws/aws_vpc/defaults/main.yml index 998ce60d4..ef11f5442 100644 --- a/roles/aws/aws_vpc/defaults/main.yml +++ b/roles/aws/aws_vpc/defaults/main.yml @@ -1,3 +1,9 @@ +aws_vpc_sg: + aws_profile: "{{ _aws_profile }}" + region: "{{ _aws_region }}" + tags: {} + state: present + description: "" aws_vpc: aws_profile: "{{ _aws_profile }}" region: "{{ _aws_region }}" diff --git a/roles/aws/aws_vpc/tasks/main.yml b/roles/aws/aws_vpc/tasks/main.yml index ad461150c..710c66505 100644 --- a/roles/aws/aws_vpc/tasks/main.yml +++ b/roles/aws/aws_vpc/tasks/main.yml @@ -12,7 +12,7 @@ - name: Ensure default Security group is tagged. ansible.builtin.include_tasks: "security_group.yml" vars: - aws_vpc: + aws_vpc_sg: name: "default" id: "{{ _aws_vpc_vpc.vpc.id }}" description: "default VPC security group" diff --git a/roles/aws/aws_vpc/tasks/security_group.yml b/roles/aws/aws_vpc/tasks/security_group.yml index 9af4121dd..b999c343e 100644 --- a/roles/aws/aws_vpc/tasks/security_group.yml +++ b/roles/aws/aws_vpc/tasks/security_group.yml @@ -1,6 +1,6 @@ - name: Configure vars if looping over list. ansible.builtin.set_fact: - aws_vpc: + aws_vpc_sg: name: "{{ _sec_group.name | default('') }}" tags: "{{ _aws_vpc_vpc.vpc.tags | combine({'Name': _sec_group.name}) }}" id: "{{ _aws_vpc_vpc.vpc.id }}" @@ -11,18 +11,18 @@ - name: Create Security Group. 
amazon.aws.ec2_security_group: - name: "{{ aws_vpc.name }}" - profile: "{{ aws_vpc.aws_profile }}" - region: "{{ aws_vpc.region }}" - tags: "{{ aws_vpc.tags }}" - state: "{{ aws_vpc.state }}" - vpc_id: "{{ aws_vpc.id }}" - description: "{{ aws_vpc.description | default('') }}" - rules: "{{ aws_vpc.rules | default(omit) }}" - rules_egress: "{{ aws_vpc.rules_egress | default(omit) }}" - purge_rules: "{{ aws_vpc.purge_rules | default(omit) }}" + name: "{{ aws_vpc_sg.name }}" + profile: "{{ aws_vpc_sg.aws_profile }}" + region: "{{ aws_vpc_sg.region }}" + tags: "{{ aws_vpc_sg.tags }}" + state: "{{ aws_vpc_sg.state }}" + vpc_id: "{{ aws_vpc_sg.id }}" + description: "{{ aws_vpc_sg.description }}" + rules: "{{ aws_vpc_sg.rules | default(omit) }}" + rules_egress: "{{ aws_vpc_sg.rules_egress | default(omit) }}" + purge_rules: "{{ aws_vpc_sg.purge_rules | default(omit) }}" register: _aws_vpc_result - name: Register aws_vpc SG results. ansible.builtin.set_fact: - aws_vpc: "{{ aws_vpc | combine({'_result': {aws_vpc.name: _aws_vpc_result}}, recursive=True) }}" + aws_vpc_sg: "{{ aws_vpc_sg | combine({'_result': {aws_vpc_sg.name: _aws_vpc_result}}, recursive=True) }}" diff --git a/roles/aws/aws_vpc_subnet/tasks/subnet.yml b/roles/aws/aws_vpc_subnet/tasks/subnet.yml index 52ceaa6af..9051b066a 100644 --- a/roles/aws/aws_vpc_subnet/tasks/subnet.yml +++ b/roles/aws/aws_vpc_subnet/tasks/subnet.yml @@ -27,7 +27,7 @@ name: aws/aws_vpc tasks_from: security_group vars: - aws_vpc: + aws_vpc_sg: name: "{{ subnet.name }}" profile: "{{ aws_vpc_subnet.aws_profile }}" region: "{{ aws_vpc_subnet.region }}" From c75b16a8395dc5ab6c5bdabab68afe7940b76f99 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 24 Sep 2025 13:08:05 +0200 Subject: [PATCH 32/61] More truthy length fixes. --- roles/_overrides/tasks/main.yml | 4 ++-- roles/aws/aws_ami/templates/packer.json.j2 | 6 +++--- roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml | 4 ++-- roles/aws/aws_elb/tasks/main.yml | 2 +- .../aws/aws_iam_saml/templates/simplesamlphp_sp.j2 | 2 +- .../apache/templates/cloudwatch-main.json.j2 | 6 +++--- .../apache/templates/cloudwatch-vhost.json.j2 | 10 +++++----- .../aws_cloudwatch_agent/templates/config.json.j2 | 14 +++++++------- .../templates/include-exclude-filelist.j2 | 2 +- .../debian/nginx/templates/cloudwatch-main.json.j2 | 4 ++-- .../nginx/templates/cloudwatch-vhost.json.j2 | 8 ++++---- .../templates/headless-openvpn-install.sh.j2 | 4 ++-- .../templates/cloudwatch-php-fpm-fixedport.json.j2 | 8 ++++---- .../php-fpm/templates/cloudwatch-php-fpm.json.j2 | 8 ++++---- roles/debian/postfix/templates/transport.j2 | 2 +- roles/debian/ssh_server/templates/sshd_config.j2 | 4 ++-- 16 files changed, 44 insertions(+), 44 deletions(-) diff --git a/roles/_overrides/tasks/main.yml b/roles/_overrides/tasks/main.yml index 18365b2e6..3fcfdd4eb 100644 --- a/roles/_overrides/tasks/main.yml +++ b/roles/_overrides/tasks/main.yml @@ -6,7 +6,7 @@ loop_var: override_file when: - _overrides.files is defined - - _overrides.files | length + - _overrides.files|length > 0 - name: Generate links overrides. 
ansible.builtin.include_tasks: link.yml @@ -15,4 +15,4 @@ loop_var: override_link when: - _overrides.links is defined - - _overrides.links | length + - _overrides.links|length > 0 diff --git a/roles/aws/aws_ami/templates/packer.json.j2 b/roles/aws/aws_ami/templates/packer.json.j2 index faa3074a1..0a27cdbc7 100755 --- a/roles/aws/aws_ami/templates/packer.json.j2 +++ b/roles/aws/aws_ami/templates/packer.json.j2 @@ -31,7 +31,7 @@ "owners": ["{{ aws_ami.owner }}"], "most_recent": true }, - {% if aws_ami.vpc_filter is defined and aws_ami.vpc_filter | length > 0 %} + {% if aws_ami.vpc_filter is defined and aws_ami.vpc_filter|length > 0 %} "vpc_filter": { "filters": { "tag:Name": "{{ aws_ami.vpc_filter }}" @@ -53,7 +53,7 @@ "playbook_file": "{{ aws_ami.playbook_file }}", "inventory_directory": "{{ _ce_provision_base_dir }}/hosts", "ssh_authorized_key_file": "/home/{{ user_provision.username }}/.ssh/{{ aws_ami.public_key_name }}", - {% if aws_ami.groups is defined and aws_ami.groups | length %} + {% if aws_ami.groups is defined and aws_ami.groups|length > 0 %} "groups": {{ aws_ami.groups | to_json }}, {% endif %} "ansible_env_vars": @@ -68,7 +68,7 @@ {% if ansible_verbosity >= 1 %} "-vvvv", {% endif %} - {% if _aws_ami_extra_vars is defined and _aws_ami_extra_vars | length %} + {% if _aws_ami_extra_vars is defined and _aws_ami_extra_vars|length > 0 %} "--extra-vars", "{{ _aws_ami_extra_vars }}", {% endif %} diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index 2bb19d861..f92b3c0df 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -27,7 +27,7 @@ - name: Use provided VPC id. ansible.builtin.set_fact: _aws_ec2_autoscale_cluster_vpc_id: "{{ aws_ec2_autoscale_cluster.vpc_id }}" - when: aws_ec2_autoscale_cluster.vpc_name is not defined or (aws_ec2_autoscale_cluster.vpc_name | length) == 0 + when: (aws_ec2_autoscale_cluster.vpc_name is not defined) or (aws_ec2_autoscale_cluster.vpc_name|length == 0) - name: Create matching Security Group. ansible.builtin.include_role: @@ -415,7 +415,7 @@ _aws_ec2_autoscale_cluster_listeners: "{{ _aws_ec2_autoscale_cluster_listeners + aws_ec2_autoscale_cluster.listeners }}" when: - aws_ec2_autoscale_cluster is defined - - aws_ec2_autoscale_cluster | length + - aws_ec2_autoscale_cluster | length > 0 - aws_ec2_autoscale_cluster.create_elb - name: Generate security group information for the ALB. diff --git a/roles/aws/aws_elb/tasks/main.yml b/roles/aws/aws_elb/tasks/main.yml index 9ea51db71..8c988ff1c 100644 --- a/roles/aws/aws_elb/tasks/main.yml +++ b/roles/aws/aws_elb/tasks/main.yml @@ -111,7 +111,7 @@ _aws_ec2_listeners: "{{ _aws_ec2_listeners + aws_elb.listeners }}" when: - aws_elb is defined - - aws_elb | length + - aws_elb | length > 0 - name: Generate security group information. 
ansible.builtin.include_role: diff --git a/roles/aws/aws_iam_saml/templates/simplesamlphp_sp.j2 b/roles/aws/aws_iam_saml/templates/simplesamlphp_sp.j2 index 509fd4dbe..d931cdbaa 100644 --- a/roles/aws/aws_iam_saml/templates/simplesamlphp_sp.j2 +++ b/roles/aws/aws_iam_saml/templates/simplesamlphp_sp.j2 @@ -55,7 +55,7 @@ $metadata['urn:amazon:{{ _aws_account_info.account }}'] = array ( 'groups' => 'urn:oid:1.3.6.1.4.1.5923.1.1.1.1', ), -{% if aws_iam_saml.linotp_server is defined and aws_iam_saml.linotp_server|length %} +{% if aws_iam_saml.linotp_server is defined and aws_iam_saml.linotp_server|length > 0 %} # LinOTP settings 55 => array( 'class' => 'linotp2:OTP', diff --git a/roles/debian/apache/templates/cloudwatch-main.json.j2 b/roles/debian/apache/templates/cloudwatch-main.json.j2 index e5e899a15..38b8a0772 100644 --- a/roles/debian/apache/templates/cloudwatch-main.json.j2 +++ b/roles/debian/apache/templates/cloudwatch-main.json.j2 @@ -5,7 +5,7 @@ "collect_list": [ { "file_path": "/var/log/apache2/access.log", -{% if apache.log_group_prefix is defined and apache.log_group_prefix|length %} +{% if apache.log_group_prefix is defined and apache.log_group_prefix|length > 0 %} "log_group_name": "{{ apache.log_group_prefix }}apache-access", {% else %} "log_group_name": "apache-access", @@ -14,7 +14,7 @@ }, { "file_path": "/var/log/apache2/error.log", -{% if apache.log_group_prefix is defined and apache.log_group_prefix|length %} +{% if apache.log_group_prefix is defined and apache.log_group_prefix|length > 0 %} "log_group_name": "{{ apache.log_group_prefix }}apache-error", {% else %} "log_group_name": "apache-error", @@ -25,4 +25,4 @@ } } } -} \ No newline at end of file +} diff --git a/roles/debian/apache/templates/cloudwatch-vhost.json.j2 b/roles/debian/apache/templates/cloudwatch-vhost.json.j2 index 331e30ff5..7299936c0 100644 --- a/roles/debian/apache/templates/cloudwatch-vhost.json.j2 +++ b/roles/debian/apache/templates/cloudwatch-vhost.json.j2 @@ -5,12 +5,12 @@ "collect_list": [ { "file_path": "{{ domain.access_log }}", -{% if apache.log_group_prefix is defined and apache.log_group_prefix|length %} +{% if apache.log_group_prefix is defined and apache.log_group_prefix|length > 0 %} "log_group_name": "{{ apache.log_group_prefix }}apache2-access", {% else %} "log_group_name": "apache2-access", {% endif %} -{% if domain.log_stream_name is defined and domain.log_stream_name|length %} +{% if domain.log_stream_name is defined and domain.log_stream_name|length > 0 %} "log_stream_name": "{{ domain.log_stream_name }}" {% else %} "log_stream_name": "{{ apache.log_stream_name }}" @@ -18,12 +18,12 @@ }, { "file_path": "{{ domain.error_log }}", -{% if apache.log_group_prefix is defined and apache.log_group_prefix|length %} +{% if apache.log_group_prefix is defined and apache.log_group_prefix|length > 0 %} "log_group_name": "{{ apache.log_group_prefix }}apache2-error", {% else %} "log_group_name": "apache2-error", {% endif %} -{% if domain.log_stream_name is defined and domain.log_stream_name|length %} +{% if domain.log_stream_name is defined and domain.log_stream_name|length > 0 %} "log_stream_name": "{{ domain.log_stream_name }}" {% else %} "log_stream_name": "{{ apache.log_stream_name }}" @@ -33,4 +33,4 @@ } } } -} \ No newline at end of file +} diff --git a/roles/debian/aws_cloudwatch_agent/templates/config.json.j2 b/roles/debian/aws_cloudwatch_agent/templates/config.json.j2 index 169ea4c53..6dce2d3fe 100755 --- a/roles/debian/aws_cloudwatch_agent/templates/config.json.j2 +++ 
b/roles/debian/aws_cloudwatch_agent/templates/config.json.j2 @@ -9,7 +9,7 @@ "collect_list": [ { "file_path": "/var/log/syslog", -{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length %} +{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length > 0 %} "log_group_name": "{{aws_cloudwatch_agent.log_group_prefix}}syslog", {% else %} "log_group_name": "syslog", @@ -18,7 +18,7 @@ }, { "file_path": "/var/log/auth.log", -{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length %} +{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length > 0 %} "log_group_name": "{{aws_cloudwatch_agent.log_group_prefix}}auth", {% else %} "log_group_name": "auth", @@ -27,7 +27,7 @@ }, { "file_path": "/var/log/daemon.log", -{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length %} +{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length > 0 %} "log_group_name": "{{aws_cloudwatch_agent.log_group_prefix}}daemon", {% else %} "log_group_name": "daemon", @@ -36,7 +36,7 @@ }, { "file_path": "/var/log/messages", -{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length %} +{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length > 0 %} "log_group_name": "{{aws_cloudwatch_agent.log_group_prefix}}messages", {% else %} "log_group_name": "messages", @@ -45,7 +45,7 @@ }, { "file_path": "/var/log/alternatives.log", -{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length %} +{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length > 0 %} "log_group_name": "{{aws_cloudwatch_agent.log_group_prefix}}alternatives", {% else %} "log_group_name": "alternatives", @@ -57,7 +57,7 @@ } }, "metrics": { -{% if aws_cloudwatch_agent.metrics_namespace is defined and aws_cloudwatch_agent.metrics_namespace|length %} +{% if aws_cloudwatch_agent.metrics_namespace is defined and aws_cloudwatch_agent.metrics_namespace|length > 0 %} "namespace": "{{ aws_cloudwatch_agent.metrics_namespace }}", {% endif %} "append_dimensions": { @@ -110,4 +110,4 @@ } } } -} \ No newline at end of file +} diff --git a/roles/debian/duplicity/templates/include-exclude-filelist.j2 b/roles/debian/duplicity/templates/include-exclude-filelist.j2 index bf491cd28..61f745439 100644 --- a/roles/debian/duplicity/templates/include-exclude-filelist.j2 +++ b/roles/debian/duplicity/templates/include-exclude-filelist.j2 @@ -1,5 +1,5 @@ {% for rule in dir.rules %} -{% if rule|length %} +{% if rule|length > 0 %} {{ rule }} {% endif %} {% endfor %} diff --git a/roles/debian/nginx/templates/cloudwatch-main.json.j2 b/roles/debian/nginx/templates/cloudwatch-main.json.j2 index 8ba152202..4c17bb09b 100644 --- a/roles/debian/nginx/templates/cloudwatch-main.json.j2 +++ b/roles/debian/nginx/templates/cloudwatch-main.json.j2 @@ -5,7 +5,7 @@ "collect_list": [ { "file_path": "/var/log/nginx/access.log", -{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length %} +{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length > 0 %} "log_group_name": "{{ nginx.log_group_prefix }}nginx-access", {% else %} "log_group_name": "nginx-access", @@ -14,7 +14,7 @@ }, { "file_path": "/var/log/nginx/error.log", -{% 
if nginx.log_group_prefix is defined and nginx.log_group_prefix|length %} +{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length > 0 %} "log_group_name": "{{ nginx.log_group_prefix }}nginx-error", {% else %} "log_group_name": "nginx-error", diff --git a/roles/debian/nginx/templates/cloudwatch-vhost.json.j2 b/roles/debian/nginx/templates/cloudwatch-vhost.json.j2 index 285252767..a278f674d 100644 --- a/roles/debian/nginx/templates/cloudwatch-vhost.json.j2 +++ b/roles/debian/nginx/templates/cloudwatch-vhost.json.j2 @@ -5,12 +5,12 @@ "collect_list": [ { "file_path": "{{ domain.access_log }}", -{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length %} +{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length > 0 %} "log_group_name": "{{ nginx.log_group_prefix }}nginx-access", {% else %} "log_group_name": "nginx-access", {% endif %} -{% if domain.log_stream_name is defined and domain.log_stream_name|length %} +{% if domain.log_stream_name is defined and domain.log_stream_name|length > 0 %} "log_stream_name": "{{ domain.log_stream_name }}" {% else %} "log_stream_name": "{{ nginx.log_stream_name }}" @@ -18,12 +18,12 @@ }, { "file_path": "{{ domain.error_log }}", -{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length %} +{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length > 0 %} "log_group_name": "{{ nginx.log_group_prefix }}nginx-error", {% else %} "log_group_name": "nginx-error", {% endif %} -{% if domain.log_stream_name is defined and domain.log_stream_name|length %} +{% if domain.log_stream_name is defined and domain.log_stream_name|length > 0 %} "log_stream_name": "{{ domain.log_stream_name }}" {% else %} "log_stream_name": "{{ nginx.log_stream_name }}" diff --git a/roles/debian/openvpn/templates/headless-openvpn-install.sh.j2 b/roles/debian/openvpn/templates/headless-openvpn-install.sh.j2 index 2d078a5f9..0b36d94d8 100644 --- a/roles/debian/openvpn/templates/headless-openvpn-install.sh.j2 +++ b/roles/debian/openvpn/templates/headless-openvpn-install.sh.j2 @@ -13,12 +13,12 @@ export COMPRESSION_CHOICE={{ openvpn.compression_choice }} export CUSTOMIZE_ENC=n export CLIENT={{ openvpn.test_username }} export PASS=1 -{% if openvpn.nat_endpoint is defined and openvpn.nat_endpoint | length %} +{% if openvpn.nat_endpoint is defined and openvpn.nat_endpoint | length > 0 %} export ENDPOINT={{ openvpn.nat_endpoint }} {% endif %} {% if openvpn.dns | int == 13 %} export DNS1={{ openvpn.dns1 }} -{% if openvpn.dns2 is defined and openvpn.dns2 | length %} +{% if openvpn.dns2 is defined and openvpn.dns2 | length > 0 %} export DNS2={{ openvpn.dns2 }} {% endif %} {% endif %} diff --git a/roles/debian/php-fpm/templates/cloudwatch-php-fpm-fixedport.json.j2 b/roles/debian/php-fpm/templates/cloudwatch-php-fpm-fixedport.json.j2 index 74523ecdf..e5d5ba9eb 100644 --- a/roles/debian/php-fpm/templates/cloudwatch-php-fpm-fixedport.json.j2 +++ b/roles/debian/php-fpm/templates/cloudwatch-php-fpm-fixedport.json.j2 @@ -5,12 +5,12 @@ "collect_list": [ { "file_path": "/var/log/php{{ php.version[0] }}-fpm.log", -{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length %} +{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length > 0 %} "log_group_name": "{{ php.fpm.log_group_prefix }}php{{ php.version[0] }}", {% else %} "log_group_name": "php", {% endif %} -{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length %} +{% if php.fpm.log_stream_name is defined and 
php.fpm.log_stream_name|length > 0 %} "log_stream_name": "{{ php.fpm.log_stream_name }}" {% else %} "log_stream_name": "php-fpm" @@ -18,12 +18,12 @@ }, { "file_path": "{{ php.fpm.slowlog_file_directory }}/php{{ php.version[0] }}-fpm.slow.log", -{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length %} +{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length > 0 %} "log_group_name": "{{ php.fpm.log_group_prefix }}php{{ php.version[0] }}", {% else %} "log_group_name": "php", {% endif %} -{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length %} +{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length > 0 %} "log_stream_name": "{{ php.fpm.log_stream_name }}-slowlog" {% else %} "log_stream_name": "php-fpm-slowlog" diff --git a/roles/debian/php-fpm/templates/cloudwatch-php-fpm.json.j2 b/roles/debian/php-fpm/templates/cloudwatch-php-fpm.json.j2 index 19a848bf3..bfb9efab0 100644 --- a/roles/debian/php-fpm/templates/cloudwatch-php-fpm.json.j2 +++ b/roles/debian/php-fpm/templates/cloudwatch-php-fpm.json.j2 @@ -5,12 +5,12 @@ "collect_list": [ { "file_path": "/var/log/php{{ version }}-fpm.log", -{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length %} +{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length > 0 %} "log_group_name": "{{ php.fpm.log_group_prefix }}php{{ version }}", {% else %} "log_group_name": "php", {% endif %} -{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length %} +{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length > 0 %} "log_stream_name": "{{ php.fpm.log_stream_name }}" {% else %} "log_stream_name": "php-fpm" @@ -18,12 +18,12 @@ }, { "file_path": "{{ php.fpm.slowlog_file_directory }}/php{{ version }}-fpm.slow.log", -{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length %} +{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length > 0 %} "log_group_name": "{{ php.fpm.log_group_prefix }}php{{ version }}", {% else %} "log_group_name": "php", {% endif %} -{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length %} +{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length > 0 %} "log_stream_name": "{{ php.fpm.log_stream_name }}-slowlog" {% else %} "log_stream_name": "php-fpm-slowlog" diff --git a/roles/debian/postfix/templates/transport.j2 b/roles/debian/postfix/templates/transport.j2 index 098bf5265..1f053c8e2 100644 --- a/roles/debian/postfix/templates/transport.j2 +++ b/roles/debian/postfix/templates/transport.j2 @@ -1,7 +1,7 @@ {{ ansible_hostname }} : {{ ansible_fqdn }} : {% for transport in postfix.transport_maps %} -{% if transport|length %} +{% if transport|length > 0 %} {{ transport }} {% endif %} {% endfor %} diff --git a/roles/debian/ssh_server/templates/sshd_config.j2 b/roles/debian/ssh_server/templates/sshd_config.j2 index 216792bb0..9c832dbd3 100644 --- a/roles/debian/ssh_server/templates/sshd_config.j2 +++ b/roles/debian/ssh_server/templates/sshd_config.j2 @@ -27,7 +27,7 @@ ListenAddress {{ address }} #HostKey /etc/ssh/ssh_host_ecdsa_key #HostKey /etc/ssh/ssh_host_ed25519_key {% for key in sshd.HostKey %} -{% if key|length %} +{% if key|length > 0 %} HostKey {{ key }} {% endif %} {% endfor %} @@ -119,7 +119,7 @@ UsePAM {{ sshd.UsePAM }} AllowAgentForwarding {{ sshd.AllowAgentForwarding }} AllowTcpForwarding {{ sshd.AllowTcpForwarding }} -{% if sshd.AllowGroups|length %} +{% if sshd.AllowGroups|length > 0 %} 
AllowGroups {{ sshd.AllowGroups }} {% endif %} GatewayPorts {{ sshd.GatewayPorts }} From ef1064218869ca8f401ad359784a17c138c86c73 Mon Sep 17 00:00:00 2001 From: drazenCE <140631110+drazenCE@users.noreply.github.com> Date: Wed, 24 Sep 2025 13:25:24 +0200 Subject: [PATCH 33/61] Updating-pam-ldap-condition (#2695) * Updating-pam-ldap-condition * Updating-pam-ldap-condition-PR-2.x --- roles/debian/pam_ldap/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/debian/pam_ldap/tasks/main.yml b/roles/debian/pam_ldap/tasks/main.yml index b2a4de250..2445bed77 100644 --- a/roles/debian/pam_ldap/tasks/main.yml +++ b/roles/debian/pam_ldap/tasks/main.yml @@ -14,14 +14,14 @@ ansible.builtin.file: path: /etc/ldap/ssl state: directory - when: pam_ldap.ssl_certificate is defined and pam_ldap.ssl_certificate + when: pam_ldap.ssl_certificate is defined and pam_ldap.ssl_certificate | length > 0 - name: Copy certificate. ansible.builtin.copy: src: "{{ pam_ldap.ssl_certificate }}" dest: "/etc/ldap/ssl/{{ pam_ldap.ssl_certificate | basename }}" mode: "0666" - when: pam_ldap.ssl_certificate is defined and pam_ldap.ssl_certificate + when: pam_ldap.ssl_certificate is defined and pam_ldap.ssl_certificate | length > 0 - name: Copy nslcd config. ansible.builtin.template: From 725fcf7ba1ee8887869b408305430e11ee465bc0 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 25 Sep 2025 13:03:53 +0200 Subject: [PATCH 34/61] Fixing more LDAP role truthy issues. --- roles/debian/pam_ldap/tasks/main.yml | 43 ++++++++++++++-------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/roles/debian/pam_ldap/tasks/main.yml b/roles/debian/pam_ldap/tasks/main.yml index 2445bed77..8ea7995f1 100644 --- a/roles/debian/pam_ldap/tasks/main.yml +++ b/roles/debian/pam_ldap/tasks/main.yml @@ -14,14 +14,14 @@ ansible.builtin.file: path: /etc/ldap/ssl state: directory - when: pam_ldap.ssl_certificate is defined and pam_ldap.ssl_certificate | length > 0 + when: pam_ldap.ssl_certificate|length > 0 - name: Copy certificate. ansible.builtin.copy: src: "{{ pam_ldap.ssl_certificate }}" dest: "/etc/ldap/ssl/{{ pam_ldap.ssl_certificate | basename }}" mode: "0666" - when: pam_ldap.ssl_certificate is defined and pam_ldap.ssl_certificate | length > 0 + when: pam_ldap.ssl_certificate|length > 0 - name: Copy nslcd config. ansible.builtin.template: @@ -82,25 +82,24 @@ mode: 0555 owner: root -- name: Create LDAP key script passwd file. - ansible.builtin.template: - src: ldap-bindpw.j2 - dest: /etc/ldap/ldap-bindpw - mode: "0600" - owner: root - when: - - ldap_client.binddn is defined and ldap_client.binddn - - ldap_client.bindpw is defined and ldap_client.bindpw - -- name: Create wrapper script for LDAP key script. - ansible.builtin.template: - src: ssh-getkey-ldap-wrapper.sh.j2 - dest: /usr/local/bin/ssh-getkey-ldap-wrapper.sh - mode: "0555" - owner: root +- name: LDAP password handling. when: - - ldap_client.binddn is defined and ldap_client.binddn - - ldap_client.bindpw is defined and ldap_client.bindpw + - ldap_client.binddn|length > 0 + - ldap_client.bindpw|length > 0 + block: + - name: Create LDAP key script passwd file. + ansible.builtin.template: + src: ldap-bindpw.j2 + dest: /etc/ldap/ldap-bindpw + mode: "0600" + owner: root + + - name: Create wrapper script for LDAP key script. + ansible.builtin.template: + src: ssh-getkey-ldap-wrapper.sh.j2 + dest: /usr/local/bin/ssh-getkey-ldap-wrapper.sh + mode: "0555" + owner: root - name: Configure SSH pub key command if there is a binddn set. 
ansible.builtin.lineinfile: @@ -108,7 +107,7 @@ regexp: "AuthorizedKeysCommand " line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap-wrapper.sh when: - - ldap_client.binddn is defined and ldap_client.binddn + - ldap_client.binddn|length > 0 - name: Configure SSH pub key command if no binddn set. ansible.builtin.lineinfile: @@ -116,7 +115,7 @@ regexp: "AuthorizedKeysCommand " line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap when: - - not ldap_client.binddn + - not ldap_client.binddn|length > 0 - name: Configure SSH pub key command user. ansible.builtin.lineinfile: From 612c3c55ce8b6778089a0852956718e4942fcea9 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 25 Sep 2025 13:13:27 +0200 Subject: [PATCH 35/61] Slight block refactor for LDAP. --- roles/debian/pam_ldap/tasks/main.yml | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/roles/debian/pam_ldap/tasks/main.yml b/roles/debian/pam_ldap/tasks/main.yml index 8ea7995f1..6be7670cb 100644 --- a/roles/debian/pam_ldap/tasks/main.yml +++ b/roles/debian/pam_ldap/tasks/main.yml @@ -101,13 +101,12 @@ mode: "0555" owner: root -- name: Configure SSH pub key command if there is a binddn set. - ansible.builtin.lineinfile: - path: /etc/ssh/sshd_config - regexp: "AuthorizedKeysCommand " - line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap-wrapper.sh - when: - - ldap_client.binddn|length > 0 + # We don't support bind DN with no password because if there is no password the necessary script is not created. + - name: Configure SSH pub key command if there is a binddn set. + ansible.builtin.lineinfile: + path: /etc/ssh/sshd_config + regexp: "AuthorizedKeysCommand " + line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap-wrapper.sh - name: Configure SSH pub key command if no binddn set. ansible.builtin.lineinfile: @@ -115,7 +114,7 @@ regexp: "AuthorizedKeysCommand " line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap when: - - not ldap_client.binddn|length > 0 + - not ldap_client.binddn == 0 - name: Configure SSH pub key command user. ansible.builtin.lineinfile: From 91ad6cae4add2b5adadc7945aa1d0a9d39ac5e42 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 25 Sep 2025 13:14:32 +0200 Subject: [PATCH 36/61] DN length check should not be negated. --- roles/debian/pam_ldap/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/debian/pam_ldap/tasks/main.yml b/roles/debian/pam_ldap/tasks/main.yml index 6be7670cb..021fa6c53 100644 --- a/roles/debian/pam_ldap/tasks/main.yml +++ b/roles/debian/pam_ldap/tasks/main.yml @@ -114,7 +114,7 @@ regexp: "AuthorizedKeysCommand " line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap when: - - not ldap_client.binddn == 0 + - ldap_client.binddn == 0 - name: Configure SSH pub key command user. ansible.builtin.lineinfile: From a60f424536fe977f42394c124b774f1b587a14d3 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Thu, 25 Sep 2025 13:15:39 +0200 Subject: [PATCH 37/61] Forgot to add the length filter. --- roles/debian/pam_ldap/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/debian/pam_ldap/tasks/main.yml b/roles/debian/pam_ldap/tasks/main.yml index 021fa6c53..9727b78e8 100644 --- a/roles/debian/pam_ldap/tasks/main.yml +++ b/roles/debian/pam_ldap/tasks/main.yml @@ -114,7 +114,7 @@ regexp: "AuthorizedKeysCommand " line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap when: - - ldap_client.binddn == 0 + - ldap_client.binddn|length == 0 - name: Configure SSH pub key command user. 
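The common thread in patches 33 to 37 is the stricter boolean handling the commit messages attribute to Ansible 12: a bare variable no longer serves as a truth test in when:, so the conditions are rewritten as explicit length comparisons. A minimal sketch of both directions of the test, using illustrative variable names rather than the role's real ones:

    # "defined and non-empty" guard:
    - name: Copy the LDAP certificate.
      ansible.builtin.copy:
        src: "{{ my_certificate }}"
        dest: "/etc/ldap/ssl/{{ my_certificate | basename }}"
        mode: "0644"
      when:
        - my_certificate is defined
        - my_certificate | length > 0

    # "empty" guard, the form patch 37 ends up with:
    - name: Use the packaged key script when no bind DN is set.
      ansible.builtin.debug:
        msg: "no bind DN configured"
      when: my_binddn | length == 0

The same idea drives the template changes earlier in the series, where {% if value|length %} becomes the explicit {% if value|length > 0 %}.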
ansible.builtin.lineinfile: From 2b98f9f6733f778b861a9444ea556687a57bd480 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 13:00:29 +0200 Subject: [PATCH 38/61] Another boolean Ansible 12 error in AMI role. --- roles/aws/aws_ami/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/aws/aws_ami/tasks/main.yml b/roles/aws/aws_ami/tasks/main.yml index 2973ee816..1ce621463 100644 --- a/roles/aws/aws_ami/tasks/main.yml +++ b/roles/aws/aws_ami/tasks/main.yml @@ -17,7 +17,7 @@ ami_base_image_latest: "{{ ami_base_image.images | sort(attribute='creation_date') | last }}" when: - ami_base_image.images is defined - - ami_base_image.images + - ami_base_image.images|length > 0 - name: Delete existing image. ansible.builtin.include_tasks: delete.yml From 52b3ce17fc2e671351a3b1efff70af99a675186b Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 14:31:45 +0200 Subject: [PATCH 39/61] ALB port must be cast as a string for RedirectAction. --- roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index f92b3c0df..e1845c65c 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -380,7 +380,7 @@ Host: "#{host}" Query: "#{query}" Path: "/#{path}" - Port: "{{ aws_ec2_autoscale_cluster.alb_https_port }}" + Port: "{{ aws_ec2_autoscale_cluster.alb_https_port|str }}" StatusCode: HTTP_301 _aws_ec2_autoscale_cluster_listeners_https: Protocol: HTTPS From 318dd420fdb40941a7466e457d4198090efd794f Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 14:34:08 +0200 Subject: [PATCH 40/61] Setting the correct Jinja filter, it's string, not str. --- roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index e1845c65c..bc0ed9271 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -380,7 +380,7 @@ Host: "#{host}" Query: "#{query}" Path: "/#{path}" - Port: "{{ aws_ec2_autoscale_cluster.alb_https_port|str }}" + Port: "{{ aws_ec2_autoscale_cluster.alb_https_port|string }}" StatusCode: HTTP_301 _aws_ec2_autoscale_cluster_listeners_https: Protocol: HTTPS From e630bcba1b8cf002fa488d830eb30eaae5e166b3 Mon Sep 17 00:00:00 2001 From: drazenCE <140631110+drazenCE@users.noreply.github.com> Date: Tue, 30 Sep 2025 16:03:04 +0200 Subject: [PATCH 41/61] Nslcd-nscd-restart (#2693) --- roles/debian/pam_ldap/tasks/main.yml | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/roles/debian/pam_ldap/tasks/main.yml b/roles/debian/pam_ldap/tasks/main.yml index 2445bed77..10e432be4 100644 --- a/roles/debian/pam_ldap/tasks/main.yml +++ b/roles/debian/pam_ldap/tasks/main.yml @@ -124,17 +124,31 @@ regexp: "AuthorizedKeysCommandUser " line: AuthorizedKeysCommandUser root +- name: Check if nslcd service exists. + ansible.builtin.systemd: + name: nslcd + register: _nslcd_service_check + failed_when: false + changed_when: false + - name: Restart nslcd service. ansible.builtin.service: name: nslcd state: restarted - when: ansible_facts.services['nslcd.service'] is defined + when: _nslcd_service_check is defined + +- name: Check if nscd service exists. 
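One detail worth flagging on the nslcd/nscd probes above: a variable set with register: is always defined, even when failed_when: false has swallowed a failure, so a condition of the form _nslcd_service_check is defined will never skip the restart. If the goal is to restart only when the unit really exists, a stricter sketch can use service facts, much as the conditions being removed did (illustrative tasks, not part of the patch):

    - name: Populate service facts.
      ansible.builtin.service_facts:

    - name: Restart nslcd only if the unit is present.
      ansible.builtin.service:
        name: nslcd
        state: restarted
      when: "'nslcd.service' in ansible_facts.services"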
+ ansible.builtin.systemd: + name: nscd + register: _nscd_service_check + failed_when: false + changed_when: false - name: Restart nscd service. ansible.builtin.service: name: nscd state: restarted - when: ansible_facts.services['nscd.service'] is defined + when: _nscd_service_check is defined - name: Restart SSH service. ansible.builtin.service: From f0026b77abb7d2c3b91643a3f25f52c8362502b6 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 16:14:40 +0200 Subject: [PATCH 42/61] Fixing more Ansible 12 length issues in autoscale role. --- .../aws_ec2_autoscale_cluster/tasks/main.yml | 113 ++++++++---------- 1 file changed, 51 insertions(+), 62 deletions(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index bc0ed9271..901199389 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -460,8 +460,8 @@ cmd: "aws elbv2 add-listener-certificates --region {{ aws_ec2_autoscale_cluster.region }} --profile {{ aws_ec2_autoscale_cluster.aws_profile }} --listener-arn {{ _aws_ec2_autoscale_cluster_alb_listener_ARN }} --certificates CertificateArn={{ item }}" when: - aws_ec2_autoscale_cluster.create_elb - - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs - - _ssl_certificate_ARN | length > 1 + - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs|length > 0 + - _ssl_certificate_ARN|length > 1 with_items: "{{ aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs }}" # EC2 - BUILD ASG @@ -475,7 +475,7 @@ group_names: "{{ aws_ec2_autoscale_cluster.cluster_security_groups }}" return_type: ids when: - - aws_ec2_autoscale_cluster.cluster_security_groups | length > 0 + - aws_ec2_autoscale_cluster.cluster_security_groups|length > 0 - aws_ec2_autoscale_cluster.asg_refresh - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster @@ -542,60 +542,56 @@ - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster -- name: Create step scaling AutoScale policies. - community.aws.autoscaling_policy: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - state: "present" - name: "{{ item.name }}-{{ item.policy_type }}" - adjustment_type: "{{ item.adjustment_type }}" - asg_name: "{{ aws_ec2_autoscale_cluster.name }}" - scaling_adjustment: "{{ item.adjustment }}" - min_adjustment_step: "{{ item.adjustment_step }}" - metric_aggregation: "{{ item.metric_aggregation }}" - step_adjustments: "{{ item.step_adjustments }}" - when: - - aws_ec2_autoscale_cluster.asg_scaling_policies - - item.policy_type == 'StepScaling' - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - register: _aws_ec2_autoscale_cluster_step_scaling_policies - with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" - -- name: Create simple scaling AutoScale policies. 
- community.aws.autoscaling_policy: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - state: "present" - name: "{{ item.name }}-{{ item.policy_type }}" - adjustment_type: "{{ item.adjustment_type }}" - asg_name: "{{ aws_ec2_autoscale_cluster.name }}" - scaling_adjustment: "{{ item.adjustment }}" - min_adjustment_step: "{{ item.adjustment_step }}" - cooldown: "{{ item.cooldown }}" - when: - - aws_ec2_autoscale_cluster.asg_scaling_policies - - item.policy_type == 'SimpleScaling' - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - register: _aws_ec2_autoscale_cluster_simple_scaling_policies - with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" - -- name: Fetch step scaling policies. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_step_scaling_policies.results }}" +- name: Handle simple scaling AutoScale. when: - - _aws_ec2_autoscale_cluster_step_scaling_policies + - aws_ec2_autoscale_cluster.asg_scaling_policies|length > 0 + - item.policy_type == 'SimpleScaling' - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Fetch simple scaling policies. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_scaling_policies + _aws_ec2_autoscale_cluster_simple_scaling_policies.results }}" - when: - - _aws_ec2_autoscale_cluster_simple_scaling_policies + block: + - name: Create simple scaling AutoScale policies. + community.aws.autoscaling_policy: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + state: "present" + name: "{{ item.name }}-{{ item.policy_type }}" + adjustment_type: "{{ item.adjustment_type }}" + asg_name: "{{ aws_ec2_autoscale_cluster.name }}" + scaling_adjustment: "{{ item.adjustment }}" + min_adjustment_step: "{{ item.adjustment_step }}" + cooldown: "{{ item.cooldown }}" + register: _aws_ec2_autoscale_cluster_simple_scaling_policies + with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" + + - name: Fetch simple scaling policies. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_scaling_policies + _aws_ec2_autoscale_cluster_simple_scaling_policies.results }}" + +- name: Handle step scaling AustoScale. + when: + - aws_ec2_autoscale_cluster.asg_scaling_policies|length > 0 + - item.policy_type == 'StepScaling' - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster + block: + - name: Create step scaling AutoScale policies. + community.aws.autoscaling_policy: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + state: "present" + name: "{{ item.name }}-{{ item.policy_type }}" + adjustment_type: "{{ item.adjustment_type }}" + asg_name: "{{ aws_ec2_autoscale_cluster.name }}" + scaling_adjustment: "{{ item.adjustment }}" + min_adjustment_step: "{{ item.adjustment_step }}" + metric_aggregation: "{{ item.metric_aggregation }}" + step_adjustments: "{{ item.step_adjustments }}" + register: _aws_ec2_autoscale_cluster_step_scaling_policies + with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" + + - name: Fetch step scaling policies. 
+ ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_step_scaling_policies.results }}" - name: Create placeholder ARN variables for scaling policies. ansible.builtin.set_fact: @@ -740,7 +736,7 @@ _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ _aws_ec2_autoscale_cluster_cloudfront_aliases + [item.domain] }}" loop: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" when: - - aws_ec2_autoscale_cluster.acm.extra_domains | length > 0 + - aws_ec2_autoscale_cluster.acm.extra_domains|length > 0 - aws_ec2_autoscale_cluster.create_elb - aws_ec2_autoscale_cluster.cloudfront.create_distribution @@ -761,7 +757,7 @@ when: - aws_ec2_autoscale_cluster.create_elb - aws_ec2_autoscale_cluster.cloudfront.create_distribution - - _cf_certificate_ARN | length > 1 + - _cf_certificate_ARN|length > 1 # @TODO - we can use the aws_acm_obsolete_certificate_arn variable to tidy up previous ACM certs, if it is defined. @@ -777,11 +773,4 @@ loop: "{{ _aws_ec2_autoscale_cluster_dns_all_domains }}" when: - aws_ec2_autoscale_cluster.route_53.zone is defined - - aws_ec2_autoscale_cluster.route_53.zone | length > 0 - -#- name: Copy AMI to backup region. -# community.aws.ec2_ami_copy: -# aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" -# source_region: "{{ aws_ec2_autoscale_cluster.region }}" -# region: "{{ aws_backup.copy_vault.region }}" -# source_image_id: "{{ aws_ec2_autoscale_cluster_image_latest.image_id }}" + - aws_ec2_autoscale_cluster.route_53.zone|length > 0 From 26f1e24b7dbad5ccdf8a9fa6397d9f2bd829def0 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 17:45:33 +0200 Subject: [PATCH 43/61] Simplifying ASG role by refactoring into blocks. --- .../aws_ec2_autoscale_cluster/tasks/main.yml | 818 +++++++++--------- 1 file changed, 397 insertions(+), 421 deletions(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index 901199389..af34ada97 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -223,113 +223,98 @@ - aws_ec2_autoscale_cluster.type == "ecs" # EC2 - AMI BUILDING -- name: Add RDS endpoint address to extra vars for AMI building. - ansible.builtin.set_fact: - aws_ec2_autoscale_cluster: - ami_extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default([]) + ['_rds_endpoint: ' + _rds_instance_info.endpoint.address] }}" - when: - - _rds_instance_info.db_instance_identifier is defined - - aws_ec2_autoscale_cluster.rds.rds is defined - - aws_ec2_autoscale_cluster.rds.rds - - aws_ec2_autoscale_cluster.type == "ec2" - -- name: Add Aurora RDS endpoint address to extra vars for AMI building. - ansible.builtin.set_fact: - aws_ec2_autoscale_cluster: - ami_extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default([]) + ['_rds_endpoint: ' + _rds_instance_info_aurora.endpoint.address] }}" - when: - - _rds_instance_info_aurora.db_instance_identifier is defined - - aws_ec2_autoscale_cluster.rds.rds is defined - - aws_ec2_autoscale_cluster.rds.rds - - aws_ec2_autoscale_cluster.type == "ec2" - -- name: Gather running instances information. 
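Patch 43 is mostly mechanical: tasks that each repeated the same type == "ec2" and deploy_cluster tests are gathered under block: so the shared condition is written once and inherited. A minimal sketch of the shape, with made-up names rather than the role's real variables:

    - name: Group of tasks that only apply to EC2 clusters.
      when: my_cluster.type == "ec2"
      block:
        - name: Inherits the block condition.
          ansible.builtin.debug:
            msg: "runs only when my_cluster.type is ec2"

        - name: Combines the block condition with its own.
          ansible.builtin.debug:
            msg: "runs only when both conditions hold"
          when: my_cluster.deploy_cluster | bool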
- amazon.aws.ec2_instance_info: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - filters: - "tag:Name": "{{ aws_ec2_autoscale_cluster.name }}" - instance-state-name: ["running"] - register: aws_ec2_autoscale_cluster_running_instances - when: - - aws_ec2_autoscale_cluster.asg_refresh or aws_ec2_autoscale_cluster.ami_refresh - - aws_ec2_autoscale_cluster.type == "ec2" - -- name: Gather subnet information for temporary EC2 instance if using the 'repack' operation to generate a new AMI. - amazon.aws.ec2_vpc_subnet_info: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - filters: - vpc-id: "{{ _aws_ec2_autoscale_cluster_vpc_id }}" - tag:Name: "{{ aws_ec2_autoscale_cluster.ami_subnet_name }}" - register: _aws_ec2_autoscale_ami_subnet - when: - - aws_ec2_autoscale_cluster.ami_refresh and aws_ec2_autoscale_cluster.ami_operation == 'repack' - - aws_ec2_autoscale_cluster.type == "ec2" - -- name: Create new AMI. - ansible.builtin.include_role: - name: aws/aws_ami - vars: - aws_ami: - aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - ami_name: "{{ _aws_ec2_autoscale_cluster_unique_name }}" - encrypt_boot: "{{ aws_ec2_autoscale_cluster.encrypt_boot }}" - name_filter: "{{ aws_ec2_autoscale_cluster.packer_name_filter }}" - repack: - root_volume_type: "{{ aws_ec2_autoscale_cluster.root_volume_type }}" - root_volume_size: "{{ aws_ec2_autoscale_cluster.root_volume_size }}" - cluster_name: "{{ aws_ec2_autoscale_cluster.name }}" - iam_role: "{{ aws_ec2_autoscale_cluster.iam_role_name | default(omit) }}" - vpc_id: "{{ _aws_ec2_autoscale_cluster_vpc_id }}" - vpc_subnet_id: "{{ _aws_ec2_autoscale_ami_subnet.subnets[0].subnet_id | default(omit) }}" - key_name: "{{ aws_ec2_autoscale_cluster.key_name }}" - ebs_optimized: "{{ aws_ec2_autoscale_cluster.ebs_optimized }}" - device_name: "{{ aws_ec2_autoscale_cluster.device_name }}" - playbook_file: "{{ aws_ec2_autoscale_cluster.ami_playbook_file }}" - on_error: "{{ aws_ec2_autoscale_cluster.packer_on_error }}" - vpc_filter: "{{ aws_ec2_autoscale_cluster.packer_vpc_filter }}" - subnet_filter_az: "{{ aws_ec2_autoscale_cluster.packer_subnet_filter_az }}" - force: "{{ aws_ec2_autoscale_cluster.packer_force }}" - operation: "{% if aws_ec2_autoscale_cluster_running_instances.instances | length > 0 %}{{ aws_ec2_autoscale_cluster.ami_operation }}{% else %}create{% endif %}" - tags: "{{ aws_ec2_autoscale_cluster.tags }}" - extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default(omit) }}" - when: - - aws_ec2_autoscale_cluster.ami_refresh - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -# No register in the previous task because we might not repack the AMI so we need to look it up. -- name: Gather AMI image from name. - amazon.aws.ec2_ami_info: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - owners: self - filters: - name: "{{ aws_ec2_autoscale_cluster.name }}*" - register: aws_ec2_autoscale_cluster_image - when: - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Register latest AMI image. 
- ansible.builtin.set_fact: - aws_ec2_autoscale_cluster_image_latest: "{{ aws_ec2_autoscale_cluster_image.images | sort(attribute='creation_date') | last }}" - when: - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster +- name: Create an AMI for EC2 clusters. + when: aws_ec2_autoscale_cluster.type == "ec2" + block: + - name: Add RDS endpoint address to extra vars for AMI building. + ansible.builtin.set_fact: + aws_ec2_autoscale_cluster: + ami_extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default([]) + ['_rds_endpoint: ' + _rds_instance_info.endpoint.address] }}" + when: + - _rds_instance_info.db_instance_identifier is defined + - aws_ec2_autoscale_cluster.rds.rds is defined + - aws_ec2_autoscale_cluster.rds.rds + + - name: Add Aurora RDS endpoint address to extra vars for AMI building. + ansible.builtin.set_fact: + aws_ec2_autoscale_cluster: + ami_extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default([]) + ['_rds_endpoint: ' + _rds_instance_info_aurora.endpoint.address] }}" + when: + - _rds_instance_info_aurora.db_instance_identifier is defined + - aws_ec2_autoscale_cluster.rds.rds is defined + - aws_ec2_autoscale_cluster.rds.rds + + - name: Gather running instances information. + amazon.aws.ec2_instance_info: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + filters: + "tag:Name": "{{ aws_ec2_autoscale_cluster.name }}" + instance-state-name: ["running"] + register: aws_ec2_autoscale_cluster_running_instances + when: aws_ec2_autoscale_cluster.asg_refresh or aws_ec2_autoscale_cluster.ami_refresh + + - name: Gather subnet information for temporary EC2 instance if using the 'repack' operation to generate a new AMI. + amazon.aws.ec2_vpc_subnet_info: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + filters: + vpc-id: "{{ _aws_ec2_autoscale_cluster_vpc_id }}" + tag:Name: "{{ aws_ec2_autoscale_cluster.ami_subnet_name }}" + register: _aws_ec2_autoscale_ami_subnet + when: aws_ec2_autoscale_cluster.ami_refresh and aws_ec2_autoscale_cluster.ami_operation == 'repack' + + - name: Create new AMI. 
+ ansible.builtin.include_role: + name: aws/aws_ami + vars: + aws_ami: + aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + ami_name: "{{ _aws_ec2_autoscale_cluster_unique_name }}" + encrypt_boot: "{{ aws_ec2_autoscale_cluster.encrypt_boot }}" + name_filter: "{{ aws_ec2_autoscale_cluster.packer_name_filter }}" + repack: + root_volume_type: "{{ aws_ec2_autoscale_cluster.root_volume_type }}" + root_volume_size: "{{ aws_ec2_autoscale_cluster.root_volume_size }}" + cluster_name: "{{ aws_ec2_autoscale_cluster.name }}" + iam_role: "{{ aws_ec2_autoscale_cluster.iam_role_name | default(omit) }}" + vpc_id: "{{ _aws_ec2_autoscale_cluster_vpc_id }}" + vpc_subnet_id: "{{ _aws_ec2_autoscale_ami_subnet.subnets[0].subnet_id | default(omit) }}" + key_name: "{{ aws_ec2_autoscale_cluster.key_name }}" + ebs_optimized: "{{ aws_ec2_autoscale_cluster.ebs_optimized }}" + device_name: "{{ aws_ec2_autoscale_cluster.device_name }}" + playbook_file: "{{ aws_ec2_autoscale_cluster.ami_playbook_file }}" + on_error: "{{ aws_ec2_autoscale_cluster.packer_on_error }}" + vpc_filter: "{{ aws_ec2_autoscale_cluster.packer_vpc_filter }}" + subnet_filter_az: "{{ aws_ec2_autoscale_cluster.packer_subnet_filter_az }}" + force: "{{ aws_ec2_autoscale_cluster.packer_force }}" + operation: "{% if aws_ec2_autoscale_cluster_running_instances.instances | length > 0 %}{{ aws_ec2_autoscale_cluster.ami_operation }}{% else %}create{% endif %}" + tags: "{{ aws_ec2_autoscale_cluster.tags }}" + extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default(omit) }}" + when: + - aws_ec2_autoscale_cluster.ami_refresh + - aws_ec2_autoscale_cluster.deploy_cluster + + # No register in the previous task because we might not repack the AMI so we need to look it up. + - name: Gather AMI image from name. + amazon.aws.ec2_ami_info: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + owners: self + filters: + name: "{{ aws_ec2_autoscale_cluster.name }}*" + register: aws_ec2_autoscale_cluster_image + when: aws_ec2_autoscale_cluster.deploy_cluster -- name: Create ami cleanup function. - ansible.builtin.include_role: - name: aws/aws_ami_asg_cleanup + - name: Register latest AMI image. + ansible.builtin.set_fact: + aws_ec2_autoscale_cluster_image_latest: "{{ aws_ec2_autoscale_cluster_image.images | sort(attribute='creation_date') | last }}" + when: aws_ec2_autoscale_cluster.deploy_cluster -- name: Gather IAM role info. - amazon.aws.iam_role_info: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - name: "{{ aws_ec2_autoscale_cluster.iam_role_name }}" - register: _aws_ec2_autoscale_cluster_iam_role_info + - name: Create ami cleanup function. + ansible.builtin.include_role: + name: aws/aws_ami_asg_cleanup # LOAD BALANCING - name: "Create a Target group for port {{ aws_ec2_autoscale_cluster.target_group_http_port }}." @@ -361,186 +346,183 @@ when: - aws_ec2_autoscale_cluster.asg_refresh -- name: Define default ALB listeners. 
- ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_listeners_http: - Protocol: HTTP - Port: "{{ aws_ec2_autoscale_cluster.alb_http_port }}" - DefaultActions: - - Type: forward - TargetGroupName: "{{ aws_ec2_autoscale_cluster.name }}" - Rules: "{{ aws_ec2_autoscale_cluster.listeners_http.rules }}" - _aws_ec2_autoscale_cluster_listeners_redirect: - Protocol: HTTP - Port: "{{ aws_ec2_autoscale_cluster.alb_http_port }}" - DefaultActions: - - Type: redirect - RedirectConfig: - Protocol: HTTPS - Host: "#{host}" - Query: "#{query}" - Path: "/#{path}" - Port: "{{ aws_ec2_autoscale_cluster.alb_https_port|string }}" - StatusCode: HTTP_301 - _aws_ec2_autoscale_cluster_listeners_https: - Protocol: HTTPS - Port: "{{ aws_ec2_autoscale_cluster.alb_https_port }}" - SslPolicy: "{{ aws_ec2_autoscale_cluster.alb_ssl_policy }}" - Certificates: - - CertificateArn: "{{ _ssl_certificate_ARN }}" - DefaultActions: - - Type: forward - TargetGroupName: "{{ aws_ec2_autoscale_cluster.name }}" - Rules: "{{ aws_ec2_autoscale_cluster.listeners_https.rules }}" +- name: Build an ALB. when: aws_ec2_autoscale_cluster.create_elb + block: + - name: Define default ALB listeners. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_listeners_http: + Protocol: HTTP + Port: "{{ aws_ec2_autoscale_cluster.alb_http_port }}" + DefaultActions: + - Type: forward + TargetGroupName: "{{ aws_ec2_autoscale_cluster.name }}" + Rules: "{{ aws_ec2_autoscale_cluster.listeners_http.rules }}" + _aws_ec2_autoscale_cluster_listeners_redirect: + Protocol: HTTP + Port: "{{ aws_ec2_autoscale_cluster.alb_http_port }}" + DefaultActions: + - Type: redirect + RedirectConfig: + Protocol: HTTPS + Host: "#{host}" + Query: "#{query}" + Path: "/#{path}" + Port: "{{ aws_ec2_autoscale_cluster.alb_https_port|string }}" + StatusCode: HTTP_301 + _aws_ec2_autoscale_cluster_listeners_https: + Protocol: HTTPS + Port: "{{ aws_ec2_autoscale_cluster.alb_https_port }}" + SslPolicy: "{{ aws_ec2_autoscale_cluster.alb_ssl_policy }}" + Certificates: + - CertificateArn: "{{ _ssl_certificate_ARN }}" + DefaultActions: + - Type: forward + TargetGroupName: "{{ aws_ec2_autoscale_cluster.name }}" + Rules: "{{ aws_ec2_autoscale_cluster.listeners_https.rules }}" + + # @TODO - we can use the aws_acm_obsolete_certificate_arn variable to tidy up previous ACM certs, if it is defined. + + - name: Add HTTP listeners. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_listeners: "{{ [_aws_ec2_autoscale_cluster_listeners_http] }}" + when: + - _ssl_certificate_ARN|length < 1 -# @TODO - we can use the aws_acm_obsolete_certificate_arn variable to tidy up previous ACM certs, if it is defined. - -- name: Add HTTP listeners. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_listeners: "{{ [_aws_ec2_autoscale_cluster_listeners_http] }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - _ssl_certificate_ARN | length < 1 - -- name: Add HTTPS Listener. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_listeners: "{{ [_aws_ec2_autoscale_cluster_listeners_redirect, _aws_ec2_autoscale_cluster_listeners_https] }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - _ssl_certificate_ARN | length > 1 - -- name: Add custom Listeners. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_listeners: "{{ _aws_ec2_autoscale_cluster_listeners + aws_ec2_autoscale_cluster.listeners }}" - when: - - aws_ec2_autoscale_cluster is defined - - aws_ec2_autoscale_cluster | length > 0 - - aws_ec2_autoscale_cluster.create_elb - -- name: Generate security group information for the ALB. 
- ansible.builtin.include_role: - name: aws/aws_security_groups - vars: - aws_security_groups: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - group_names: "{{ aws_ec2_autoscale_cluster.alb_security_groups }}" - return_type: ids - when: - - aws_ec2_autoscale_cluster.alb_security_groups | length > 0 - - aws_ec2_autoscale_cluster.create_elb - -- name: Create the ALB. - amazon.aws.elb_application_lb: - name: "{{ aws_ec2_autoscale_cluster.name }}" - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - state: "{{ aws_ec2_autoscale_cluster.state }}" - tags: "{{ aws_ec2_autoscale_cluster.tags }}" - subnets: "{{ _aws_ec2_autoscale_cluster_public_subnets_ids }}" - security_groups: "{{ _aws_security_group_list + [_aws_ec2_autoscale_cluster_security_group.group_id] }}" - listeners: "{{ _aws_ec2_autoscale_cluster_listeners }}" - idle_timeout: "{{ aws_ec2_autoscale_cluster.alb_idle_timeout }}" - register: _aws_ec2_autoscale_cluster_alb - when: aws_ec2_autoscale_cluster.create_elb + - name: Add HTTPS Listener. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_listeners: "{{ [_aws_ec2_autoscale_cluster_listeners_redirect, _aws_ec2_autoscale_cluster_listeners_https] }}" + when: + - _ssl_certificate_ARN|length > 1 -- name: "Get ALB listener ARN for port {{ aws_ec2_autoscale_cluster.alb_https_port }}." - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_alb_listener_ARN: "{{ item.listener_arn }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - item.port == aws_ec2_autoscale_cluster.alb_https_port - - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs - - _ssl_certificate_ARN | length > 1 - with_items: "{{ _aws_ec2_autoscale_cluster_alb.listeners }}" - -- name: Add extra SSL certificates to the ALB. - ansible.builtin.command: - cmd: "aws elbv2 add-listener-certificates --region {{ aws_ec2_autoscale_cluster.region }} --profile {{ aws_ec2_autoscale_cluster.aws_profile }} --listener-arn {{ _aws_ec2_autoscale_cluster_alb_listener_ARN }} --certificates CertificateArn={{ item }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs|length > 0 - - _ssl_certificate_ARN|length > 1 - with_items: "{{ aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs }}" + - name: Add custom Listeners. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_listeners: "{{ _aws_ec2_autoscale_cluster_listeners + aws_ec2_autoscale_cluster.listeners }}" + when: + - aws_ec2_autoscale_cluster is defined + - aws_ec2_autoscale_cluster|length > 0 + + - name: Generate security group information for the ALB. + ansible.builtin.include_role: + name: aws/aws_security_groups + vars: + aws_security_groups: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + group_names: "{{ aws_ec2_autoscale_cluster.alb_security_groups }}" + return_type: ids + when: + - aws_ec2_autoscale_cluster.alb_security_groups|length > 0 + + - name: Create the ALB. 
+ amazon.aws.elb_application_lb: + name: "{{ aws_ec2_autoscale_cluster.name }}" + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + state: "{{ aws_ec2_autoscale_cluster.state }}" + tags: "{{ aws_ec2_autoscale_cluster.tags }}" + subnets: "{{ _aws_ec2_autoscale_cluster_public_subnets_ids }}" + security_groups: "{{ _aws_security_group_list + [_aws_ec2_autoscale_cluster_security_group.group_id] }}" + listeners: "{{ _aws_ec2_autoscale_cluster_listeners }}" + idle_timeout: "{{ aws_ec2_autoscale_cluster.alb_idle_timeout }}" + register: _aws_ec2_autoscale_cluster_alb + + - name: "Get ALB listener ARN for port {{ aws_ec2_autoscale_cluster.alb_https_port }}." + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_alb_listener_ARN: "{{ item.listener_arn }}" + when: + - item.port == aws_ec2_autoscale_cluster.alb_https_port + - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs|length > 0 + - _ssl_certificate_ARN|length > 1 + with_items: "{{ _aws_ec2_autoscale_cluster_alb.listeners }}" + + - name: Add extra SSL certificates to the ALB. + ansible.builtin.command: + cmd: "aws elbv2 add-listener-certificates --region {{ aws_ec2_autoscale_cluster.region }} --profile {{ aws_ec2_autoscale_cluster.aws_profile }} --listener-arn {{ _aws_ec2_autoscale_cluster_alb_listener_ARN }} --certificates CertificateArn={{ item }}" + when: + - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs|length > 0 + - _ssl_certificate_ARN|length > 1 + with_items: "{{ aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs }}" # EC2 - BUILD ASG -- name: Generate security group information for the ASG. - ansible.builtin.include_role: - name: aws/aws_security_groups - vars: - aws_security_groups: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - group_names: "{{ aws_ec2_autoscale_cluster.cluster_security_groups }}" - return_type: ids - when: - - aws_ec2_autoscale_cluster.cluster_security_groups|length > 0 - - aws_ec2_autoscale_cluster.asg_refresh - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Create launch template. 
- amazon.aws.ec2_launch_template: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - name: "{{ aws_ec2_autoscale_cluster.name }}" - image_id: "{{ aws_ec2_autoscale_cluster.image_id if aws_ec2_autoscale_cluster.image_id is defined else aws_ec2_autoscale_cluster_image_latest.image_id }}" - key_name: "{{ aws_ec2_autoscale_cluster.key_name }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - state: "{{ aws_ec2_autoscale_cluster.state }}" - instance_type: "{{ aws_ec2_autoscale_cluster.instance_type }}" - iam_instance_profile: "{{ _aws_ec2_autoscale_cluster_iam_role_info.iam_roles[0].instance_profiles[0].arn }}" - disable_api_termination: "{{ aws_ec2_autoscale_cluster.instance_disable_api_termination }}" - ebs_optimized: "{{ aws_ec2_autoscale_cluster.ebs_optimized }}" - network_interfaces: - - associate_public_ip_address: "{{ aws_ec2_autoscale_cluster.assign_public_ip }}" - delete_on_termination: "{{ aws_ec2_autoscale_cluster.instance_nic_delete_on_termination }}" - subnet_id: "{{ subnet_id }}" # picked randomly from _aws_ec2_autoscale_cluster_subnets_ids, see with_random_choice - device_index: 0 # must be 0 - see https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-template.html#change-network-interface - groups: "{{ _aws_security_group_list + [_aws_ec2_autoscale_cluster_security_group.group_id] }}" - block_device_mappings: - - ebs: - delete_on_termination: "{{ aws_ec2_autoscale_cluster.root_volume_delete_on_termination }}" - encrypted: "{{ aws_ec2_autoscale_cluster.encrypt_boot }}" - volume_size: "{{ aws_ec2_autoscale_cluster.root_volume_size }}" - volume_type: "{{ aws_ec2_autoscale_cluster.root_volume_type }}" - device_name: "{{ aws_ec2_autoscale_cluster.device_name }}" - credit_specification: "{{ aws_ec2_autoscale_cluster.instance_credit_specification | default(omit) }}" - with_random_choice: "{{ _aws_ec2_autoscale_cluster_subnets_ids }}" - loop_control: - loop_var: subnet_id +- name: Build the ASG. when: - - aws_ec2_autoscale_cluster.asg_refresh - - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Create AutoScale group and spin up new instances. 
- amazon.aws.autoscaling_group: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - name: "{{ aws_ec2_autoscale_cluster.name }}" - state: "{{ aws_ec2_autoscale_cluster.state }}" - launch_template: - launch_template_name: "{{ aws_ec2_autoscale_cluster.name }}" - health_check_type: "{% if aws_ec2_autoscale_cluster_running_instances.instances | length > 0 %}{{ aws_ec2_autoscale_cluster.alb_health_check_type }}{% else %}EC2{% endif %}" - health_check_period: "{{ aws_ec2_autoscale_cluster.alb_health_check_period | default(omit) }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - replace_all_instances: true - replace_batch_size: "{{ aws_ec2_autoscale_cluster.min_size if aws_ec2_autoscale_cluster.desired_capacity == 0 else aws_ec2_autoscale_cluster.desired_capacity }}" - wait_for_instances: true - lt_check: true - wait_timeout: 3000 - desired_capacity: "{{ aws_ec2_autoscale_cluster.min_size if aws_ec2_autoscale_cluster.desired_capacity == 0 else aws_ec2_autoscale_cluster.desired_capacity }}" - min_size: "{{ aws_ec2_autoscale_cluster.min_size }}" - max_size: "{{ aws_ec2_autoscale_cluster.max_size }}" - tags: "{{ aws_ec2_autoscale_cluster.tags | simpledict2list }}" - vpc_zone_identifier: "{{ _aws_ec2_autoscale_cluster_subnets_ids }}" - target_group_arns: - - "{{ _aws_ec2_target_group_created.target_group_arn }}" - register: _aws_ec2_asg_created - when: - aws_ec2_autoscale_cluster.asg_refresh - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster + block: + - name: Gather IAM role info. + amazon.aws.iam_role_info: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + name: "{{ aws_ec2_autoscale_cluster.iam_role_name }}" + register: _aws_ec2_autoscale_cluster_iam_role_info + + - name: Generate security group information for the ASG. + ansible.builtin.include_role: + name: aws/aws_security_groups + vars: + aws_security_groups: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + group_names: "{{ aws_ec2_autoscale_cluster.cluster_security_groups }}" + return_type: ids + when: + - aws_ec2_autoscale_cluster.cluster_security_groups|length > 0 + + - name: Create launch template. 
+ amazon.aws.ec2_launch_template: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + name: "{{ aws_ec2_autoscale_cluster.name }}" + image_id: "{{ aws_ec2_autoscale_cluster.image_id if aws_ec2_autoscale_cluster.image_id is defined else aws_ec2_autoscale_cluster_image_latest.image_id }}" + key_name: "{{ aws_ec2_autoscale_cluster.key_name }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + state: "{{ aws_ec2_autoscale_cluster.state }}" + instance_type: "{{ aws_ec2_autoscale_cluster.instance_type }}" + iam_instance_profile: "{{ _aws_ec2_autoscale_cluster_iam_role_info.iam_roles[0].instance_profiles[0].arn }}" + disable_api_termination: "{{ aws_ec2_autoscale_cluster.instance_disable_api_termination }}" + ebs_optimized: "{{ aws_ec2_autoscale_cluster.ebs_optimized }}" + network_interfaces: + - associate_public_ip_address: "{{ aws_ec2_autoscale_cluster.assign_public_ip }}" + delete_on_termination: "{{ aws_ec2_autoscale_cluster.instance_nic_delete_on_termination }}" + subnet_id: "{{ subnet_id }}" # picked randomly from _aws_ec2_autoscale_cluster_subnets_ids, see with_random_choice + device_index: 0 # must be 0 - see https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-template.html#change-network-interface + groups: "{{ _aws_security_group_list + [_aws_ec2_autoscale_cluster_security_group.group_id] }}" + block_device_mappings: + - ebs: + delete_on_termination: "{{ aws_ec2_autoscale_cluster.root_volume_delete_on_termination }}" + encrypted: "{{ aws_ec2_autoscale_cluster.encrypt_boot }}" + volume_size: "{{ aws_ec2_autoscale_cluster.root_volume_size }}" + volume_type: "{{ aws_ec2_autoscale_cluster.root_volume_type }}" + device_name: "{{ aws_ec2_autoscale_cluster.device_name }}" + credit_specification: "{{ aws_ec2_autoscale_cluster.instance_credit_specification | default(omit) }}" + with_random_choice: "{{ _aws_ec2_autoscale_cluster_subnets_ids }}" + loop_control: + loop_var: subnet_id + + - name: Create AutoScale group and spin up new instances. + amazon.aws.autoscaling_group: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + name: "{{ aws_ec2_autoscale_cluster.name }}" + state: "{{ aws_ec2_autoscale_cluster.state }}" + launch_template: + launch_template_name: "{{ aws_ec2_autoscale_cluster.name }}" + health_check_type: "{% if aws_ec2_autoscale_cluster_running_instances.instances | length > 0 %}{{ aws_ec2_autoscale_cluster.alb_health_check_type }}{% else %}EC2{% endif %}" + health_check_period: "{{ aws_ec2_autoscale_cluster.alb_health_check_period | default(omit) }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + replace_all_instances: true + replace_batch_size: "{{ aws_ec2_autoscale_cluster.min_size if aws_ec2_autoscale_cluster.desired_capacity == 0 else aws_ec2_autoscale_cluster.desired_capacity }}" + wait_for_instances: true + lt_check: true + wait_timeout: 3000 + desired_capacity: "{{ aws_ec2_autoscale_cluster.min_size if aws_ec2_autoscale_cluster.desired_capacity == 0 else aws_ec2_autoscale_cluster.desired_capacity }}" + min_size: "{{ aws_ec2_autoscale_cluster.min_size }}" + max_size: "{{ aws_ec2_autoscale_cluster.max_size }}" + tags: "{{ aws_ec2_autoscale_cluster.tags | simpledict2list }}" + vpc_zone_identifier: "{{ _aws_ec2_autoscale_cluster_subnets_ids }}" + target_group_arns: + - "{{ _aws_ec2_target_group_created.target_group_arn }}" + register: _aws_ec2_asg_created - name: Handle simple scaling AutoScale. 
when: @@ -593,124 +575,92 @@ ansible.builtin.set_fact: _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_step_scaling_policies.results }}" -- name: Create placeholder ARN variables for scaling policies. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_up_policy_ARN: "" - _aws_ec2_autoscale_cluster_scaling_down_policy_ARN: "" - when: - - _aws_ec2_autoscale_cluster_scaling_policies is defined - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -# @todo We should support multiple policies. If this built a list -# then we could potentially loop over it after. -- name: Set scaling up policy ARN. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_up_policy_ARN: "{{ item.arn }}" - loop: "{{ _aws_ec2_autoscale_cluster_scaling_policies }}" - when: - - _aws_ec2_autoscale_cluster_scaling_policies is defined - - item.item.name == aws_ec2_autoscale_cluster.asg_cloudwatch_policy_scale_up_name - - item.arn is defined - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -# @todo As above. -- name: Set scaling down policy ARN. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_down_policy_ARN: "{{ item.arn }}" - loop: "{{ _aws_ec2_autoscale_cluster_scaling_policies }}" +- name: Create scaling policies and alarms. when: - _aws_ec2_autoscale_cluster_scaling_policies is defined - - item.item.name == aws_ec2_autoscale_cluster.asg_cloudwatch_policy_scale_down_name - - item.arn is defined - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Create alarm in CloudWatch for auto scaling up. - ansible.builtin.include_role: - name: aws/aws_ec2_metric_alarm - vars: - aws_ec2_metric_alarm: - aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - name: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarm_scale_up_name }}" - description: "{{ item.description }}" - metric: "{{ item.metric }}" - namespace: "{{ item.namespace }}" - statistic: "{{ item.statistic }}" - comparison: "{{ item.comparison }}" - threshold: "{{ item.threshold }}" - unit: "{{ item.unit }}" - period: "{{ item.period }}" - evaluation_periods: "{{ item.evaluation_periods }}" - alarm_actions: - - "{{ _aws_ec2_autoscale_cluster_scaling_up_policy_ARN }}" - dimensions: - "AutoScalingGroupName": "{{ aws_ec2_autoscale_cluster.name }}" - with_items: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarms }}" - when: - - _aws_ec2_autoscale_cluster_scaling_up_policy_ARN is defined - - item.scale_direction == 'up' - - aws_ec2_autoscale_cluster.type == "ec2" - -- name: Create alarm in CloudWatch for auto scaling down. 
- ansible.builtin.include_role: - name: aws/aws_ec2_metric_alarm - vars: - aws_ec2_metric_alarm: - aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - name: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarm_scale_down_name }}" - description: "{{ item.description }}" - metric: "{{ item.metric }}" - namespace: "{{ item.namespace }}" - statistic: "{{ item.statistic }}" - comparison: "{{ item.comparison }}" - threshold: "{{ item.threshold }}" - unit: "{{ item.unit }}" - period: "{{ item.period }}" - evaluation_periods: "{{ item.evaluation_periods }}" - alarm_actions: - - "{{ _aws_ec2_autoscale_cluster_scaling_down_policy_ARN }}" - dimensions: - "AutoScalingGroupName": "{{ aws_ec2_autoscale_cluster.name }}" - with_items: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarms }}" - when: - - _aws_ec2_autoscale_cluster_scaling_down_policy_ARN is defined - - item.scale_direction == 'down' - aws_ec2_autoscale_cluster.type == "ec2" + block: + - name: Create placeholder ARN variables for scaling policies. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_up_policy_ARN: "" + _aws_ec2_autoscale_cluster_scaling_down_policy_ARN: "" + when: + - aws_ec2_autoscale_cluster.deploy_cluster + + # @todo We should support multiple policies. If this built a list + # then we could potentially loop over it after. + - name: Set scaling up policy ARN. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_up_policy_ARN: "{{ item.arn }}" + loop: "{{ _aws_ec2_autoscale_cluster_scaling_policies }}" + when: + - item.item.name == aws_ec2_autoscale_cluster.asg_cloudwatch_policy_scale_up_name + - item.arn is defined + - aws_ec2_autoscale_cluster.deploy_cluster + + # @todo As above. + - name: Set scaling down policy ARN. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_down_policy_ARN: "{{ item.arn }}" + loop: "{{ _aws_ec2_autoscale_cluster_scaling_policies }}" + when: + - item.item.name == aws_ec2_autoscale_cluster.asg_cloudwatch_policy_scale_down_name + - item.arn is defined + - aws_ec2_autoscale_cluster.deploy_cluster + + - name: Create alarm in CloudWatch for auto scaling up. + ansible.builtin.include_role: + name: aws/aws_ec2_metric_alarm + vars: + aws_ec2_metric_alarm: + aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + name: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarm_scale_up_name }}" + description: "{{ item.description }}" + metric: "{{ item.metric }}" + namespace: "{{ item.namespace }}" + statistic: "{{ item.statistic }}" + comparison: "{{ item.comparison }}" + threshold: "{{ item.threshold }}" + unit: "{{ item.unit }}" + period: "{{ item.period }}" + evaluation_periods: "{{ item.evaluation_periods }}" + alarm_actions: + - "{{ _aws_ec2_autoscale_cluster_scaling_up_policy_ARN }}" + dimensions: + "AutoScalingGroupName": "{{ aws_ec2_autoscale_cluster.name }}" + with_items: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarms }}" + when: + - _aws_ec2_autoscale_cluster_scaling_up_policy_ARN is defined + - item.scale_direction == 'up' + + - name: Create alarm in CloudWatch for auto scaling down. 
+ ansible.builtin.include_role: + name: aws/aws_ec2_metric_alarm + vars: + aws_ec2_metric_alarm: + aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + name: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarm_scale_down_name }}" + description: "{{ item.description }}" + metric: "{{ item.metric }}" + namespace: "{{ item.namespace }}" + statistic: "{{ item.statistic }}" + comparison: "{{ item.comparison }}" + threshold: "{{ item.threshold }}" + unit: "{{ item.unit }}" + period: "{{ item.period }}" + evaluation_periods: "{{ item.evaluation_periods }}" + alarm_actions: + - "{{ _aws_ec2_autoscale_cluster_scaling_down_policy_ARN }}" + dimensions: + "AutoScalingGroupName": "{{ aws_ec2_autoscale_cluster.name }}" + with_items: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarms }}" + when: + - _aws_ec2_autoscale_cluster_scaling_down_policy_ARN is defined + - item.scale_direction == 'down' # CLOUDFRONT -- name: Create SSL certificate for CloudFront. - ansible.builtin.include_role: - name: aws/aws_acm - vars: - aws_acm: - export: false - region: us-east-1 # Certificate must be in us-east-1 for CloudFront. - domain_name: "{{ aws_ec2_autoscale_cluster.route_53.record }}" - extra_domains: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" - route_53: - aws_profile: "{{ aws_ec2_autoscale_cluster.acm.route_53.aws_profile }}" - zone: "{{ aws_ec2_autoscale_cluster.acm.route_53.zone }}" - when: - - aws_ec2_autoscale_cluster.cloudfront.create_cert - - aws_ec2_autoscale_cluster.region != 'us-east-1' - - aws_ec2_autoscale_cluster.cloudfront.create_distribution - -- name: Default to provided CloudFront SSL certificate ARN. - ansible.builtin.set_fact: - _cf_certificate_ARN: "{{ aws_ec2_autoscale_cluster.cloudfront.cf_certificate_ARN }}" - when: aws_ec2_autoscale_cluster.cloudfront.create_distribution - -- name: If provided, override CloudFront SSL certificate ARN with the one received from ACM. - ansible.builtin.set_fact: - _cf_certificate_ARN: "{{ aws_acm_certificate_arn }}" - when: - - aws_ec2_autoscale_cluster.cloudfront.create_cert - - aws_ec2_autoscale_cluster.cloudfront.create_distribution - - name: Initialise the domains loop var with main domain entry DNS settings. ansible.builtin.set_fact: _aws_ec2_autoscale_cluster_dns_all_domains: @@ -724,40 +674,66 @@ loop: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" when: aws_ec2_autoscale_cluster.acm.extra_domains | length > 0 -- name: Initialise a list of CloudFront aliases with main domain name. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ [_domain_name] }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - aws_ec2_autoscale_cluster.cloudfront.create_distribution +- name: Handle CloudFront. + when: aws_ec2_autoscale_cluster.cloudfront.create_distribution + block: + - name: Create SSL certificate for CloudFront. + ansible.builtin.include_role: + name: aws/aws_acm + vars: + aws_acm: + export: false + region: us-east-1 # Certificate must be in us-east-1 for CloudFront. + domain_name: "{{ aws_ec2_autoscale_cluster.route_53.record }}" + extra_domains: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" + route_53: + aws_profile: "{{ aws_ec2_autoscale_cluster.acm.route_53.aws_profile }}" + zone: "{{ aws_ec2_autoscale_cluster.acm.route_53.zone }}" + when: + - aws_ec2_autoscale_cluster.cloudfront.create_cert + - aws_ec2_autoscale_cluster.region != 'us-east-1' + + - name: Default to provided CloudFront SSL certificate ARN. 
+ ansible.builtin.set_fact: + _cf_certificate_ARN: "{{ aws_ec2_autoscale_cluster.cloudfront.cf_certificate_ARN }}" -- name: Add extra_domains so we can set up additional CloudFront aliases. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ _aws_ec2_autoscale_cluster_cloudfront_aliases + [item.domain] }}" - loop: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" - when: - - aws_ec2_autoscale_cluster.acm.extra_domains|length > 0 - - aws_ec2_autoscale_cluster.create_elb - - aws_ec2_autoscale_cluster.cloudfront.create_distribution + - name: If provided, override CloudFront SSL certificate ARN with the one received from ACM. + ansible.builtin.set_fact: + _cf_certificate_ARN: "{{ aws_acm_certificate_arn }}" + when: + - aws_ec2_autoscale_cluster.cloudfront.create_cert -- name: Create a CloudFront distribution. - ansible.builtin.include_role: - name: aws/aws_cloudfront_distribution - vars: - aws_cloudfront_distribution: - tags: "{{ aws_ec2_autoscale_cluster.tags | combine({'Name': aws_ec2_autoscale_cluster.name}) }}" - aliases: "{{ _aws_ec2_autoscale_cluster_cloudfront_aliases }}" - viewer_certificate: - acm_certificate_arn: "{{ _cf_certificate_ARN }}" - origins: - - domain_name: "{{ _aws_ec2_autoscale_cluster_alb.dns_name }}" - id: "ELB-{{ aws_ec2_autoscale_cluster.name }}" - default_cache_behavior: - target_origin_id: "ELB-{{ aws_ec2_autoscale_cluster.name }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - aws_ec2_autoscale_cluster.cloudfront.create_distribution - - _cf_certificate_ARN|length > 1 + - name: Initialise a list of CloudFront aliases with main domain name. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ [_domain_name] }}" + when: + - aws_ec2_autoscale_cluster.create_elb + + - name: Add extra_domains so we can set up additional CloudFront aliases. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ _aws_ec2_autoscale_cluster_cloudfront_aliases + [item.domain] }}" + loop: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" + when: + - aws_ec2_autoscale_cluster.acm.extra_domains|length > 0 + - aws_ec2_autoscale_cluster.create_elb + + - name: Create a CloudFront distribution. + ansible.builtin.include_role: + name: aws/aws_cloudfront_distribution + vars: + aws_cloudfront_distribution: + tags: "{{ aws_ec2_autoscale_cluster.tags | combine({'Name': aws_ec2_autoscale_cluster.name}) }}" + aliases: "{{ _aws_ec2_autoscale_cluster_cloudfront_aliases }}" + viewer_certificate: + acm_certificate_arn: "{{ _cf_certificate_ARN }}" + origins: + - domain_name: "{{ _aws_ec2_autoscale_cluster_alb.dns_name }}" + id: "ELB-{{ aws_ec2_autoscale_cluster.name }}" + default_cache_behavior: + target_origin_id: "ELB-{{ aws_ec2_autoscale_cluster.name }}" + when: + - aws_ec2_autoscale_cluster.create_elb + - _cf_certificate_ARN|length > 1 # @TODO - we can use the aws_acm_obsolete_certificate_arn variable to tidy up previous ACM certs, if it is defined. From 05f260071b33cc33de3381fac0031cc357c01f02 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 18:20:54 +0200 Subject: [PATCH 44/61] Further simplifying ASG CloudFront block. 
--- roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index af34ada97..a4115baa7 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -675,7 +675,9 @@ when: aws_ec2_autoscale_cluster.acm.extra_domains | length > 0 - name: Handle CloudFront. - when: aws_ec2_autoscale_cluster.cloudfront.create_distribution + when: + - aws_ec2_autoscale_cluster.cloudfront.create_distribution + - aws_ec2_autoscale_cluster.create_elb block: - name: Create SSL certificate for CloudFront. ansible.builtin.include_role: @@ -706,8 +708,6 @@ - name: Initialise a list of CloudFront aliases with main domain name. ansible.builtin.set_fact: _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ [_domain_name] }}" - when: - - aws_ec2_autoscale_cluster.create_elb - name: Add extra_domains so we can set up additional CloudFront aliases. ansible.builtin.set_fact: @@ -715,7 +715,6 @@ loop: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" when: - aws_ec2_autoscale_cluster.acm.extra_domains|length > 0 - - aws_ec2_autoscale_cluster.create_elb - name: Create a CloudFront distribution. ansible.builtin.include_role: @@ -732,7 +731,6 @@ default_cache_behavior: target_origin_id: "ELB-{{ aws_ec2_autoscale_cluster.name }}" when: - - aws_ec2_autoscale_cluster.create_elb - _cf_certificate_ARN|length > 1 # @TODO - we can use the aws_acm_obsolete_certificate_arn variable to tidy up previous ACM certs, if it is defined. From 32cfd678475f5a0751497e7eec03b0b94ebc2c90 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 19:01:11 +0200 Subject: [PATCH 45/61] Scaling rules refactor needs work. --- roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index a4115baa7..410e9d199 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -527,7 +527,6 @@ - name: Handle simple scaling AutoScale. when: - aws_ec2_autoscale_cluster.asg_scaling_policies|length > 0 - - item.policy_type == 'SimpleScaling' - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster block: @@ -544,18 +543,13 @@ cooldown: "{{ item.cooldown }}" register: _aws_ec2_autoscale_cluster_simple_scaling_policies with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" + when: item.policy_type == 'SimpleScaling' - name: Fetch simple scaling policies. ansible.builtin.set_fact: _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_scaling_policies + _aws_ec2_autoscale_cluster_simple_scaling_policies.results }}" + when: _aws_ec2_autoscale_cluster_simple_scaling_policies.results is defined -- name: Handle step scaling AustoScale. - when: - - aws_ec2_autoscale_cluster.asg_scaling_policies|length > 0 - - item.policy_type == 'StepScaling' - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - block: - name: Create step scaling AutoScale policies. 
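Patch 45 moves the item.policy_type test off the block-level when: and onto the looped task itself. A block's when: is inherited by every task inside the block, including set_fact tasks that have no loop and therefore no item, so a per-item condition only works on the task that actually iterates. A sketch of the working arrangement, with illustrative names:

    - name: Scaling policy handling.
      when: my_scaling_policies | length > 0
      block:
        - name: Create simple scaling policies.
          community.aws.autoscaling_policy:
            name: "{{ item.name }}-{{ item.policy_type }}"
            asg_name: my-asg
            state: present
            adjustment_type: "{{ item.adjustment_type }}"
            scaling_adjustment: "{{ item.adjustment }}"
            cooldown: "{{ item.cooldown }}"
          register: _simple_policies
          with_items: "{{ my_scaling_policies }}"
          when: item.policy_type == 'SimpleScaling'  # evaluated once per loop item

        - name: Record the results.
          ansible.builtin.set_fact:
            my_policy_results: "{{ _simple_policies.results | default([]) }}"
          # No loop here, so item is undefined; this is why the per-item test
          # cannot live on the block's when.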
community.aws.autoscaling_policy: profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" @@ -570,10 +564,12 @@ step_adjustments: "{{ item.step_adjustments }}" register: _aws_ec2_autoscale_cluster_step_scaling_policies with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" + when: item.policy_type == 'StepScaling' - name: Fetch step scaling policies. ansible.builtin.set_fact: _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_step_scaling_policies.results }}" + when: _aws_ec2_autoscale_cluster_step_scaling_policies.results is defined - name: Create scaling policies and alarms. when: From 1d1f82086efc9d6b14226a5654bf2b3dbb57acb4 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 19:21:12 +0200 Subject: [PATCH 46/61] Scaling policies list needs to be defined in case it is empty and we try to concatenate. --- roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index 410e9d199..42e1898d4 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -524,12 +524,16 @@ - "{{ _aws_ec2_target_group_created.target_group_arn }}" register: _aws_ec2_asg_created -- name: Handle simple scaling AutoScale. +- name: Handle AutoScale policies and alarms. when: - aws_ec2_autoscale_cluster.asg_scaling_policies|length > 0 - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster block: + - name: Set empty scaling policies fact. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_policies: [] + - name: Create simple scaling AutoScale policies. community.aws.autoscaling_policy: profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" @@ -545,9 +549,9 @@ with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" when: item.policy_type == 'SimpleScaling' - - name: Fetch simple scaling policies. + - name: Add simple scaling policies to scaling policies list. ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_scaling_policies + _aws_ec2_autoscale_cluster_simple_scaling_policies.results }}" + _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_simple_scaling_policies.results }}" when: _aws_ec2_autoscale_cluster_simple_scaling_policies.results is defined - name: Create step scaling AutoScale policies. @@ -566,9 +570,9 @@ with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" when: item.policy_type == 'StepScaling' - - name: Fetch step scaling policies. + - name: Add step scaling policies to scaling policies list. ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_step_scaling_policies.results }}" + _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_scaling_policies + _aws_ec2_autoscale_cluster_step_scaling_policies.results }}" when: _aws_ec2_autoscale_cluster_step_scaling_policies.results is defined - name: Create scaling policies and alarms. From 540f6d105b84f411f3275872a930c547a54f6222 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 19:30:14 +0200 Subject: [PATCH 47/61] Enhancing installer to accept an Ansible version and putting Ansible 12 back into GitHub Actions containers. 
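The installer gains an --ansible-version option that accepts a version constraint string
and is passed through to the Ansible install; when the option is omitted, ANSIBLE_VERSION
is left empty and the latest release is installed. For example, to keep the previous
behaviour of pinning below Ansible 12 (assuming the script is run from the repository
root):

    ./install.sh --ansible-version "<12"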
--- install.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/install.sh b/install.sh index c11219cbd..3a3be01d8 100755 --- a/install.sh +++ b/install.sh @@ -18,6 +18,7 @@ usage(){ /usr/bin/echo '--hostname: the server hostname to set (default: depends on system or provider)' /usr/bin/echo '--no-firewall: skip installing iptables with ports 22, 80 and 443 open' /usr/bin/echo '--gitlab: install GitLab CE on this server (default: no, set to desired GitLab address to install, e.g. gitlab.example.com)' + /usr/bin/echo '--ansible-version: pass an Ansible version string such as <12 for less than version 12 (default: latest)' /usr/bin/echo '--letsencrypt: try to create an SSL certificate with LetsEncrypt (requires DNS pointing at this server for provided GitLab URL)' /usr/bin/echo '--aws: enable AWS support' /usr/bin/echo '--docker: script is running in a Docker container' @@ -52,6 +53,10 @@ parse_options(){ shift GITLAB_URL="$1" ;; + "--ansible-version") + shift + ANSIBLE_VERSION="$1" + ;; "--letsencrypt") LE_SUPPORT="yes" ;; @@ -84,7 +89,7 @@ FIREWALL="true" AWS_SUPPORT="false" IS_LOCAL="false" SERVER_HOSTNAME=$(hostname) -ANSIBLE_VERSION="<12" +ANSIBLE_VERSION="" # Parse options. parse_options "$@" From 8593d7583387c9619cefea9d9f7d4b2a034e5438 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 19:38:58 +0200 Subject: [PATCH 48/61] Trying a different approach to defaulting the venv username. --- roles/debian/python_pip_packages/defaults/main.yml | 2 +- roles/debian/python_pip_packages/tasks/main.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/debian/python_pip_packages/defaults/main.yml b/roles/debian/python_pip_packages/defaults/main.yml index 67d6d0120..56a4f77e9 100644 --- a/roles/debian/python_pip_packages/defaults/main.yml +++ b/roles/debian/python_pip_packages/defaults/main.yml @@ -3,7 +3,7 @@ python_pip_packages: # These are usually set in the _init role using _venv_path, _venv_command and _venv_install_username but can be overridden. #venv_path: /path/to/venv #venv_command: /usr/bin/python3.11 -m venv - #install_username: deploy # user to become when creating venv + install_username: "{{ _venv_install_username }}" # _venv_install_username is set in _init packages: [] # - name: pip diff --git a/roles/debian/python_pip_packages/tasks/main.yml b/roles/debian/python_pip_packages/tasks/main.yml index 50c038d25..ad855d12e 100644 --- a/roles/debian/python_pip_packages/tasks/main.yml +++ b/roles/debian/python_pip_packages/tasks/main.yml @@ -12,5 +12,5 @@ path: "{{ python_pip_packages.venv_path | default(_venv_path) }}" state: directory recurse: true - owner: "{{ python_pip_packages.install_username | default(_venv_install_username) }}" - group: "{{ python_pip_packages.install_username | default(_venv_install_username) }}" + owner: "{{ python_pip_packages.install_username }}" + group: "{{ python_pip_packages.install_username }}" From 4448fa6833ab2f8ce0e257de7efcdec1ee3a4dbd Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 20:05:20 +0200 Subject: [PATCH 49/61] Removing default() filter from python_pip_packages role. 
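With this change the role defaults reference the facts set by the _init role directly,
and the tasks drop their per-use default() filters. A condensed sketch of the resulting
defaults (the _venv_* facts are assumed to be set by _init, as the file comment notes):

    python_pip_packages:
      venv_path: "{{ _venv_path }}"
      venv_command: "{{ _venv_command }}"
      install_username: "{{ _venv_install_username }}"
      packages: []

Callers that need something different still override by setting the corresponding
python_pip_packages keys, rather than relying on default() at each point of use.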
--- roles/debian/python_pip_packages/defaults/main.yml | 8 ++++---- roles/debian/python_pip_packages/tasks/main.yml | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/roles/debian/python_pip_packages/defaults/main.yml b/roles/debian/python_pip_packages/defaults/main.yml index 56a4f77e9..c2e179208 100644 --- a/roles/debian/python_pip_packages/defaults/main.yml +++ b/roles/debian/python_pip_packages/defaults/main.yml @@ -1,9 +1,9 @@ --- python_pip_packages: - # These are usually set in the _init role using _venv_path, _venv_command and _venv_install_username but can be overridden. - #venv_path: /path/to/venv - #venv_command: /usr/bin/python3.11 -m venv - install_username: "{{ _venv_install_username }}" # _venv_install_username is set in _init + # These are usually set in the _init role but can be overridden here. + venv_path: "{{ _venv_path }}" + venv_command: "{{ _venv_command }}" + install_username: "{{ _venv_install_username }}" packages: [] # - name: pip diff --git a/roles/debian/python_pip_packages/tasks/main.yml b/roles/debian/python_pip_packages/tasks/main.yml index ad855d12e..0bdbcd85b 100644 --- a/roles/debian/python_pip_packages/tasks/main.yml +++ b/roles/debian/python_pip_packages/tasks/main.yml @@ -2,14 +2,14 @@ - name: Install packages. ansible.builtin.pip: name: "{{ item.name }}" - state: "{{ item.state | default(omit) }}" - virtualenv: "{{ python_pip_packages.venv_path | default(_venv_path) }}" - virtualenv_command: "{{ python_pip_packages.venv_command | default(_venv_command) }}" + state: "{{ item.state|default(omit) }}" + virtualenv: "{{ python_pip_packages.venv_path }}" + virtualenv_command: "{{ python_pip_packages.venv_command }}" with_items: "{{ python_pip_packages.packages }}" - name: Ensure venv permissions. ansible.builtin.file: - path: "{{ python_pip_packages.venv_path | default(_venv_path) }}" + path: "{{ python_pip_packages.venv_path }}" state: directory recurse: true owner: "{{ python_pip_packages.install_username }}" From 08ea87e13ea9eab383013d6fa8b24ac46bd08166 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Tue, 30 Sep 2025 20:29:38 +0200 Subject: [PATCH 50/61] Fixing up the ce_ansible role for Ansible 12. --- roles/debian/ansible/defaults/main.yml | 16 ++++++++-------- roles/debian/ansible/tasks/main.yml | 8 +------- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/roles/debian/ansible/defaults/main.yml b/roles/debian/ansible/defaults/main.yml index 47707d7d0..bacce1d11 100644 --- a/roles/debian/ansible/defaults/main.yml +++ b/roles/debian/ansible/defaults/main.yml @@ -1,14 +1,14 @@ --- ce_ansible: - # These are usually set in the _init role using _venv_path, _venv_command and _venv_install_username but can be overridden. - #venv_path: "/home/{{ ce_provision.username }}/ansible" - #venv_command: /usr/bin/python3.11 -m venv - #venv_install_username: ansible # user to become when creating venv - ansible_version: "<12.0" # also check install.sh script in the repo root and set the version there accordingly. + # These are usually set in the _init role but can be overridden here. + venv_path: "{{ _venv_path }}" + venv_command: "{{ _venv_command }}" + venv_install_username: "{{ _venv_install_username }}" + ansible_version: "" # if used with the install.sh script in the repo root, version strings should match upgrade: - enabled: false # create systemd timer to auto-upgrade Ansible. Temporary disabled due to ansible 2.19 breaking changes. + enabled: false # create systemd timer to auto-upgrade Ansible. 
Temporary disabled due to ansible 2.19 breaking changes command: "{{ _venv_path }}/bin/python3 -m pip install --upgrade ansible" # if you set venv_path above then set it here too - on_calendar: "*-*-* 01:30:00" # see systemd.time documentation - https://www.freedesktop.org/software/systemd/man/latest/systemd.time.html#Calendar%20Events + on_calendar: "*-*-* 01:30:00" # see systemd.time documentation - https://www.freedesktop.org/software/systemd/man/latest/systemd.time.html#Calendar%20Events #timer_name: upgrade_ansible linters: - enabled: true # will not install linters if false, installing linters breaks cloud-init + enabled: true # will not install linters if false, installing linters breaks cloud-init diff --git a/roles/debian/ansible/tasks/main.yml b/roles/debian/ansible/tasks/main.yml index cdf6d0862..146c62e8d 100644 --- a/roles/debian/ansible/tasks/main.yml +++ b/roles/debian/ansible/tasks/main.yml @@ -21,20 +21,14 @@ - name: Override Python venv path if provided. ansible.builtin.set_fact: _venv_path: "{{ ce_ansible.venv_path }}" - when: - - ce_ansible.venv_path is defined - name: Override Python venv command if provided. ansible.builtin.set_fact: _venv_command: "{{ ce_ansible.venv_command }}" - when: - - ce_ansible.venv_command is defined - name: Override Python user if provided. ansible.builtin.set_fact: _venv_install_username: "{{ ce_ansible.venv_install_username }}" - when: - - ce_ansible.venv_install_username is defined - name: Set up Python packages. ansible.builtin.include_role: @@ -75,7 +69,7 @@ - name: Add the venv to $PATH using profile.d. ansible.builtin.copy: - content: "export PATH=$PATH:{{ ce_ansible.venv_path | default(_venv_path) }}/bin" + content: "export PATH=$PATH:{{ ce_ansible.venv_path }}/bin" dest: "/etc/profile.d/ansible-path.sh" mode: '0644' From 3a2b4b35407cf147fc258ca21b2891f1366d0602 Mon Sep 17 00:00:00 2001 From: drazenCE <140631110+drazenCE@users.noreply.github.com> Date: Wed, 1 Oct 2025 08:35:54 +0200 Subject: [PATCH 51/61] Fixing-varnish-pinning (#2710) --- roles/debian/varnish_config/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/debian/varnish_config/tasks/main.yml b/roles/debian/varnish_config/tasks/main.yml index 1a8f6cd3d..35ae0342a 100644 --- a/roles/debian/varnish_config/tasks/main.yml +++ b/roles/debian/varnish_config/tasks/main.yml @@ -24,7 +24,7 @@ - name: Varnish pin packagecloud.io. ansible.builtin.template: src: varnish.preferences.j2 - dest: /etc/apt/preferences.d + dest: /etc/apt/preferences.d/varnish mode: '0644' - name: Apt update to apply changes. From 01623ebb5894f87474db67a5f520b98dfe4a837e Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 1 Oct 2025 11:52:15 +0200 Subject: [PATCH 52/61] Removing unnecessary from_json filter from CloudFront acc ID lookup. --- roles/aws/aws_cloudfront_distribution/tasks/add_cf_function.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/aws/aws_cloudfront_distribution/tasks/add_cf_function.yml b/roles/aws/aws_cloudfront_distribution/tasks/add_cf_function.yml index a91c48477..f87ec0f7c 100644 --- a/roles/aws/aws_cloudfront_distribution/tasks/add_cf_function.yml +++ b/roles/aws/aws_cloudfront_distribution/tasks/add_cf_function.yml @@ -7,7 +7,7 @@ - name: Setting previous command output into variable. ansible.builtin.set_fact: - _acc_id: "{{ _acc_id.stdout | from_json }}" + _acc_id: "{{ _acc_id.stdout }}" - name: Get CloudFront info. 
ansible.builtin.shell: "aws cloudfront get-distribution-config --id {{ _aws_cloudfront_distribution.id }} --output json > /tmp/dist-config.json" From 6657bcca6081661aaf5417e490591120d64225b3 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 1 Oct 2025 12:30:42 +0200 Subject: [PATCH 53/61] Bug fixes pr 2.x (#2698) * Fixing installer variable bug. * Fixing tests for external PRs. * Testing with a fork. * Adding repo owner's username into installer string. * Refactoring config repo detection to simplify. * No longer permitted to use an integer as a truthy value. * No longer permitted to use existence check as a truthy value. * Can't see a reason why linotp var shouldn't be a boolean. * No longer permitted to use existence check as a truthy value. * Fixing truthy errors in ce_deploy role. * No longer permitted to use an integer as a truthy value. * Updating clamav command to use flock avoiding duplicate processes running. * More truthy length fixes. * Fixing more LDAP role truthy issues. * Slight block refactor for LDAP. * DN length check should not be negated. * Forgot to add the length filter. * Another boolean Ansible 12 error in AMI role. * ALB port must be cast as a string for RedirectAction. * Setting the correct Jinja filter, it's string, not str. * Fixing more Ansible 12 length issues in autoscale role. * Simplifying ASG role by refactoring into blocks. * Further simplifying ASG CloudFront block. * Scaling rules refactor needs work. * Scaling policies list needs to be defined in case it is empty and we try to concatenate. * Enhancing installer to accept an Ansible version and putting Ansible 12 back into GitHub Actions containers. * Trying a different approach to defaulting the venv username. * Removing default() filter from python_pip_packages role. * Fixing up the ce_ansible role for Ansible 12. * Removing unnecessary from_json filter from CloudFront acc ID lookup. --- install.sh | 7 +- roles/_overrides/tasks/main.yml | 4 +- roles/aws/aws_ami/tasks/main.yml | 2 +- roles/aws/aws_ami/templates/packer.json.j2 | 6 +- .../tasks/add_cf_function.yml | 2 +- .../aws_ec2_autoscale_cluster/tasks/main.yml | 937 +++++++++--------- roles/aws/aws_elb/tasks/main.yml | 2 +- .../templates/simplesamlphp_sp.j2 | 2 +- roles/debian/ansible/defaults/main.yml | 16 +- roles/debian/ansible/tasks/main.yml | 8 +- .../apache/templates/cloudwatch-main.json.j2 | 6 +- .../apache/templates/cloudwatch-vhost.json.j2 | 10 +- .../templates/config.json.j2 | 14 +- .../templates/include-exclude-filelist.j2 | 2 +- .../nginx/templates/cloudwatch-main.json.j2 | 4 +- .../nginx/templates/cloudwatch-vhost.json.j2 | 8 +- .../templates/headless-openvpn-install.sh.j2 | 4 +- roles/debian/pam_ldap/tasks/main.yml | 56 +- .../cloudwatch-php-fpm-fixedport.json.j2 | 8 +- .../templates/cloudwatch-php-fpm.json.j2 | 8 +- roles/debian/postfix/templates/transport.j2 | 2 +- .../python_pip_packages/defaults/main.yml | 8 +- .../debian/python_pip_packages/tasks/main.yml | 12 +- .../ssh_server/templates/sshd_config.j2 | 4 +- 24 files changed, 546 insertions(+), 586 deletions(-) diff --git a/install.sh b/install.sh index c11219cbd..3a3be01d8 100755 --- a/install.sh +++ b/install.sh @@ -18,6 +18,7 @@ usage(){ /usr/bin/echo '--hostname: the server hostname to set (default: depends on system or provider)' /usr/bin/echo '--no-firewall: skip installing iptables with ports 22, 80 and 443 open' /usr/bin/echo '--gitlab: install GitLab CE on this server (default: no, set to desired GitLab address to install, e.g. 
gitlab.example.com)' + /usr/bin/echo '--ansible-version: pass an Ansible version string such as <12 for less than version 12 (default: latest)' /usr/bin/echo '--letsencrypt: try to create an SSL certificate with LetsEncrypt (requires DNS pointing at this server for provided GitLab URL)' /usr/bin/echo '--aws: enable AWS support' /usr/bin/echo '--docker: script is running in a Docker container' @@ -52,6 +53,10 @@ parse_options(){ shift GITLAB_URL="$1" ;; + "--ansible-version") + shift + ANSIBLE_VERSION="$1" + ;; "--letsencrypt") LE_SUPPORT="yes" ;; @@ -84,7 +89,7 @@ FIREWALL="true" AWS_SUPPORT="false" IS_LOCAL="false" SERVER_HOSTNAME=$(hostname) -ANSIBLE_VERSION="<12" +ANSIBLE_VERSION="" # Parse options. parse_options "$@" diff --git a/roles/_overrides/tasks/main.yml b/roles/_overrides/tasks/main.yml index 18365b2e6..3fcfdd4eb 100644 --- a/roles/_overrides/tasks/main.yml +++ b/roles/_overrides/tasks/main.yml @@ -6,7 +6,7 @@ loop_var: override_file when: - _overrides.files is defined - - _overrides.files | length + - _overrides.files|length > 0 - name: Generate links overrides. ansible.builtin.include_tasks: link.yml @@ -15,4 +15,4 @@ loop_var: override_link when: - _overrides.links is defined - - _overrides.links | length + - _overrides.links|length > 0 diff --git a/roles/aws/aws_ami/tasks/main.yml b/roles/aws/aws_ami/tasks/main.yml index 2973ee816..1ce621463 100644 --- a/roles/aws/aws_ami/tasks/main.yml +++ b/roles/aws/aws_ami/tasks/main.yml @@ -17,7 +17,7 @@ ami_base_image_latest: "{{ ami_base_image.images | sort(attribute='creation_date') | last }}" when: - ami_base_image.images is defined - - ami_base_image.images + - ami_base_image.images|length > 0 - name: Delete existing image. ansible.builtin.include_tasks: delete.yml diff --git a/roles/aws/aws_ami/templates/packer.json.j2 b/roles/aws/aws_ami/templates/packer.json.j2 index faa3074a1..0a27cdbc7 100755 --- a/roles/aws/aws_ami/templates/packer.json.j2 +++ b/roles/aws/aws_ami/templates/packer.json.j2 @@ -31,7 +31,7 @@ "owners": ["{{ aws_ami.owner }}"], "most_recent": true }, - {% if aws_ami.vpc_filter is defined and aws_ami.vpc_filter | length > 0 %} + {% if aws_ami.vpc_filter is defined and aws_ami.vpc_filter|length > 0 %} "vpc_filter": { "filters": { "tag:Name": "{{ aws_ami.vpc_filter }}" @@ -53,7 +53,7 @@ "playbook_file": "{{ aws_ami.playbook_file }}", "inventory_directory": "{{ _ce_provision_base_dir }}/hosts", "ssh_authorized_key_file": "/home/{{ user_provision.username }}/.ssh/{{ aws_ami.public_key_name }}", - {% if aws_ami.groups is defined and aws_ami.groups | length %} + {% if aws_ami.groups is defined and aws_ami.groups|length > 0 %} "groups": {{ aws_ami.groups | to_json }}, {% endif %} "ansible_env_vars": @@ -68,7 +68,7 @@ {% if ansible_verbosity >= 1 %} "-vvvv", {% endif %} - {% if _aws_ami_extra_vars is defined and _aws_ami_extra_vars | length %} + {% if _aws_ami_extra_vars is defined and _aws_ami_extra_vars|length > 0 %} "--extra-vars", "{{ _aws_ami_extra_vars }}", {% endif %} diff --git a/roles/aws/aws_cloudfront_distribution/tasks/add_cf_function.yml b/roles/aws/aws_cloudfront_distribution/tasks/add_cf_function.yml index a91c48477..f87ec0f7c 100644 --- a/roles/aws/aws_cloudfront_distribution/tasks/add_cf_function.yml +++ b/roles/aws/aws_cloudfront_distribution/tasks/add_cf_function.yml @@ -7,7 +7,7 @@ - name: Setting previous command output into variable. ansible.builtin.set_fact: - _acc_id: "{{ _acc_id.stdout | from_json }}" + _acc_id: "{{ _acc_id.stdout }}" - name: Get CloudFront info. 
ansible.builtin.shell: "aws cloudfront get-distribution-config --id {{ _aws_cloudfront_distribution.id }} --output json > /tmp/dist-config.json" diff --git a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml index 2bb19d861..42e1898d4 100644 --- a/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml +++ b/roles/aws/aws_ec2_autoscale_cluster/tasks/main.yml @@ -27,7 +27,7 @@ - name: Use provided VPC id. ansible.builtin.set_fact: _aws_ec2_autoscale_cluster_vpc_id: "{{ aws_ec2_autoscale_cluster.vpc_id }}" - when: aws_ec2_autoscale_cluster.vpc_name is not defined or (aws_ec2_autoscale_cluster.vpc_name | length) == 0 + when: (aws_ec2_autoscale_cluster.vpc_name is not defined) or (aws_ec2_autoscale_cluster.vpc_name|length == 0) - name: Create matching Security Group. ansible.builtin.include_role: @@ -223,113 +223,98 @@ - aws_ec2_autoscale_cluster.type == "ecs" # EC2 - AMI BUILDING -- name: Add RDS endpoint address to extra vars for AMI building. - ansible.builtin.set_fact: - aws_ec2_autoscale_cluster: - ami_extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default([]) + ['_rds_endpoint: ' + _rds_instance_info.endpoint.address] }}" - when: - - _rds_instance_info.db_instance_identifier is defined - - aws_ec2_autoscale_cluster.rds.rds is defined - - aws_ec2_autoscale_cluster.rds.rds - - aws_ec2_autoscale_cluster.type == "ec2" - -- name: Add Aurora RDS endpoint address to extra vars for AMI building. - ansible.builtin.set_fact: - aws_ec2_autoscale_cluster: - ami_extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default([]) + ['_rds_endpoint: ' + _rds_instance_info_aurora.endpoint.address] }}" - when: - - _rds_instance_info_aurora.db_instance_identifier is defined - - aws_ec2_autoscale_cluster.rds.rds is defined - - aws_ec2_autoscale_cluster.rds.rds - - aws_ec2_autoscale_cluster.type == "ec2" - -- name: Gather running instances information. - amazon.aws.ec2_instance_info: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - filters: - "tag:Name": "{{ aws_ec2_autoscale_cluster.name }}" - instance-state-name: ["running"] - register: aws_ec2_autoscale_cluster_running_instances - when: - - aws_ec2_autoscale_cluster.asg_refresh or aws_ec2_autoscale_cluster.ami_refresh - - aws_ec2_autoscale_cluster.type == "ec2" - -- name: Gather subnet information for temporary EC2 instance if using the 'repack' operation to generate a new AMI. - amazon.aws.ec2_vpc_subnet_info: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - filters: - vpc-id: "{{ _aws_ec2_autoscale_cluster_vpc_id }}" - tag:Name: "{{ aws_ec2_autoscale_cluster.ami_subnet_name }}" - register: _aws_ec2_autoscale_ami_subnet - when: - - aws_ec2_autoscale_cluster.ami_refresh and aws_ec2_autoscale_cluster.ami_operation == 'repack' - - aws_ec2_autoscale_cluster.type == "ec2" - -- name: Create new AMI. 
- ansible.builtin.include_role: - name: aws/aws_ami - vars: - aws_ami: - aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - ami_name: "{{ _aws_ec2_autoscale_cluster_unique_name }}" - encrypt_boot: "{{ aws_ec2_autoscale_cluster.encrypt_boot }}" - name_filter: "{{ aws_ec2_autoscale_cluster.packer_name_filter }}" - repack: - root_volume_type: "{{ aws_ec2_autoscale_cluster.root_volume_type }}" - root_volume_size: "{{ aws_ec2_autoscale_cluster.root_volume_size }}" - cluster_name: "{{ aws_ec2_autoscale_cluster.name }}" - iam_role: "{{ aws_ec2_autoscale_cluster.iam_role_name | default(omit) }}" - vpc_id: "{{ _aws_ec2_autoscale_cluster_vpc_id }}" - vpc_subnet_id: "{{ _aws_ec2_autoscale_ami_subnet.subnets[0].subnet_id | default(omit) }}" - key_name: "{{ aws_ec2_autoscale_cluster.key_name }}" - ebs_optimized: "{{ aws_ec2_autoscale_cluster.ebs_optimized }}" - device_name: "{{ aws_ec2_autoscale_cluster.device_name }}" - playbook_file: "{{ aws_ec2_autoscale_cluster.ami_playbook_file }}" - on_error: "{{ aws_ec2_autoscale_cluster.packer_on_error }}" - vpc_filter: "{{ aws_ec2_autoscale_cluster.packer_vpc_filter }}" - subnet_filter_az: "{{ aws_ec2_autoscale_cluster.packer_subnet_filter_az }}" - force: "{{ aws_ec2_autoscale_cluster.packer_force }}" - operation: "{% if aws_ec2_autoscale_cluster_running_instances.instances | length > 0 %}{{ aws_ec2_autoscale_cluster.ami_operation }}{% else %}create{% endif %}" - tags: "{{ aws_ec2_autoscale_cluster.tags }}" - extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default(omit) }}" - when: - - aws_ec2_autoscale_cluster.ami_refresh - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -# No register in the previous task because we might not repack the AMI so we need to look it up. -- name: Gather AMI image from name. - amazon.aws.ec2_ami_info: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - owners: self - filters: - name: "{{ aws_ec2_autoscale_cluster.name }}*" - register: aws_ec2_autoscale_cluster_image - when: - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Register latest AMI image. - ansible.builtin.set_fact: - aws_ec2_autoscale_cluster_image_latest: "{{ aws_ec2_autoscale_cluster_image.images | sort(attribute='creation_date') | last }}" - when: - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Create ami cleanup function. - ansible.builtin.include_role: - name: aws/aws_ami_asg_cleanup - -- name: Gather IAM role info. - amazon.aws.iam_role_info: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - name: "{{ aws_ec2_autoscale_cluster.iam_role_name }}" - register: _aws_ec2_autoscale_cluster_iam_role_info +- name: Create an AMI for EC2 clusters. + when: aws_ec2_autoscale_cluster.type == "ec2" + block: + - name: Add RDS endpoint address to extra vars for AMI building. + ansible.builtin.set_fact: + aws_ec2_autoscale_cluster: + ami_extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default([]) + ['_rds_endpoint: ' + _rds_instance_info.endpoint.address] }}" + when: + - _rds_instance_info.db_instance_identifier is defined + - aws_ec2_autoscale_cluster.rds.rds is defined + - aws_ec2_autoscale_cluster.rds.rds + + - name: Add Aurora RDS endpoint address to extra vars for AMI building. 
+ ansible.builtin.set_fact: + aws_ec2_autoscale_cluster: + ami_extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default([]) + ['_rds_endpoint: ' + _rds_instance_info_aurora.endpoint.address] }}" + when: + - _rds_instance_info_aurora.db_instance_identifier is defined + - aws_ec2_autoscale_cluster.rds.rds is defined + - aws_ec2_autoscale_cluster.rds.rds + + - name: Gather running instances information. + amazon.aws.ec2_instance_info: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + filters: + "tag:Name": "{{ aws_ec2_autoscale_cluster.name }}" + instance-state-name: ["running"] + register: aws_ec2_autoscale_cluster_running_instances + when: aws_ec2_autoscale_cluster.asg_refresh or aws_ec2_autoscale_cluster.ami_refresh + + - name: Gather subnet information for temporary EC2 instance if using the 'repack' operation to generate a new AMI. + amazon.aws.ec2_vpc_subnet_info: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + filters: + vpc-id: "{{ _aws_ec2_autoscale_cluster_vpc_id }}" + tag:Name: "{{ aws_ec2_autoscale_cluster.ami_subnet_name }}" + register: _aws_ec2_autoscale_ami_subnet + when: aws_ec2_autoscale_cluster.ami_refresh and aws_ec2_autoscale_cluster.ami_operation == 'repack' + + - name: Create new AMI. + ansible.builtin.include_role: + name: aws/aws_ami + vars: + aws_ami: + aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + ami_name: "{{ _aws_ec2_autoscale_cluster_unique_name }}" + encrypt_boot: "{{ aws_ec2_autoscale_cluster.encrypt_boot }}" + name_filter: "{{ aws_ec2_autoscale_cluster.packer_name_filter }}" + repack: + root_volume_type: "{{ aws_ec2_autoscale_cluster.root_volume_type }}" + root_volume_size: "{{ aws_ec2_autoscale_cluster.root_volume_size }}" + cluster_name: "{{ aws_ec2_autoscale_cluster.name }}" + iam_role: "{{ aws_ec2_autoscale_cluster.iam_role_name | default(omit) }}" + vpc_id: "{{ _aws_ec2_autoscale_cluster_vpc_id }}" + vpc_subnet_id: "{{ _aws_ec2_autoscale_ami_subnet.subnets[0].subnet_id | default(omit) }}" + key_name: "{{ aws_ec2_autoscale_cluster.key_name }}" + ebs_optimized: "{{ aws_ec2_autoscale_cluster.ebs_optimized }}" + device_name: "{{ aws_ec2_autoscale_cluster.device_name }}" + playbook_file: "{{ aws_ec2_autoscale_cluster.ami_playbook_file }}" + on_error: "{{ aws_ec2_autoscale_cluster.packer_on_error }}" + vpc_filter: "{{ aws_ec2_autoscale_cluster.packer_vpc_filter }}" + subnet_filter_az: "{{ aws_ec2_autoscale_cluster.packer_subnet_filter_az }}" + force: "{{ aws_ec2_autoscale_cluster.packer_force }}" + operation: "{% if aws_ec2_autoscale_cluster_running_instances.instances | length > 0 %}{{ aws_ec2_autoscale_cluster.ami_operation }}{% else %}create{% endif %}" + tags: "{{ aws_ec2_autoscale_cluster.tags }}" + extra_vars: "{{ aws_ec2_autoscale_cluster.ami_extra_vars | default(omit) }}" + when: + - aws_ec2_autoscale_cluster.ami_refresh + - aws_ec2_autoscale_cluster.deploy_cluster + + # No register in the previous task because we might not repack the AMI so we need to look it up. + - name: Gather AMI image from name. + amazon.aws.ec2_ami_info: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + owners: self + filters: + name: "{{ aws_ec2_autoscale_cluster.name }}*" + register: aws_ec2_autoscale_cluster_image + when: aws_ec2_autoscale_cluster.deploy_cluster + + - name: Register latest AMI image. 
+ ansible.builtin.set_fact: + aws_ec2_autoscale_cluster_image_latest: "{{ aws_ec2_autoscale_cluster_image.images | sort(attribute='creation_date') | last }}" + when: aws_ec2_autoscale_cluster.deploy_cluster + + - name: Create ami cleanup function. + ansible.builtin.include_role: + name: aws/aws_ami_asg_cleanup # LOAD BALANCING - name: "Create a Target group for port {{ aws_ec2_autoscale_cluster.target_group_http_port }}." @@ -361,360 +346,321 @@ when: - aws_ec2_autoscale_cluster.asg_refresh -- name: Define default ALB listeners. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_listeners_http: - Protocol: HTTP - Port: "{{ aws_ec2_autoscale_cluster.alb_http_port }}" - DefaultActions: - - Type: forward - TargetGroupName: "{{ aws_ec2_autoscale_cluster.name }}" - Rules: "{{ aws_ec2_autoscale_cluster.listeners_http.rules }}" - _aws_ec2_autoscale_cluster_listeners_redirect: - Protocol: HTTP - Port: "{{ aws_ec2_autoscale_cluster.alb_http_port }}" - DefaultActions: - - Type: redirect - RedirectConfig: - Protocol: HTTPS - Host: "#{host}" - Query: "#{query}" - Path: "/#{path}" - Port: "{{ aws_ec2_autoscale_cluster.alb_https_port }}" - StatusCode: HTTP_301 - _aws_ec2_autoscale_cluster_listeners_https: - Protocol: HTTPS - Port: "{{ aws_ec2_autoscale_cluster.alb_https_port }}" - SslPolicy: "{{ aws_ec2_autoscale_cluster.alb_ssl_policy }}" - Certificates: - - CertificateArn: "{{ _ssl_certificate_ARN }}" - DefaultActions: - - Type: forward - TargetGroupName: "{{ aws_ec2_autoscale_cluster.name }}" - Rules: "{{ aws_ec2_autoscale_cluster.listeners_https.rules }}" - when: aws_ec2_autoscale_cluster.create_elb - -# @TODO - we can use the aws_acm_obsolete_certificate_arn variable to tidy up previous ACM certs, if it is defined. - -- name: Add HTTP listeners. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_listeners: "{{ [_aws_ec2_autoscale_cluster_listeners_http] }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - _ssl_certificate_ARN | length < 1 - -- name: Add HTTPS Listener. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_listeners: "{{ [_aws_ec2_autoscale_cluster_listeners_redirect, _aws_ec2_autoscale_cluster_listeners_https] }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - _ssl_certificate_ARN | length > 1 - -- name: Add custom Listeners. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_listeners: "{{ _aws_ec2_autoscale_cluster_listeners + aws_ec2_autoscale_cluster.listeners }}" - when: - - aws_ec2_autoscale_cluster is defined - - aws_ec2_autoscale_cluster | length - - aws_ec2_autoscale_cluster.create_elb - -- name: Generate security group information for the ALB. - ansible.builtin.include_role: - name: aws/aws_security_groups - vars: - aws_security_groups: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - group_names: "{{ aws_ec2_autoscale_cluster.alb_security_groups }}" - return_type: ids - when: - - aws_ec2_autoscale_cluster.alb_security_groups | length > 0 - - aws_ec2_autoscale_cluster.create_elb - -- name: Create the ALB. 
- amazon.aws.elb_application_lb: - name: "{{ aws_ec2_autoscale_cluster.name }}" - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - state: "{{ aws_ec2_autoscale_cluster.state }}" - tags: "{{ aws_ec2_autoscale_cluster.tags }}" - subnets: "{{ _aws_ec2_autoscale_cluster_public_subnets_ids }}" - security_groups: "{{ _aws_security_group_list + [_aws_ec2_autoscale_cluster_security_group.group_id] }}" - listeners: "{{ _aws_ec2_autoscale_cluster_listeners }}" - idle_timeout: "{{ aws_ec2_autoscale_cluster.alb_idle_timeout }}" - register: _aws_ec2_autoscale_cluster_alb +- name: Build an ALB. when: aws_ec2_autoscale_cluster.create_elb - -- name: "Get ALB listener ARN for port {{ aws_ec2_autoscale_cluster.alb_https_port }}." - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_alb_listener_ARN: "{{ item.listener_arn }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - item.port == aws_ec2_autoscale_cluster.alb_https_port - - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs - - _ssl_certificate_ARN | length > 1 - with_items: "{{ _aws_ec2_autoscale_cluster_alb.listeners }}" - -- name: Add extra SSL certificates to the ALB. - ansible.builtin.command: - cmd: "aws elbv2 add-listener-certificates --region {{ aws_ec2_autoscale_cluster.region }} --profile {{ aws_ec2_autoscale_cluster.aws_profile }} --listener-arn {{ _aws_ec2_autoscale_cluster_alb_listener_ARN }} --certificates CertificateArn={{ item }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs - - _ssl_certificate_ARN | length > 1 - with_items: "{{ aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs }}" + block: + - name: Define default ALB listeners. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_listeners_http: + Protocol: HTTP + Port: "{{ aws_ec2_autoscale_cluster.alb_http_port }}" + DefaultActions: + - Type: forward + TargetGroupName: "{{ aws_ec2_autoscale_cluster.name }}" + Rules: "{{ aws_ec2_autoscale_cluster.listeners_http.rules }}" + _aws_ec2_autoscale_cluster_listeners_redirect: + Protocol: HTTP + Port: "{{ aws_ec2_autoscale_cluster.alb_http_port }}" + DefaultActions: + - Type: redirect + RedirectConfig: + Protocol: HTTPS + Host: "#{host}" + Query: "#{query}" + Path: "/#{path}" + Port: "{{ aws_ec2_autoscale_cluster.alb_https_port|string }}" + StatusCode: HTTP_301 + _aws_ec2_autoscale_cluster_listeners_https: + Protocol: HTTPS + Port: "{{ aws_ec2_autoscale_cluster.alb_https_port }}" + SslPolicy: "{{ aws_ec2_autoscale_cluster.alb_ssl_policy }}" + Certificates: + - CertificateArn: "{{ _ssl_certificate_ARN }}" + DefaultActions: + - Type: forward + TargetGroupName: "{{ aws_ec2_autoscale_cluster.name }}" + Rules: "{{ aws_ec2_autoscale_cluster.listeners_https.rules }}" + + # @TODO - we can use the aws_acm_obsolete_certificate_arn variable to tidy up previous ACM certs, if it is defined. + + - name: Add HTTP listeners. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_listeners: "{{ [_aws_ec2_autoscale_cluster_listeners_http] }}" + when: + - _ssl_certificate_ARN|length < 1 + + - name: Add HTTPS Listener. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_listeners: "{{ [_aws_ec2_autoscale_cluster_listeners_redirect, _aws_ec2_autoscale_cluster_listeners_https] }}" + when: + - _ssl_certificate_ARN|length > 1 + + - name: Add custom Listeners. 
+ ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_listeners: "{{ _aws_ec2_autoscale_cluster_listeners + aws_ec2_autoscale_cluster.listeners }}" + when: + - aws_ec2_autoscale_cluster is defined + - aws_ec2_autoscale_cluster|length > 0 + + - name: Generate security group information for the ALB. + ansible.builtin.include_role: + name: aws/aws_security_groups + vars: + aws_security_groups: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + group_names: "{{ aws_ec2_autoscale_cluster.alb_security_groups }}" + return_type: ids + when: + - aws_ec2_autoscale_cluster.alb_security_groups|length > 0 + + - name: Create the ALB. + amazon.aws.elb_application_lb: + name: "{{ aws_ec2_autoscale_cluster.name }}" + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + state: "{{ aws_ec2_autoscale_cluster.state }}" + tags: "{{ aws_ec2_autoscale_cluster.tags }}" + subnets: "{{ _aws_ec2_autoscale_cluster_public_subnets_ids }}" + security_groups: "{{ _aws_security_group_list + [_aws_ec2_autoscale_cluster_security_group.group_id] }}" + listeners: "{{ _aws_ec2_autoscale_cluster_listeners }}" + idle_timeout: "{{ aws_ec2_autoscale_cluster.alb_idle_timeout }}" + register: _aws_ec2_autoscale_cluster_alb + + - name: "Get ALB listener ARN for port {{ aws_ec2_autoscale_cluster.alb_https_port }}." + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_alb_listener_ARN: "{{ item.listener_arn }}" + when: + - item.port == aws_ec2_autoscale_cluster.alb_https_port + - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs|length > 0 + - _ssl_certificate_ARN|length > 1 + with_items: "{{ _aws_ec2_autoscale_cluster_alb.listeners }}" + + - name: Add extra SSL certificates to the ALB. + ansible.builtin.command: + cmd: "aws elbv2 add-listener-certificates --region {{ aws_ec2_autoscale_cluster.region }} --profile {{ aws_ec2_autoscale_cluster.aws_profile }} --listener-arn {{ _aws_ec2_autoscale_cluster_alb_listener_ARN }} --certificates CertificateArn={{ item }}" + when: + - aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs|length > 0 + - _ssl_certificate_ARN|length > 1 + with_items: "{{ aws_ec2_autoscale_cluster.ssl_extra_certificate_ARNs }}" # EC2 - BUILD ASG -- name: Generate security group information for the ASG. - ansible.builtin.include_role: - name: aws/aws_security_groups - vars: - aws_security_groups: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - group_names: "{{ aws_ec2_autoscale_cluster.cluster_security_groups }}" - return_type: ids - when: - - aws_ec2_autoscale_cluster.cluster_security_groups | length > 0 - - aws_ec2_autoscale_cluster.asg_refresh - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Create launch template. 
- amazon.aws.ec2_launch_template: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - name: "{{ aws_ec2_autoscale_cluster.name }}" - image_id: "{{ aws_ec2_autoscale_cluster.image_id if aws_ec2_autoscale_cluster.image_id is defined else aws_ec2_autoscale_cluster_image_latest.image_id }}" - key_name: "{{ aws_ec2_autoscale_cluster.key_name }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - state: "{{ aws_ec2_autoscale_cluster.state }}" - instance_type: "{{ aws_ec2_autoscale_cluster.instance_type }}" - iam_instance_profile: "{{ _aws_ec2_autoscale_cluster_iam_role_info.iam_roles[0].instance_profiles[0].arn }}" - disable_api_termination: "{{ aws_ec2_autoscale_cluster.instance_disable_api_termination }}" - ebs_optimized: "{{ aws_ec2_autoscale_cluster.ebs_optimized }}" - network_interfaces: - - associate_public_ip_address: "{{ aws_ec2_autoscale_cluster.assign_public_ip }}" - delete_on_termination: "{{ aws_ec2_autoscale_cluster.instance_nic_delete_on_termination }}" - subnet_id: "{{ subnet_id }}" # picked randomly from _aws_ec2_autoscale_cluster_subnets_ids, see with_random_choice - device_index: 0 # must be 0 - see https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-template.html#change-network-interface - groups: "{{ _aws_security_group_list + [_aws_ec2_autoscale_cluster_security_group.group_id] }}" - block_device_mappings: - - ebs: - delete_on_termination: "{{ aws_ec2_autoscale_cluster.root_volume_delete_on_termination }}" - encrypted: "{{ aws_ec2_autoscale_cluster.encrypt_boot }}" - volume_size: "{{ aws_ec2_autoscale_cluster.root_volume_size }}" - volume_type: "{{ aws_ec2_autoscale_cluster.root_volume_type }}" - device_name: "{{ aws_ec2_autoscale_cluster.device_name }}" - credit_specification: "{{ aws_ec2_autoscale_cluster.instance_credit_specification | default(omit) }}" - with_random_choice: "{{ _aws_ec2_autoscale_cluster_subnets_ids }}" - loop_control: - loop_var: subnet_id +- name: Build the ASG. when: - - aws_ec2_autoscale_cluster.asg_refresh - - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Create AutoScale group and spin up new instances. 
- amazon.aws.autoscaling_group: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - name: "{{ aws_ec2_autoscale_cluster.name }}" - state: "{{ aws_ec2_autoscale_cluster.state }}" - launch_template: - launch_template_name: "{{ aws_ec2_autoscale_cluster.name }}" - health_check_type: "{% if aws_ec2_autoscale_cluster_running_instances.instances | length > 0 %}{{ aws_ec2_autoscale_cluster.alb_health_check_type }}{% else %}EC2{% endif %}" - health_check_period: "{{ aws_ec2_autoscale_cluster.alb_health_check_period | default(omit) }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - replace_all_instances: true - replace_batch_size: "{{ aws_ec2_autoscale_cluster.min_size if aws_ec2_autoscale_cluster.desired_capacity == 0 else aws_ec2_autoscale_cluster.desired_capacity }}" - wait_for_instances: true - lt_check: true - wait_timeout: 3000 - desired_capacity: "{{ aws_ec2_autoscale_cluster.min_size if aws_ec2_autoscale_cluster.desired_capacity == 0 else aws_ec2_autoscale_cluster.desired_capacity }}" - min_size: "{{ aws_ec2_autoscale_cluster.min_size }}" - max_size: "{{ aws_ec2_autoscale_cluster.max_size }}" - tags: "{{ aws_ec2_autoscale_cluster.tags | simpledict2list }}" - vpc_zone_identifier: "{{ _aws_ec2_autoscale_cluster_subnets_ids }}" - target_group_arns: - - "{{ _aws_ec2_target_group_created.target_group_arn }}" - register: _aws_ec2_asg_created - when: - aws_ec2_autoscale_cluster.asg_refresh - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Create step scaling AutoScale policies. - community.aws.autoscaling_policy: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - state: "present" - name: "{{ item.name }}-{{ item.policy_type }}" - adjustment_type: "{{ item.adjustment_type }}" - asg_name: "{{ aws_ec2_autoscale_cluster.name }}" - scaling_adjustment: "{{ item.adjustment }}" - min_adjustment_step: "{{ item.adjustment_step }}" - metric_aggregation: "{{ item.metric_aggregation }}" - step_adjustments: "{{ item.step_adjustments }}" - when: - - aws_ec2_autoscale_cluster.asg_scaling_policies - - item.policy_type == 'StepScaling' - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - register: _aws_ec2_autoscale_cluster_step_scaling_policies - with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" - -- name: Create simple scaling AutoScale policies. - community.aws.autoscaling_policy: - profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - state: "present" - name: "{{ item.name }}-{{ item.policy_type }}" - adjustment_type: "{{ item.adjustment_type }}" - asg_name: "{{ aws_ec2_autoscale_cluster.name }}" - scaling_adjustment: "{{ item.adjustment }}" - min_adjustment_step: "{{ item.adjustment_step }}" - cooldown: "{{ item.cooldown }}" - when: - - aws_ec2_autoscale_cluster.asg_scaling_policies - - item.policy_type == 'SimpleScaling' - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - register: _aws_ec2_autoscale_cluster_simple_scaling_policies - with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" - -- name: Fetch step scaling policies. 
- ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_step_scaling_policies.results }}" - when: - - _aws_ec2_autoscale_cluster_step_scaling_policies - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Fetch simple scaling policies. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_scaling_policies + _aws_ec2_autoscale_cluster_simple_scaling_policies.results }}" - when: - - _aws_ec2_autoscale_cluster_simple_scaling_policies - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Create placeholder ARN variables for scaling policies. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_up_policy_ARN: "" - _aws_ec2_autoscale_cluster_scaling_down_policy_ARN: "" - when: - - _aws_ec2_autoscale_cluster_scaling_policies is defined - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -# @todo We should support multiple policies. If this built a list -# then we could potentially loop over it after. -- name: Set scaling up policy ARN. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_up_policy_ARN: "{{ item.arn }}" - loop: "{{ _aws_ec2_autoscale_cluster_scaling_policies }}" - when: - - _aws_ec2_autoscale_cluster_scaling_policies is defined - - item.item.name == aws_ec2_autoscale_cluster.asg_cloudwatch_policy_scale_up_name - - item.arn is defined + block: + - name: Gather IAM role info. + amazon.aws.iam_role_info: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + name: "{{ aws_ec2_autoscale_cluster.iam_role_name }}" + register: _aws_ec2_autoscale_cluster_iam_role_info + + - name: Generate security group information for the ASG. + ansible.builtin.include_role: + name: aws/aws_security_groups + vars: + aws_security_groups: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + group_names: "{{ aws_ec2_autoscale_cluster.cluster_security_groups }}" + return_type: ids + when: + - aws_ec2_autoscale_cluster.cluster_security_groups|length > 0 + + - name: Create launch template. 
+ amazon.aws.ec2_launch_template: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + name: "{{ aws_ec2_autoscale_cluster.name }}" + image_id: "{{ aws_ec2_autoscale_cluster.image_id if aws_ec2_autoscale_cluster.image_id is defined else aws_ec2_autoscale_cluster_image_latest.image_id }}" + key_name: "{{ aws_ec2_autoscale_cluster.key_name }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + state: "{{ aws_ec2_autoscale_cluster.state }}" + instance_type: "{{ aws_ec2_autoscale_cluster.instance_type }}" + iam_instance_profile: "{{ _aws_ec2_autoscale_cluster_iam_role_info.iam_roles[0].instance_profiles[0].arn }}" + disable_api_termination: "{{ aws_ec2_autoscale_cluster.instance_disable_api_termination }}" + ebs_optimized: "{{ aws_ec2_autoscale_cluster.ebs_optimized }}" + network_interfaces: + - associate_public_ip_address: "{{ aws_ec2_autoscale_cluster.assign_public_ip }}" + delete_on_termination: "{{ aws_ec2_autoscale_cluster.instance_nic_delete_on_termination }}" + subnet_id: "{{ subnet_id }}" # picked randomly from _aws_ec2_autoscale_cluster_subnets_ids, see with_random_choice + device_index: 0 # must be 0 - see https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-launch-template.html#change-network-interface + groups: "{{ _aws_security_group_list + [_aws_ec2_autoscale_cluster_security_group.group_id] }}" + block_device_mappings: + - ebs: + delete_on_termination: "{{ aws_ec2_autoscale_cluster.root_volume_delete_on_termination }}" + encrypted: "{{ aws_ec2_autoscale_cluster.encrypt_boot }}" + volume_size: "{{ aws_ec2_autoscale_cluster.root_volume_size }}" + volume_type: "{{ aws_ec2_autoscale_cluster.root_volume_type }}" + device_name: "{{ aws_ec2_autoscale_cluster.device_name }}" + credit_specification: "{{ aws_ec2_autoscale_cluster.instance_credit_specification | default(omit) }}" + with_random_choice: "{{ _aws_ec2_autoscale_cluster_subnets_ids }}" + loop_control: + loop_var: subnet_id + + - name: Create AutoScale group and spin up new instances. + amazon.aws.autoscaling_group: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + name: "{{ aws_ec2_autoscale_cluster.name }}" + state: "{{ aws_ec2_autoscale_cluster.state }}" + launch_template: + launch_template_name: "{{ aws_ec2_autoscale_cluster.name }}" + health_check_type: "{% if aws_ec2_autoscale_cluster_running_instances.instances | length > 0 %}{{ aws_ec2_autoscale_cluster.alb_health_check_type }}{% else %}EC2{% endif %}" + health_check_period: "{{ aws_ec2_autoscale_cluster.alb_health_check_period | default(omit) }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + replace_all_instances: true + replace_batch_size: "{{ aws_ec2_autoscale_cluster.min_size if aws_ec2_autoscale_cluster.desired_capacity == 0 else aws_ec2_autoscale_cluster.desired_capacity }}" + wait_for_instances: true + lt_check: true + wait_timeout: 3000 + desired_capacity: "{{ aws_ec2_autoscale_cluster.min_size if aws_ec2_autoscale_cluster.desired_capacity == 0 else aws_ec2_autoscale_cluster.desired_capacity }}" + min_size: "{{ aws_ec2_autoscale_cluster.min_size }}" + max_size: "{{ aws_ec2_autoscale_cluster.max_size }}" + tags: "{{ aws_ec2_autoscale_cluster.tags | simpledict2list }}" + vpc_zone_identifier: "{{ _aws_ec2_autoscale_cluster_subnets_ids }}" + target_group_arns: + - "{{ _aws_ec2_target_group_created.target_group_arn }}" + register: _aws_ec2_asg_created + +- name: Handle AutoScale policies and alarms. 
+ when: + - aws_ec2_autoscale_cluster.asg_scaling_policies|length > 0 - aws_ec2_autoscale_cluster.type == "ec2" - aws_ec2_autoscale_cluster.deploy_cluster - -# @todo As above. -- name: Set scaling down policy ARN. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_scaling_down_policy_ARN: "{{ item.arn }}" - loop: "{{ _aws_ec2_autoscale_cluster_scaling_policies }}" + block: + - name: Set empty scaling policies fact. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_policies: [] + + - name: Create simple scaling AutoScale policies. + community.aws.autoscaling_policy: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + state: "present" + name: "{{ item.name }}-{{ item.policy_type }}" + adjustment_type: "{{ item.adjustment_type }}" + asg_name: "{{ aws_ec2_autoscale_cluster.name }}" + scaling_adjustment: "{{ item.adjustment }}" + min_adjustment_step: "{{ item.adjustment_step }}" + cooldown: "{{ item.cooldown }}" + register: _aws_ec2_autoscale_cluster_simple_scaling_policies + with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" + when: item.policy_type == 'SimpleScaling' + + - name: Add simple scaling policies to scaling policies list. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_simple_scaling_policies.results }}" + when: _aws_ec2_autoscale_cluster_simple_scaling_policies.results is defined + + - name: Create step scaling AutoScale policies. + community.aws.autoscaling_policy: + profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + state: "present" + name: "{{ item.name }}-{{ item.policy_type }}" + adjustment_type: "{{ item.adjustment_type }}" + asg_name: "{{ aws_ec2_autoscale_cluster.name }}" + scaling_adjustment: "{{ item.adjustment }}" + min_adjustment_step: "{{ item.adjustment_step }}" + metric_aggregation: "{{ item.metric_aggregation }}" + step_adjustments: "{{ item.step_adjustments }}" + register: _aws_ec2_autoscale_cluster_step_scaling_policies + with_items: "{{ aws_ec2_autoscale_cluster.asg_scaling_policies }}" + when: item.policy_type == 'StepScaling' + + - name: Add step scaling policies to scaling policies list. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_policies: "{{ _aws_ec2_autoscale_cluster_scaling_policies + _aws_ec2_autoscale_cluster_step_scaling_policies.results }}" + when: _aws_ec2_autoscale_cluster_step_scaling_policies.results is defined + +- name: Create scaling policies and alarms. when: - _aws_ec2_autoscale_cluster_scaling_policies is defined - - item.item.name == aws_ec2_autoscale_cluster.asg_cloudwatch_policy_scale_down_name - - item.arn is defined - - aws_ec2_autoscale_cluster.type == "ec2" - - aws_ec2_autoscale_cluster.deploy_cluster - -- name: Create alarm in CloudWatch for auto scaling up. 
- ansible.builtin.include_role: - name: aws/aws_ec2_metric_alarm - vars: - aws_ec2_metric_alarm: - aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - name: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarm_scale_up_name }}" - description: "{{ item.description }}" - metric: "{{ item.metric }}" - namespace: "{{ item.namespace }}" - statistic: "{{ item.statistic }}" - comparison: "{{ item.comparison }}" - threshold: "{{ item.threshold }}" - unit: "{{ item.unit }}" - period: "{{ item.period }}" - evaluation_periods: "{{ item.evaluation_periods }}" - alarm_actions: - - "{{ _aws_ec2_autoscale_cluster_scaling_up_policy_ARN }}" - dimensions: - "AutoScalingGroupName": "{{ aws_ec2_autoscale_cluster.name }}" - with_items: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarms }}" - when: - - _aws_ec2_autoscale_cluster_scaling_up_policy_ARN is defined - - item.scale_direction == 'up' - - aws_ec2_autoscale_cluster.type == "ec2" - -- name: Create alarm in CloudWatch for auto scaling down. - ansible.builtin.include_role: - name: aws/aws_ec2_metric_alarm - vars: - aws_ec2_metric_alarm: - aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" - region: "{{ aws_ec2_autoscale_cluster.region }}" - name: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarm_scale_down_name }}" - description: "{{ item.description }}" - metric: "{{ item.metric }}" - namespace: "{{ item.namespace }}" - statistic: "{{ item.statistic }}" - comparison: "{{ item.comparison }}" - threshold: "{{ item.threshold }}" - unit: "{{ item.unit }}" - period: "{{ item.period }}" - evaluation_periods: "{{ item.evaluation_periods }}" - alarm_actions: - - "{{ _aws_ec2_autoscale_cluster_scaling_down_policy_ARN }}" - dimensions: - "AutoScalingGroupName": "{{ aws_ec2_autoscale_cluster.name }}" - with_items: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarms }}" - when: - - _aws_ec2_autoscale_cluster_scaling_down_policy_ARN is defined - - item.scale_direction == 'down' - aws_ec2_autoscale_cluster.type == "ec2" + block: + - name: Create placeholder ARN variables for scaling policies. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_up_policy_ARN: "" + _aws_ec2_autoscale_cluster_scaling_down_policy_ARN: "" + when: + - aws_ec2_autoscale_cluster.deploy_cluster + + # @todo We should support multiple policies. If this built a list + # then we could potentially loop over it after. + - name: Set scaling up policy ARN. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_up_policy_ARN: "{{ item.arn }}" + loop: "{{ _aws_ec2_autoscale_cluster_scaling_policies }}" + when: + - item.item.name == aws_ec2_autoscale_cluster.asg_cloudwatch_policy_scale_up_name + - item.arn is defined + - aws_ec2_autoscale_cluster.deploy_cluster + + # @todo As above. + - name: Set scaling down policy ARN. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_scaling_down_policy_ARN: "{{ item.arn }}" + loop: "{{ _aws_ec2_autoscale_cluster_scaling_policies }}" + when: + - item.item.name == aws_ec2_autoscale_cluster.asg_cloudwatch_policy_scale_down_name + - item.arn is defined + - aws_ec2_autoscale_cluster.deploy_cluster + + - name: Create alarm in CloudWatch for auto scaling up. 
+ ansible.builtin.include_role: + name: aws/aws_ec2_metric_alarm + vars: + aws_ec2_metric_alarm: + aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + name: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarm_scale_up_name }}" + description: "{{ item.description }}" + metric: "{{ item.metric }}" + namespace: "{{ item.namespace }}" + statistic: "{{ item.statistic }}" + comparison: "{{ item.comparison }}" + threshold: "{{ item.threshold }}" + unit: "{{ item.unit }}" + period: "{{ item.period }}" + evaluation_periods: "{{ item.evaluation_periods }}" + alarm_actions: + - "{{ _aws_ec2_autoscale_cluster_scaling_up_policy_ARN }}" + dimensions: + "AutoScalingGroupName": "{{ aws_ec2_autoscale_cluster.name }}" + with_items: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarms }}" + when: + - _aws_ec2_autoscale_cluster_scaling_up_policy_ARN is defined + - item.scale_direction == 'up' + + - name: Create alarm in CloudWatch for auto scaling down. + ansible.builtin.include_role: + name: aws/aws_ec2_metric_alarm + vars: + aws_ec2_metric_alarm: + aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" + region: "{{ aws_ec2_autoscale_cluster.region }}" + name: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarm_scale_down_name }}" + description: "{{ item.description }}" + metric: "{{ item.metric }}" + namespace: "{{ item.namespace }}" + statistic: "{{ item.statistic }}" + comparison: "{{ item.comparison }}" + threshold: "{{ item.threshold }}" + unit: "{{ item.unit }}" + period: "{{ item.period }}" + evaluation_periods: "{{ item.evaluation_periods }}" + alarm_actions: + - "{{ _aws_ec2_autoscale_cluster_scaling_down_policy_ARN }}" + dimensions: + "AutoScalingGroupName": "{{ aws_ec2_autoscale_cluster.name }}" + with_items: "{{ aws_ec2_autoscale_cluster.asg_cloudwatch_alarms }}" + when: + - _aws_ec2_autoscale_cluster_scaling_down_policy_ARN is defined + - item.scale_direction == 'down' # CLOUDFRONT -- name: Create SSL certificate for CloudFront. - ansible.builtin.include_role: - name: aws/aws_acm - vars: - aws_acm: - export: false - region: us-east-1 # Certificate must be in us-east-1 for CloudFront. - domain_name: "{{ aws_ec2_autoscale_cluster.route_53.record }}" - extra_domains: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" - route_53: - aws_profile: "{{ aws_ec2_autoscale_cluster.acm.route_53.aws_profile }}" - zone: "{{ aws_ec2_autoscale_cluster.acm.route_53.zone }}" - when: - - aws_ec2_autoscale_cluster.cloudfront.create_cert - - aws_ec2_autoscale_cluster.region != 'us-east-1' - - aws_ec2_autoscale_cluster.cloudfront.create_distribution - -- name: Default to provided CloudFront SSL certificate ARN. - ansible.builtin.set_fact: - _cf_certificate_ARN: "{{ aws_ec2_autoscale_cluster.cloudfront.cf_certificate_ARN }}" - when: aws_ec2_autoscale_cluster.cloudfront.create_distribution - -- name: If provided, override CloudFront SSL certificate ARN with the one received from ACM. - ansible.builtin.set_fact: - _cf_certificate_ARN: "{{ aws_acm_certificate_arn }}" - when: - - aws_ec2_autoscale_cluster.cloudfront.create_cert - - aws_ec2_autoscale_cluster.cloudfront.create_distribution - - name: Initialise the domains loop var with main domain entry DNS settings. ansible.builtin.set_fact: _aws_ec2_autoscale_cluster_dns_all_domains: @@ -728,40 +674,64 @@ loop: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" when: aws_ec2_autoscale_cluster.acm.extra_domains | length > 0 -- name: Initialise a list of CloudFront aliases with main domain name. 
- ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ [_domain_name] }}" - when: - - aws_ec2_autoscale_cluster.create_elb - - aws_ec2_autoscale_cluster.cloudfront.create_distribution - -- name: Add extra_domains so we can set up additional CloudFront aliases. - ansible.builtin.set_fact: - _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ _aws_ec2_autoscale_cluster_cloudfront_aliases + [item.domain] }}" - loop: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" +- name: Handle CloudFront. when: - - aws_ec2_autoscale_cluster.acm.extra_domains | length > 0 - - aws_ec2_autoscale_cluster.create_elb - aws_ec2_autoscale_cluster.cloudfront.create_distribution - -- name: Create a CloudFront distribution. - ansible.builtin.include_role: - name: aws/aws_cloudfront_distribution - vars: - aws_cloudfront_distribution: - tags: "{{ aws_ec2_autoscale_cluster.tags | combine({'Name': aws_ec2_autoscale_cluster.name}) }}" - aliases: "{{ _aws_ec2_autoscale_cluster_cloudfront_aliases }}" - viewer_certificate: - acm_certificate_arn: "{{ _cf_certificate_ARN }}" - origins: - - domain_name: "{{ _aws_ec2_autoscale_cluster_alb.dns_name }}" - id: "ELB-{{ aws_ec2_autoscale_cluster.name }}" - default_cache_behavior: - target_origin_id: "ELB-{{ aws_ec2_autoscale_cluster.name }}" - when: - aws_ec2_autoscale_cluster.create_elb - - aws_ec2_autoscale_cluster.cloudfront.create_distribution - - _cf_certificate_ARN | length > 1 + block: + - name: Create SSL certificate for CloudFront. + ansible.builtin.include_role: + name: aws/aws_acm + vars: + aws_acm: + export: false + region: us-east-1 # Certificate must be in us-east-1 for CloudFront. + domain_name: "{{ aws_ec2_autoscale_cluster.route_53.record }}" + extra_domains: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" + route_53: + aws_profile: "{{ aws_ec2_autoscale_cluster.acm.route_53.aws_profile }}" + zone: "{{ aws_ec2_autoscale_cluster.acm.route_53.zone }}" + when: + - aws_ec2_autoscale_cluster.cloudfront.create_cert + - aws_ec2_autoscale_cluster.region != 'us-east-1' + + - name: Default to provided CloudFront SSL certificate ARN. + ansible.builtin.set_fact: + _cf_certificate_ARN: "{{ aws_ec2_autoscale_cluster.cloudfront.cf_certificate_ARN }}" + + - name: If provided, override CloudFront SSL certificate ARN with the one received from ACM. + ansible.builtin.set_fact: + _cf_certificate_ARN: "{{ aws_acm_certificate_arn }}" + when: + - aws_ec2_autoscale_cluster.cloudfront.create_cert + + - name: Initialise a list of CloudFront aliases with main domain name. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ [_domain_name] }}" + + - name: Add extra_domains so we can set up additional CloudFront aliases. + ansible.builtin.set_fact: + _aws_ec2_autoscale_cluster_cloudfront_aliases: "{{ _aws_ec2_autoscale_cluster_cloudfront_aliases + [item.domain] }}" + loop: "{{ aws_ec2_autoscale_cluster.acm.extra_domains }}" + when: + - aws_ec2_autoscale_cluster.acm.extra_domains|length > 0 + + - name: Create a CloudFront distribution. 
+ ansible.builtin.include_role: + name: aws/aws_cloudfront_distribution + vars: + aws_cloudfront_distribution: + tags: "{{ aws_ec2_autoscale_cluster.tags | combine({'Name': aws_ec2_autoscale_cluster.name}) }}" + aliases: "{{ _aws_ec2_autoscale_cluster_cloudfront_aliases }}" + viewer_certificate: + acm_certificate_arn: "{{ _cf_certificate_ARN }}" + origins: + - domain_name: "{{ _aws_ec2_autoscale_cluster_alb.dns_name }}" + id: "ELB-{{ aws_ec2_autoscale_cluster.name }}" + default_cache_behavior: + target_origin_id: "ELB-{{ aws_ec2_autoscale_cluster.name }}" + when: + - _cf_certificate_ARN|length > 1 # @TODO - we can use the aws_acm_obsolete_certificate_arn variable to tidy up previous ACM certs, if it is defined. @@ -777,11 +747,4 @@ loop: "{{ _aws_ec2_autoscale_cluster_dns_all_domains }}" when: - aws_ec2_autoscale_cluster.route_53.zone is defined - - aws_ec2_autoscale_cluster.route_53.zone | length > 0 - -#- name: Copy AMI to backup region. -# community.aws.ec2_ami_copy: -# aws_profile: "{{ aws_ec2_autoscale_cluster.aws_profile }}" -# source_region: "{{ aws_ec2_autoscale_cluster.region }}" -# region: "{{ aws_backup.copy_vault.region }}" -# source_image_id: "{{ aws_ec2_autoscale_cluster_image_latest.image_id }}" + - aws_ec2_autoscale_cluster.route_53.zone|length > 0 diff --git a/roles/aws/aws_elb/tasks/main.yml b/roles/aws/aws_elb/tasks/main.yml index 9ea51db71..8c988ff1c 100644 --- a/roles/aws/aws_elb/tasks/main.yml +++ b/roles/aws/aws_elb/tasks/main.yml @@ -111,7 +111,7 @@ _aws_ec2_listeners: "{{ _aws_ec2_listeners + aws_elb.listeners }}" when: - aws_elb is defined - - aws_elb | length + - aws_elb | length > 0 - name: Generate security group information. ansible.builtin.include_role: diff --git a/roles/aws/aws_iam_saml/templates/simplesamlphp_sp.j2 b/roles/aws/aws_iam_saml/templates/simplesamlphp_sp.j2 index 509fd4dbe..d931cdbaa 100644 --- a/roles/aws/aws_iam_saml/templates/simplesamlphp_sp.j2 +++ b/roles/aws/aws_iam_saml/templates/simplesamlphp_sp.j2 @@ -55,7 +55,7 @@ $metadata['urn:amazon:{{ _aws_account_info.account }}'] = array ( 'groups' => 'urn:oid:1.3.6.1.4.1.5923.1.1.1.1', ), -{% if aws_iam_saml.linotp_server is defined and aws_iam_saml.linotp_server|length %} +{% if aws_iam_saml.linotp_server is defined and aws_iam_saml.linotp_server|length > 0 %} # LinOTP settings 55 => array( 'class' => 'linotp2:OTP', diff --git a/roles/debian/ansible/defaults/main.yml b/roles/debian/ansible/defaults/main.yml index 47707d7d0..bacce1d11 100644 --- a/roles/debian/ansible/defaults/main.yml +++ b/roles/debian/ansible/defaults/main.yml @@ -1,14 +1,14 @@ --- ce_ansible: - # These are usually set in the _init role using _venv_path, _venv_command and _venv_install_username but can be overridden. - #venv_path: "/home/{{ ce_provision.username }}/ansible" - #venv_command: /usr/bin/python3.11 -m venv - #venv_install_username: ansible # user to become when creating venv - ansible_version: "<12.0" # also check install.sh script in the repo root and set the version there accordingly. + # These are usually set in the _init role but can be overridden here. + venv_path: "{{ _venv_path }}" + venv_command: "{{ _venv_command }}" + venv_install_username: "{{ _venv_install_username }}" + ansible_version: "" # if used with the install.sh script in the repo root, version strings should match upgrade: - enabled: false # create systemd timer to auto-upgrade Ansible. Temporary disabled due to ansible 2.19 breaking changes. + enabled: false # create systemd timer to auto-upgrade Ansible. 
Temporary disabled due to ansible 2.19 breaking changes command: "{{ _venv_path }}/bin/python3 -m pip install --upgrade ansible" # if you set venv_path above then set it here too - on_calendar: "*-*-* 01:30:00" # see systemd.time documentation - https://www.freedesktop.org/software/systemd/man/latest/systemd.time.html#Calendar%20Events + on_calendar: "*-*-* 01:30:00" # see systemd.time documentation - https://www.freedesktop.org/software/systemd/man/latest/systemd.time.html#Calendar%20Events #timer_name: upgrade_ansible linters: - enabled: true # will not install linters if false, installing linters breaks cloud-init + enabled: true # will not install linters if false, installing linters breaks cloud-init diff --git a/roles/debian/ansible/tasks/main.yml b/roles/debian/ansible/tasks/main.yml index cdf6d0862..146c62e8d 100644 --- a/roles/debian/ansible/tasks/main.yml +++ b/roles/debian/ansible/tasks/main.yml @@ -21,20 +21,14 @@ - name: Override Python venv path if provided. ansible.builtin.set_fact: _venv_path: "{{ ce_ansible.venv_path }}" - when: - - ce_ansible.venv_path is defined - name: Override Python venv command if provided. ansible.builtin.set_fact: _venv_command: "{{ ce_ansible.venv_command }}" - when: - - ce_ansible.venv_command is defined - name: Override Python user if provided. ansible.builtin.set_fact: _venv_install_username: "{{ ce_ansible.venv_install_username }}" - when: - - ce_ansible.venv_install_username is defined - name: Set up Python packages. ansible.builtin.include_role: @@ -75,7 +69,7 @@ - name: Add the venv to $PATH using profile.d. ansible.builtin.copy: - content: "export PATH=$PATH:{{ ce_ansible.venv_path | default(_venv_path) }}/bin" + content: "export PATH=$PATH:{{ ce_ansible.venv_path }}/bin" dest: "/etc/profile.d/ansible-path.sh" mode: '0644' diff --git a/roles/debian/apache/templates/cloudwatch-main.json.j2 b/roles/debian/apache/templates/cloudwatch-main.json.j2 index e5e899a15..38b8a0772 100644 --- a/roles/debian/apache/templates/cloudwatch-main.json.j2 +++ b/roles/debian/apache/templates/cloudwatch-main.json.j2 @@ -5,7 +5,7 @@ "collect_list": [ { "file_path": "/var/log/apache2/access.log", -{% if apache.log_group_prefix is defined and apache.log_group_prefix|length %} +{% if apache.log_group_prefix is defined and apache.log_group_prefix|length > 0 %} "log_group_name": "{{ apache.log_group_prefix }}apache-access", {% else %} "log_group_name": "apache-access", @@ -14,7 +14,7 @@ }, { "file_path": "/var/log/apache2/error.log", -{% if apache.log_group_prefix is defined and apache.log_group_prefix|length %} +{% if apache.log_group_prefix is defined and apache.log_group_prefix|length > 0 %} "log_group_name": "{{ apache.log_group_prefix }}apache-error", {% else %} "log_group_name": "apache-error", @@ -25,4 +25,4 @@ } } } -} \ No newline at end of file +} diff --git a/roles/debian/apache/templates/cloudwatch-vhost.json.j2 b/roles/debian/apache/templates/cloudwatch-vhost.json.j2 index 331e30ff5..7299936c0 100644 --- a/roles/debian/apache/templates/cloudwatch-vhost.json.j2 +++ b/roles/debian/apache/templates/cloudwatch-vhost.json.j2 @@ -5,12 +5,12 @@ "collect_list": [ { "file_path": "{{ domain.access_log }}", -{% if apache.log_group_prefix is defined and apache.log_group_prefix|length %} +{% if apache.log_group_prefix is defined and apache.log_group_prefix|length > 0 %} "log_group_name": "{{ apache.log_group_prefix }}apache2-access", {% else %} "log_group_name": "apache2-access", {% endif %} -{% if domain.log_stream_name is defined and domain.log_stream_name|length 
%} +{% if domain.log_stream_name is defined and domain.log_stream_name|length > 0 %} "log_stream_name": "{{ domain.log_stream_name }}" {% else %} "log_stream_name": "{{ apache.log_stream_name }}" @@ -18,12 +18,12 @@ }, { "file_path": "{{ domain.error_log }}", -{% if apache.log_group_prefix is defined and apache.log_group_prefix|length %} +{% if apache.log_group_prefix is defined and apache.log_group_prefix|length > 0 %} "log_group_name": "{{ apache.log_group_prefix }}apache2-error", {% else %} "log_group_name": "apache2-error", {% endif %} -{% if domain.log_stream_name is defined and domain.log_stream_name|length %} +{% if domain.log_stream_name is defined and domain.log_stream_name|length > 0 %} "log_stream_name": "{{ domain.log_stream_name }}" {% else %} "log_stream_name": "{{ apache.log_stream_name }}" @@ -33,4 +33,4 @@ } } } -} \ No newline at end of file +} diff --git a/roles/debian/aws_cloudwatch_agent/templates/config.json.j2 b/roles/debian/aws_cloudwatch_agent/templates/config.json.j2 index 169ea4c53..6dce2d3fe 100755 --- a/roles/debian/aws_cloudwatch_agent/templates/config.json.j2 +++ b/roles/debian/aws_cloudwatch_agent/templates/config.json.j2 @@ -9,7 +9,7 @@ "collect_list": [ { "file_path": "/var/log/syslog", -{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length %} +{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length > 0 %} "log_group_name": "{{aws_cloudwatch_agent.log_group_prefix}}syslog", {% else %} "log_group_name": "syslog", @@ -18,7 +18,7 @@ }, { "file_path": "/var/log/auth.log", -{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length %} +{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length > 0 %} "log_group_name": "{{aws_cloudwatch_agent.log_group_prefix}}auth", {% else %} "log_group_name": "auth", @@ -27,7 +27,7 @@ }, { "file_path": "/var/log/daemon.log", -{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length %} +{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length > 0 %} "log_group_name": "{{aws_cloudwatch_agent.log_group_prefix}}daemon", {% else %} "log_group_name": "daemon", @@ -36,7 +36,7 @@ }, { "file_path": "/var/log/messages", -{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length %} +{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length > 0 %} "log_group_name": "{{aws_cloudwatch_agent.log_group_prefix}}messages", {% else %} "log_group_name": "messages", @@ -45,7 +45,7 @@ }, { "file_path": "/var/log/alternatives.log", -{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length %} +{% if aws_cloudwatch_agent.log_group_prefix is defined and aws_cloudwatch_agent.log_group_prefix|length > 0 %} "log_group_name": "{{aws_cloudwatch_agent.log_group_prefix}}alternatives", {% else %} "log_group_name": "alternatives", @@ -57,7 +57,7 @@ } }, "metrics": { -{% if aws_cloudwatch_agent.metrics_namespace is defined and aws_cloudwatch_agent.metrics_namespace|length %} +{% if aws_cloudwatch_agent.metrics_namespace is defined and aws_cloudwatch_agent.metrics_namespace|length > 0 %} "namespace": "{{ aws_cloudwatch_agent.metrics_namespace }}", {% endif %} "append_dimensions": { @@ -110,4 +110,4 @@ } } } -} \ No newline at end of file +} diff 
--git a/roles/debian/duplicity/templates/include-exclude-filelist.j2 b/roles/debian/duplicity/templates/include-exclude-filelist.j2 index bf491cd28..61f745439 100644 --- a/roles/debian/duplicity/templates/include-exclude-filelist.j2 +++ b/roles/debian/duplicity/templates/include-exclude-filelist.j2 @@ -1,5 +1,5 @@ {% for rule in dir.rules %} -{% if rule|length %} +{% if rule|length > 0 %} {{ rule }} {% endif %} {% endfor %} diff --git a/roles/debian/nginx/templates/cloudwatch-main.json.j2 b/roles/debian/nginx/templates/cloudwatch-main.json.j2 index 8ba152202..4c17bb09b 100644 --- a/roles/debian/nginx/templates/cloudwatch-main.json.j2 +++ b/roles/debian/nginx/templates/cloudwatch-main.json.j2 @@ -5,7 +5,7 @@ "collect_list": [ { "file_path": "/var/log/nginx/access.log", -{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length %} +{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length > 0 %} "log_group_name": "{{ nginx.log_group_prefix }}nginx-access", {% else %} "log_group_name": "nginx-access", @@ -14,7 +14,7 @@ }, { "file_path": "/var/log/nginx/error.log", -{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length %} +{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length > 0 %} "log_group_name": "{{ nginx.log_group_prefix }}nginx-error", {% else %} "log_group_name": "nginx-error", diff --git a/roles/debian/nginx/templates/cloudwatch-vhost.json.j2 b/roles/debian/nginx/templates/cloudwatch-vhost.json.j2 index 285252767..a278f674d 100644 --- a/roles/debian/nginx/templates/cloudwatch-vhost.json.j2 +++ b/roles/debian/nginx/templates/cloudwatch-vhost.json.j2 @@ -5,12 +5,12 @@ "collect_list": [ { "file_path": "{{ domain.access_log }}", -{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length %} +{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length > 0 %} "log_group_name": "{{ nginx.log_group_prefix }}nginx-access", {% else %} "log_group_name": "nginx-access", {% endif %} -{% if domain.log_stream_name is defined and domain.log_stream_name|length %} +{% if domain.log_stream_name is defined and domain.log_stream_name|length > 0 %} "log_stream_name": "{{ domain.log_stream_name }}" {% else %} "log_stream_name": "{{ nginx.log_stream_name }}" @@ -18,12 +18,12 @@ }, { "file_path": "{{ domain.error_log }}", -{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length %} +{% if nginx.log_group_prefix is defined and nginx.log_group_prefix|length > 0 %} "log_group_name": "{{ nginx.log_group_prefix }}nginx-error", {% else %} "log_group_name": "nginx-error", {% endif %} -{% if domain.log_stream_name is defined and domain.log_stream_name|length %} +{% if domain.log_stream_name is defined and domain.log_stream_name|length > 0 %} "log_stream_name": "{{ domain.log_stream_name }}" {% else %} "log_stream_name": "{{ nginx.log_stream_name }}" diff --git a/roles/debian/openvpn/templates/headless-openvpn-install.sh.j2 b/roles/debian/openvpn/templates/headless-openvpn-install.sh.j2 index 2d078a5f9..0b36d94d8 100644 --- a/roles/debian/openvpn/templates/headless-openvpn-install.sh.j2 +++ b/roles/debian/openvpn/templates/headless-openvpn-install.sh.j2 @@ -13,12 +13,12 @@ export COMPRESSION_CHOICE={{ openvpn.compression_choice }} export CUSTOMIZE_ENC=n export CLIENT={{ openvpn.test_username }} export PASS=1 -{% if openvpn.nat_endpoint is defined and openvpn.nat_endpoint | length %} +{% if openvpn.nat_endpoint is defined and openvpn.nat_endpoint | length > 0 %} export ENDPOINT={{ openvpn.nat_endpoint 
}} {% endif %} {% if openvpn.dns | int == 13 %} export DNS1={{ openvpn.dns1 }} -{% if openvpn.dns2 is defined and openvpn.dns2 | length %} +{% if openvpn.dns2 is defined and openvpn.dns2 | length > 0 %} export DNS2={{ openvpn.dns2 }} {% endif %} {% endif %} diff --git a/roles/debian/pam_ldap/tasks/main.yml b/roles/debian/pam_ldap/tasks/main.yml index 10e432be4..fbcf2c26c 100644 --- a/roles/debian/pam_ldap/tasks/main.yml +++ b/roles/debian/pam_ldap/tasks/main.yml @@ -14,14 +14,14 @@ ansible.builtin.file: path: /etc/ldap/ssl state: directory - when: pam_ldap.ssl_certificate is defined and pam_ldap.ssl_certificate | length > 0 + when: pam_ldap.ssl_certificate|length > 0 - name: Copy certificate. ansible.builtin.copy: src: "{{ pam_ldap.ssl_certificate }}" dest: "/etc/ldap/ssl/{{ pam_ldap.ssl_certificate | basename }}" mode: "0666" - when: pam_ldap.ssl_certificate is defined and pam_ldap.ssl_certificate | length > 0 + when: pam_ldap.ssl_certificate|length > 0 - name: Copy nslcd config. ansible.builtin.template: @@ -82,33 +82,31 @@ mode: 0555 owner: root -- name: Create LDAP key script passwd file. - ansible.builtin.template: - src: ldap-bindpw.j2 - dest: /etc/ldap/ldap-bindpw - mode: "0600" - owner: root - when: - - ldap_client.binddn is defined and ldap_client.binddn - - ldap_client.bindpw is defined and ldap_client.bindpw - -- name: Create wrapper script for LDAP key script. - ansible.builtin.template: - src: ssh-getkey-ldap-wrapper.sh.j2 - dest: /usr/local/bin/ssh-getkey-ldap-wrapper.sh - mode: "0555" - owner: root - when: - - ldap_client.binddn is defined and ldap_client.binddn - - ldap_client.bindpw is defined and ldap_client.bindpw - -- name: Configure SSH pub key command if there is a binddn set. - ansible.builtin.lineinfile: - path: /etc/ssh/sshd_config - regexp: "AuthorizedKeysCommand " - line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap-wrapper.sh +- name: LDAP password handling. when: - - ldap_client.binddn is defined and ldap_client.binddn + - ldap_client.binddn|length > 0 + - ldap_client.bindpw|length > 0 + block: + - name: Create LDAP key script passwd file. + ansible.builtin.template: + src: ldap-bindpw.j2 + dest: /etc/ldap/ldap-bindpw + mode: "0600" + owner: root + + - name: Create wrapper script for LDAP key script. + ansible.builtin.template: + src: ssh-getkey-ldap-wrapper.sh.j2 + dest: /usr/local/bin/ssh-getkey-ldap-wrapper.sh + mode: "0555" + owner: root + + # We don't support bind DN with no password because if there is no password the necessary script is not created. + - name: Configure SSH pub key command if there is a binddn set. + ansible.builtin.lineinfile: + path: /etc/ssh/sshd_config + regexp: "AuthorizedKeysCommand " + line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap-wrapper.sh - name: Configure SSH pub key command if no binddn set. ansible.builtin.lineinfile: @@ -116,7 +114,7 @@ regexp: "AuthorizedKeysCommand " line: AuthorizedKeysCommand /usr/local/bin/ssh-getkey-ldap when: - - not ldap_client.binddn + - ldap_client.binddn|length == 0 - name: Configure SSH pub key command user. 
ansible.builtin.lineinfile: diff --git a/roles/debian/php-fpm/templates/cloudwatch-php-fpm-fixedport.json.j2 b/roles/debian/php-fpm/templates/cloudwatch-php-fpm-fixedport.json.j2 index 74523ecdf..e5d5ba9eb 100644 --- a/roles/debian/php-fpm/templates/cloudwatch-php-fpm-fixedport.json.j2 +++ b/roles/debian/php-fpm/templates/cloudwatch-php-fpm-fixedport.json.j2 @@ -5,12 +5,12 @@ "collect_list": [ { "file_path": "/var/log/php{{ php.version[0] }}-fpm.log", -{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length %} +{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length > 0 %} "log_group_name": "{{ php.fpm.log_group_prefix }}php{{ php.version[0] }}", {% else %} "log_group_name": "php", {% endif %} -{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length %} +{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length > 0 %} "log_stream_name": "{{ php.fpm.log_stream_name }}" {% else %} "log_stream_name": "php-fpm" @@ -18,12 +18,12 @@ }, { "file_path": "{{ php.fpm.slowlog_file_directory }}/php{{ php.version[0] }}-fpm.slow.log", -{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length %} +{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length > 0 %} "log_group_name": "{{ php.fpm.log_group_prefix }}php{{ php.version[0] }}", {% else %} "log_group_name": "php", {% endif %} -{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length %} +{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length > 0 %} "log_stream_name": "{{ php.fpm.log_stream_name }}-slowlog" {% else %} "log_stream_name": "php-fpm-slowlog" diff --git a/roles/debian/php-fpm/templates/cloudwatch-php-fpm.json.j2 b/roles/debian/php-fpm/templates/cloudwatch-php-fpm.json.j2 index 19a848bf3..bfb9efab0 100644 --- a/roles/debian/php-fpm/templates/cloudwatch-php-fpm.json.j2 +++ b/roles/debian/php-fpm/templates/cloudwatch-php-fpm.json.j2 @@ -5,12 +5,12 @@ "collect_list": [ { "file_path": "/var/log/php{{ version }}-fpm.log", -{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length %} +{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length > 0 %} "log_group_name": "{{ php.fpm.log_group_prefix }}php{{ version }}", {% else %} "log_group_name": "php", {% endif %} -{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length %} +{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length > 0 %} "log_stream_name": "{{ php.fpm.log_stream_name }}" {% else %} "log_stream_name": "php-fpm" @@ -18,12 +18,12 @@ }, { "file_path": "{{ php.fpm.slowlog_file_directory }}/php{{ version }}-fpm.slow.log", -{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length %} +{% if php.fpm.log_group_prefix is defined and php.fpm.log_group_prefix|length > 0 %} "log_group_name": "{{ php.fpm.log_group_prefix }}php{{ version }}", {% else %} "log_group_name": "php", {% endif %} -{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length %} +{% if php.fpm.log_stream_name is defined and php.fpm.log_stream_name|length > 0 %} "log_stream_name": "{{ php.fpm.log_stream_name }}-slowlog" {% else %} "log_stream_name": "php-fpm-slowlog" diff --git a/roles/debian/postfix/templates/transport.j2 b/roles/debian/postfix/templates/transport.j2 index 098bf5265..1f053c8e2 100644 --- a/roles/debian/postfix/templates/transport.j2 +++ b/roles/debian/postfix/templates/transport.j2 @@ -1,7 +1,7 @@ {{ ansible_hostname }} : {{ 
ansible_fqdn }} : {% for transport in postfix.transport_maps %} -{% if transport|length %} +{% if transport|length > 0 %} {{ transport }} {% endif %} {% endfor %} diff --git a/roles/debian/python_pip_packages/defaults/main.yml b/roles/debian/python_pip_packages/defaults/main.yml index 67d6d0120..c2e179208 100644 --- a/roles/debian/python_pip_packages/defaults/main.yml +++ b/roles/debian/python_pip_packages/defaults/main.yml @@ -1,9 +1,9 @@ --- python_pip_packages: - # These are usually set in the _init role using _venv_path, _venv_command and _venv_install_username but can be overridden. - #venv_path: /path/to/venv - #venv_command: /usr/bin/python3.11 -m venv - #install_username: deploy # user to become when creating venv + # These are usually set in the _init role but can be overridden here. + venv_path: "{{ _venv_path }}" + venv_command: "{{ _venv_command }}" + install_username: "{{ _venv_install_username }}" packages: [] # - name: pip diff --git a/roles/debian/python_pip_packages/tasks/main.yml b/roles/debian/python_pip_packages/tasks/main.yml index 50c038d25..0bdbcd85b 100644 --- a/roles/debian/python_pip_packages/tasks/main.yml +++ b/roles/debian/python_pip_packages/tasks/main.yml @@ -2,15 +2,15 @@ - name: Install packages. ansible.builtin.pip: name: "{{ item.name }}" - state: "{{ item.state | default(omit) }}" - virtualenv: "{{ python_pip_packages.venv_path | default(_venv_path) }}" - virtualenv_command: "{{ python_pip_packages.venv_command | default(_venv_command) }}" + state: "{{ item.state|default(omit) }}" + virtualenv: "{{ python_pip_packages.venv_path }}" + virtualenv_command: "{{ python_pip_packages.venv_command }}" with_items: "{{ python_pip_packages.packages }}" - name: Ensure venv permissions. ansible.builtin.file: - path: "{{ python_pip_packages.venv_path | default(_venv_path) }}" + path: "{{ python_pip_packages.venv_path }}" state: directory recurse: true - owner: "{{ python_pip_packages.install_username | default(_venv_install_username) }}" - group: "{{ python_pip_packages.install_username | default(_venv_install_username) }}" + owner: "{{ python_pip_packages.install_username }}" + group: "{{ python_pip_packages.install_username }}" diff --git a/roles/debian/ssh_server/templates/sshd_config.j2 b/roles/debian/ssh_server/templates/sshd_config.j2 index 216792bb0..9c832dbd3 100644 --- a/roles/debian/ssh_server/templates/sshd_config.j2 +++ b/roles/debian/ssh_server/templates/sshd_config.j2 @@ -27,7 +27,7 @@ ListenAddress {{ address }} #HostKey /etc/ssh/ssh_host_ecdsa_key #HostKey /etc/ssh/ssh_host_ed25519_key {% for key in sshd.HostKey %} -{% if key|length %} +{% if key|length > 0 %} HostKey {{ key }} {% endif %} {% endfor %} @@ -119,7 +119,7 @@ UsePAM {{ sshd.UsePAM }} AllowAgentForwarding {{ sshd.AllowAgentForwarding }} AllowTcpForwarding {{ sshd.AllowTcpForwarding }} -{% if sshd.AllowGroups|length %} +{% if sshd.AllowGroups|length > 0 %} AllowGroups {{ sshd.AllowGroups }} {% endif %} GatewayPorts {{ sshd.GatewayPorts }} From 3986a6f5e8bd8333c166596fef3c6d770a490577 Mon Sep 17 00:00:00 2001 From: drazenCE <140631110+drazenCE@users.noreply.github.com> Date: Wed, 1 Oct 2025 12:41:35 +0200 Subject: [PATCH 54/61] Adding-cyphers-nginx-template (#2679) --- roles/debian/nginx/defaults/main.yml | 3 +++ roles/debian/nginx/templates/nginx.conf.j2 | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/roles/debian/nginx/defaults/main.yml b/roles/debian/nginx/defaults/main.yml index e79e7fe36..86144f452 100644 --- a/roles/debian/nginx/defaults/main.yml +++ 
b/roles/debian/nginx/defaults/main.yml @@ -130,6 +130,9 @@ nginx: php_fastcgi_backend: "127.0.0.1:90{{ php.version[-1] | replace('.', '') }}" # for unix socket use "unix:/var/run/php{{ php.version[-1] | replace('.','') }}-fpm.sock" ratelimitingcrawlers: false client_max_body_size: "700M" + ssl_ciphers: [] # Defaults to empty list, you can add ciphers if needed +# - ECDHE-RSA-AES256-GCM-SHA384 +# - ECDHE-RSA-CHACHA20-POLY1305 # drupal_fallback: [] # Default location behavior for nginx # If no custom location behavior is defined, this will be used: diff --git a/roles/debian/nginx/templates/nginx.conf.j2 b/roles/debian/nginx/templates/nginx.conf.j2 index 9b3dbd5f0..212b794a2 100644 --- a/roles/debian/nginx/templates/nginx.conf.j2 +++ b/roles/debian/nginx/templates/nginx.conf.j2 @@ -43,7 +43,9 @@ http { ssl_protocols {{ nginx.http.ssl_protocols }}; # Dropping SSLv3, ref: POODLE ssl_prefer_server_ciphers on; - + {% if nginx.ssl_ciphers is defined and nginx.ssl_ciphers|length > 0 %} + ssl_ciphers {{ nginx.ssl_ciphers | join(':') }}; + {% endif %} ## # Logging Settings ## From dd4c3c9f665c91090963bb5fd0cff7588bf8ca21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matej=20=C5=A0tajduhar?= <30931414+matej5@users.noreply.github.com> Date: Wed, 1 Oct 2025 12:48:04 +0200 Subject: [PATCH 55/61] Fixing-when-statement (#2700) * Fixing-when-statement * Updating-LE-tasks * Adding-from_json-for-systemd-timers * Adding-from_json-for-systemd-timers-2 * Removin-from_json-for-systemd-timers * Updating-pam_ldap-when-statements * Updating-pam_ldap-when-statements-2 * Updated-Backup-validation-role * Updated-trusted-entity-file-name * Updated-event-patterns * Dropped-default-aurora-retention-to-1 * Bug-fixes * Moving-iam-policy * Moving-iam-policy-2 * Updating-tasks * Updating-tasks-2 * Updating-return-value * Updating-file-names * Updating-file-names-2 * Updating-file-names-3 * Updating-file-names-4 * Adding-debug * Adding-debug-2 * Adding-debug-3 * Updating-source-for-iam * Updating-source-for-iam * Removing-handle-zip-for-lambda * Updating-regex-search * Updating-regex-search * Updating-lambda-function-handling * Updating-lambda-function-handling * Updating-lambda-function-handling-2 * Updating-event-bridge-role-arn * Updating-event-bridge-role-arn-2 * Moving-functions-to-gitlab * Updating-event-pattern * Updating-iam-role * Updating-iam-role-2 * Updating-defaults * Removing-files * Adding-LE-vars-for-apache --------- Co-authored-by: Matej Stajduhar Co-authored-by: Greg Harvey --- .../templates/api_get_list_of_ec2.py.j2 | 48 ----- .../templates/default_s3_object.j2 | 1 - .../templates/get_infra_data_from_s3.py.j2 | 0 .../aws_backup_validation/defaults/main.yml | 44 ++++- .../files/trusted_entitites.j2 | 12 -- .../aws/aws_backup_validation/tasks/main.yml | 171 +++++++++++------- .../tasks/testing_resources.yml | 18 +- ...e_testing.j2 => Aurora_restore_testing.j2} | 0 .../templates/EC2_validation.py.j2 | 117 ------------ .../templates/RDS_validation.py.j2 | 97 ---------- .../templates/event_document_policy.json.j2 | 19 ++ .../templates/trusted_entitites.json.j2 | 18 ++ .../templates/validation_report.py.j2 | 138 -------------- .../files/lambda_document_policy.json | 12 ++ roles/aws/aws_iam_role/tasks/main.yml | 9 +- roles/aws/aws_rds/tasks/main.yml | 4 +- roles/debian/apache/defaults/main.yml | 2 + roles/debian/nginx/defaults/main.yml | 2 + roles/debian/ssl/tasks/letsencrypt.yml | 40 +++- roles/debian/ssl/templates/le_cron.sh.j2 | 2 +- 20 files changed, 246 insertions(+), 508 deletions(-) delete mode 100644 
roles/aws/aws_admin_tools/templates/api_get_list_of_ec2.py.j2 delete mode 100644 roles/aws/aws_admin_tools/templates/default_s3_object.j2 delete mode 100644 roles/aws/aws_admin_tools/templates/get_infra_data_from_s3.py.j2 delete mode 100644 roles/aws/aws_backup_validation/files/trusted_entitites.j2 rename roles/aws/aws_backup_validation/templates/{AURORA_restore_testing.j2 => Aurora_restore_testing.j2} (100%) delete mode 100644 roles/aws/aws_backup_validation/templates/EC2_validation.py.j2 delete mode 100644 roles/aws/aws_backup_validation/templates/RDS_validation.py.j2 create mode 100644 roles/aws/aws_backup_validation/templates/event_document_policy.json.j2 create mode 100644 roles/aws/aws_backup_validation/templates/trusted_entitites.json.j2 delete mode 100644 roles/aws/aws_backup_validation/templates/validation_report.py.j2 create mode 100644 roles/aws/aws_iam_role/files/lambda_document_policy.json diff --git a/roles/aws/aws_admin_tools/templates/api_get_list_of_ec2.py.j2 b/roles/aws/aws_admin_tools/templates/api_get_list_of_ec2.py.j2 deleted file mode 100644 index a3af97ec6..000000000 --- a/roles/aws/aws_admin_tools/templates/api_get_list_of_ec2.py.j2 +++ /dev/null @@ -1,48 +0,0 @@ -import json -import boto3 - -# Defining Clients -ec2_cli = boto3.client("ec2", region_name="{{ _aws_region }}") - -def lambda_handler(event, context): - print("Gathering instance details.") - - # Describe instances and addresses once - instances_response = ec2_cli.describe_instances() - addresses_response = ec2_cli.describe_addresses() - - # Preprocess EIPs for quick lookup by tag:Name - eip_map = {} - for eip in addresses_response.get('Addresses', []): - name_tag = next((tag['Value'] for tag in eip.get('Tags', []) if tag['Key'] == 'Name'), None) - if name_tag: - eip_map.setdefault(name_tag, {'Public': [], 'Private': []}) - if 'PublicIp' in eip: - eip_map[name_tag]['Public'].append(eip['PublicIp']) - if 'PrivateIpAddress' in eip: - eip_map[name_tag]['Private'].append(eip['PrivateIpAddress']) - - ec2_info_list = [] - - for reservation in instances_response.get("Reservations", []): - for instance in reservation.get("Instances", []): - inst_name = "-" - if "Tags" in instance: - for tag in instance["Tags"]: - if tag["Key"] == "Name": - inst_name = tag["Value"] - break - - ec2_info_list.append({ - "EC2Name": inst_name, - "State": instance.get("State", {}), - "PublicIP": instance.get("PublicIpAddress", "-"), - "PrivateIP": instance.get("PrivateIpAddress", "-"), - "InstanceType": instance.get("InstanceType", "-"), - "EIP": eip_map.get(inst_name, {"Public": [], "Private": []}) - }) - - return { - "statusCode": 200, - "EC2Info": ec2_info_list - } diff --git a/roles/aws/aws_admin_tools/templates/default_s3_object.j2 b/roles/aws/aws_admin_tools/templates/default_s3_object.j2 deleted file mode 100644 index 5dfe8fb2b..000000000 --- a/roles/aws/aws_admin_tools/templates/default_s3_object.j2 +++ /dev/null @@ -1 +0,0 @@ -Hello from S3! 
diff --git a/roles/aws/aws_admin_tools/templates/get_infra_data_from_s3.py.j2 b/roles/aws/aws_admin_tools/templates/get_infra_data_from_s3.py.j2 deleted file mode 100644 index e69de29bb..000000000 diff --git a/roles/aws/aws_backup_validation/defaults/main.yml b/roles/aws/aws_backup_validation/defaults/main.yml index bbbfa3282..8264ff268 100644 --- a/roles/aws/aws_backup_validation/defaults/main.yml +++ b/roles/aws/aws_backup_validation/defaults/main.yml @@ -8,6 +8,46 @@ aws_backup_validation: runtime: "python3.12" handler: "lambda_handler" resources: - - EC2 - - RDS + - name: ec2_test_instance + git_url: true + type: EC2 + lambda_policy: + - "backup:PutRestoreValidationResult" + - "ssm:GetCommandInvocation" + - "ssm:GetConnectionStatus" + - "ssm:SendCommand" + - "ec2:DescribeInstances" + - name: rds_test_instance + git_url: true + type: RDS + lambda_policy: + - "backup:PutRestoreValidationResult" + - "ssm:GetCommandInvocation" + - "ssm:SendCommand" + - "ec2:DescribeInstances" + - "rds:DescribeDBInstances" + - name: aurora_create_instance + git_url: true + type: Aurora + lambda_policy: + - "lambda:InvokeFunction" + - name: aurora_test_instance + git_url: true + type: Aurora + event_pattern: '{ "source": ["aws.rds"], "detail-type": ["RDS DB Instance Event"], "resources": [{ "prefix": "arn:aws:rds:eu-west-1:{{ _acc_id }}:db:restoretest" }], "detail": { "EventID": ["RDS-EVENT-0005"] } }' + lambda_policy: + - "backup:PutRestoreValidationResult" + - "ec2:DescribeInstances" + - "rds:DescribeDBInstances" + - "rds:DescribeDBClusters" + - "rds:DeleteDBInstance" + - name: validation_report + git_url: true + type: Schedule + schedule: "cron(0 0 ? * MON *)" + lambda_policy: + - "backup:ListRestoreJobs" + - "ses:SendEmail" + - "ec2:DescribeImages" + - "rds:DescribeDbSnapshots" #- EFS diff --git a/roles/aws/aws_backup_validation/files/trusted_entitites.j2 b/roles/aws/aws_backup_validation/files/trusted_entitites.j2 deleted file mode 100644 index fb84ae9de..000000000 --- a/roles/aws/aws_backup_validation/files/trusted_entitites.j2 +++ /dev/null @@ -1,12 +0,0 @@ -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "Service": "lambda.amazonaws.com" - }, - "Action": "sts:AssumeRole" - } - ] -} diff --git a/roles/aws/aws_backup_validation/tasks/main.yml b/roles/aws/aws_backup_validation/tasks/main.yml index b4ab85c2a..4ef77ab35 100644 --- a/roles/aws/aws_backup_validation/tasks/main.yml +++ b/roles/aws/aws_backup_validation/tasks/main.yml @@ -1,36 +1,94 @@ --- -- name: Create a role and attach policies for Lambda backup validation. +- name: Get account ID for ARN. + ansible.builtin.command: >- + aws sts get-caller-identity + --query Account + --output text + register: _acc_id + +- name: Setting previous command output into variable. + ansible.builtin.set_fact: + _acc_id: "{{ _acc_id.stdout | from_json }}" + +- name: Create a role and attach policies for events. + ansible.builtin.include_role: + name: aws/aws_iam_role + vars: + aws_iam_role: + name: "{{ item.name }}_event" + source: "{{ item.name}}" + aws_profile: "{{ _aws_profile }}" + inline_policies: + name: "{{ item.name }}_event" + resource: "arn:aws:lambda:{{ _aws_region }}:{{ _acc_id }}:function:{{ item.name }}" + action: + - "lambda:InvokeFunction" + policy_document: "{{ lookup('template', 'event_document_policy.json.j2') }}" + loop: "{{ aws_backup_validation.resources }}" + loop_control: + extended: true + extended_allitems: false + +- name: Create a role and attach policies for Lambda functions. 
ansible.builtin.include_role: name: aws/aws_iam_role vars: aws_iam_role: - name: LambdaBackupRestoreRole + name: "{{ item.name}}_lambda" + source: "{{ item.name}}" aws_profile: "{{ _aws_profile }}" managed_policies: - - arn:aws:iam::aws:policy/AmazonEC2FullAccess - - arn:aws:iam::aws:policy/AWSBackupFullAccess - - arn:aws:iam::aws:policy/AmazonRDSFullAccess - arn:aws:iam::aws:policy/CloudWatchLogsFullAccess - - arn:aws:iam::aws:policy/AmazonSSMFullAccess - - arn:aws:iam::aws:policy/AmazonSESFullAccess - policy_document: "{{ lookup('file', 'trusted_entitites.j2') }}" + inline_policies: + name: "{{ item.name }}_lambda" + resource: "*" + action: "{{ item.lambda_policy }}" + policy_document: "{{ lookup('template', 'trusted_entitites.json.j2') }}" + loop: "{{ aws_backup_validation.resources }}" + loop_control: + extended: true + extended_allitems: false + +- name: Get info about newly created restore testing plan. + ansible.builtin.command: > + aws backup list-restore-testing-plans --region {{ _aws_region }} + register: _testing_plans + +- name: Create Lambda functions from templates. + ansible.builtin.include_role: + name: aws/aws_lambda + vars: + aws_lambda: + name: "{{ item.name }}" + description: "Lambda functions for {{ item.type }} validation." + timeout: "{{ aws_backup_validation.timeout }}" + role: "{{ aws_iam_role._result[item.name + '_lambda'] }}" + runtime: "{{ aws_backup_validation.runtime }}" + function_file: "{{ lookup('template', item.name + '.py.j2') }}" + s3_bucket: "{{ aws_backup_validation.s3_bucket }}" + s3_bucket_prefix: "lambda-functions" + tags: + Name: "{{ item.name }}" + loop: "{{ aws_backup_validation.resources }}" + when: item.git_url is not defined -- name: Create backup validation Lambda functions. +- name: Create Lambda functions from git url. ansible.builtin.include_role: name: aws/aws_lambda vars: aws_lambda: - name: "{{ aws_backup_validation.name }}_{{ item }}" - description: "{{ aws_backup_validation.description }}" + name: "{{ item.name }}" + description: "Lambda functions for {{ item.type }} validation." timeout: "{{ aws_backup_validation.timeout }}" - role: "{{ aws_iam_role._result['LambdaBackupRestoreRole'] }}" + role: "{{ aws_iam_role._result[item.name + '_lambda'] }}" runtime: "{{ aws_backup_validation.runtime }}" - function_file: "{{ lookup('template', item + '_validation.py.j2') }}" + function_file: "{{ item.git_url }}" s3_bucket: "{{ aws_backup_validation.s3_bucket }}" s3_bucket_prefix: "lambda-functions" tags: - Name: "{{ item }}_backup_validation" + Name: "{{ item.name }}" loop: "{{ aws_backup_validation.resources }}" + when: item.git_url is defined - name: Create an IAM Managed Policy for passing roles and setup IAM role. ansible.builtin.include_role: @@ -53,79 +111,52 @@ #- name: Get verified domain. # ansible.builtin.include_tasks: get_valid_email.yml -- name: Get info about newly created restore testing plan. - ansible.builtin.command: > - aws backup list-restore-testing-plans --region {{ _aws_region }} - register: _testing_plans - -- name: Create validation report function. 
- ansible.builtin.include_role: - name: aws/aws_lambda - vars: - aws_lambda: - name: "validation_report" - description: "{{ aws_backup_validation.description }}" - timeout: "30" - role: "{{ aws_iam_role._result['LambdaBackupRestoreRole'] }}" - runtime: "{{ aws_backup_validation.runtime }}" - function_file: "{{ lookup('template', 'validation_report.py.j2') }}" - s3_bucket: "{{ aws_backup_validation.s3_bucket }}" - s3_bucket_prefix: "lambda-functions" - tags: - Name: "validation_report" - -- name: Get account ID for ARN. - ansible.builtin.command: >- - aws sts get-caller-identity - --query Account - --output text - register: _acc_id - -- name: Setting previous command output into variable. - ansible.builtin.set_fact: - _acc_id: "{{ _acc_id.stdout | from_json }}" - -- name: Create EventBridge for validation functions. +- name: Create EventBridge with lambda functions. amazon.aws.cloudwatchevent_rule: - name: "RestoreValidation_{{ item }}" + name: "{{ item.name }}" description: "{{ aws_backup_validation.description }}" state: present region: "{{ _aws_region }}" - event_pattern: '{ "source": ["aws.backup"], "detail-type": ["Restore Job State Change"], "detail": { "resourceType": ["{{ item }}"], "status": ["COMPLETED"] } }' + role_arn: "arn:aws:iam::{{ _acc_id }}:role/{{ item.name }}_event" + event_pattern: >- + {{ item.event_pattern | default( + { + "source": ["aws.backup"], + "detail-type": ["Restore Job State Change"], + "detail": { + "resourceType": [ item.type ], + "status": ["COMPLETED"] + } + } | to_json + ) }} targets: - - id: "RestoreValidation_{{ item }}" - arn: "arn:aws:lambda:{{ _aws_region }}:{{ _acc_id }}:function:RestoreValidation_{{ item }}" + - id: "{{ item.name }}" + arn: "arn:aws:lambda:{{ _aws_region }}:{{ _acc_id }}:function:{{ item.name }}" loop: "{{ aws_backup_validation.resources }}" + when: item.type != "Schedule" register: _event_bridges - name: Create schedule for validation reports. amazon.aws.cloudwatchevent_rule: - name: validation_report - schedule_expression: "cron(0 0 ? * MON *)" - description: Run validation reporting + name: "{{ item.name }}" + schedule_expression: "{{ item.schedule }}" + description: "Run validation reporting." region: "{{ _aws_region }}" + role_arn: "arn:aws:iam::{{ _acc_id }}:role/{{ item.name }}_event" targets: - id: validation_report - arn: "{{ (aws_lambda._result['validation_report'].configuration.function_arn.split(':') | map('trim'))[:-1] | join(':') }}" # Remove the version number from ARN + arn: "arn:aws:lambda:{{ _aws_region }}:{{ _acc_id }}:function:{{ item.name }}" + loop: "{{ aws_backup_validation.resources }}" + when: item.type == "Schedule" register: _validation_event -- name: Update Lambda policy. +- name: Update Lambda policies. amazon.aws.lambda_policy: state: present - function_name: "{{ item.rule.name }}" - statement_id: "{{ item.rule.name }}" + function_name: "{{ item.name }}" + statement_id: "{{ item.name }}" action: lambda:InvokeFunction principal: events.amazonaws.com - source_arn: "{{ item.rule.arn }}" - region: "{{ _aws_region }}" - loop: "{{ _event_bridges.results }}" - -- name: Update lambda validation report policy. 
- amazon.aws.lambda_policy: - state: present - function_name: "validation_report" - statement_id: "validation_report" - action: lambda:InvokeFunction - principal: events.amazonaws.com - source_arn: "{{ _validation_event.rule.arn }}" + source_arn: "arn:aws:events:{{ _aws_region }}:{{ _acc_id }}:rule/{{ item.name }}" region: "{{ _aws_region }}" + loop: "{{ aws_backup_validation.resources }}" diff --git a/roles/aws/aws_backup_validation/tasks/testing_resources.yml b/roles/aws/aws_backup_validation/tasks/testing_resources.yml index 3e996ec22..d95ae4295 100644 --- a/roles/aws/aws_backup_validation/tasks/testing_resources.yml +++ b/roles/aws/aws_backup_validation/tasks/testing_resources.yml @@ -68,34 +68,26 @@ instance: "EC2" file-system: "EFS" db: "RDS" - cluster: "AURORA" + cluster: "Aurora" - name: Set instance type for template. ansible.builtin.set_fact: _instance_type_restore: "{{ instance_type[backup.resource_type] }}" - _template_prefix: "{{ instance_type[backup.resource_type] }}" when: backup.resource_type != 'file-system' -- name: Set instance type to Aurora if defined. - ansible.builtin.set_fact: - _instance_type_restore: "Aurora" - when: - - backup.resource_type == 'db' - - "'aurora' in aws_rds.engine" - - name: Create restore testing query file. ansible.builtin.template: - src: "{{ _template_prefix }}_restore_testing.j2" + src: "{{ _instance_type_restore }}_restore_testing.j2" dest: /tmp/restore_testing.json register: _restore_testing_query - when: _template_prefix is defined + when: _instance_type_restore is defined - name: Check if protected reource exist. ansible.builtin.command: > aws backup list-protected-resources --query "Results[?ResourceArn=='{{ _resource_arn }}']" --region {{ _aws_region }} register: _protected_res -- name: Assign {{ _template_prefix }} resource to AWS restore testing plan. +- name: Assign {{ _instance_type_restore }} resource to AWS restore testing plan. 
ansible.builtin.command: > aws backup create-restore-testing-selection --cli-input-json file:///tmp/restore_testing.json --region {{ _aws_region }} - when: _template_prefix is defined and _testing_plan_info.stdout != "null" and _testing_selection_exists.stdout | length == 0 and _protected_res.stdout | length != 0 + when: _instance_type_restore is defined and _testing_plan_info.stdout != "null" and _testing_selection_exists.stdout | length == 0 and _protected_res.stdout | length != 0 diff --git a/roles/aws/aws_backup_validation/templates/AURORA_restore_testing.j2 b/roles/aws/aws_backup_validation/templates/Aurora_restore_testing.j2 similarity index 100% rename from roles/aws/aws_backup_validation/templates/AURORA_restore_testing.j2 rename to roles/aws/aws_backup_validation/templates/Aurora_restore_testing.j2 diff --git a/roles/aws/aws_backup_validation/templates/EC2_validation.py.j2 b/roles/aws/aws_backup_validation/templates/EC2_validation.py.j2 deleted file mode 100644 index 70b0963b7..000000000 --- a/roles/aws/aws_backup_validation/templates/EC2_validation.py.j2 +++ /dev/null @@ -1,117 +0,0 @@ -import json -import boto3 -import socket -import time - -# Defining Clients -#s3_cli = boto3.client('s3', region_name='eu-west-2') -backup_cli = boto3.client('backup', region_name="{{ _aws_region }}") -ec2_cli = boto3.client("ec2", region_name="{{ _aws_region }}") -ssm_cli = boto3.client('ssm', region_name="{{ _aws_region }}") -ses_cli = boto3.client('ses', region_name="{{ _aws_region }}") - -# Debugger -#boto3.set_stream_logger('') - -def lambda_handler(event, context): - - mail_body = "" - - print("Gathering instance id.") - ec2_instance_id=event['detail']['createdResourceArn'].split("/",1)[1] - - print("Gathering instance details.") - ec2_instances=ec2_cli.describe_instances() - - instance_exist = False - - for reservation in ec2_instances["Reservations"]: - for instance in reservation["Instances"]: - if ec2_instance_id == instance["InstanceId"]: - tags = instance['Tags'] - instance_type = instance["InstanceType"] - private_ip = instance["PrivateIpAddress"] - mail_body = mail_body + "Instance is restored!\n" - instance_exist = True - - if instance_exist: - port = 22 - - print("Gathering instance name.") - instance_name = '' - for tag in tags: - if tag['Key'] == 'Name': - instance_name = tag['Value'] - - print("Testing connection!") - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.settimeout(5.0) - result = sock.connect_ex((private_ip,port)) - - print(result) - - # If result was not success - if result != 0: - mail_body = mail_body + "Connection on " + str(port) + " is not working, this could be caused by firewall not accepting connections.\n" - else: - mail_body = mail_body + "Connection on " + str(port) + " is working!\n" - - # Check if SSM is set up for instance - ssm_status = ssm_cli.get_connection_status(Target=ec2_instance_id) - - if ssm_status['Status'] == 'connected': - # Run scripts on EC2 - print("Creating script!") - script = """ - echo "Server info:" - hostnamectl - echo "Disk usage:" - df -h - """ - - print("Running command!") - command_response = ssm_cli.send_command( - DocumentName ='AWS-RunShellScript', - Parameters = {'commands': [script]}, - InstanceIds = [ - ec2_instance_id - ] - ) - - print("Gathering commands details!") - time.sleep(10) - c_res = ssm_cli.get_command_invocation( - CommandId=command_response['Command']['CommandId'], - InstanceId=ec2_instance_id - ) - - print(c_res['StandardOutputContent']) - instance_message = "Instance " + instance_name + " - 
" + ec2_instance_id + " was restored.\n" - mail_body = mail_body + "SSM is working and these are the details of the instance:\n" + c_res['StandardOutputContent'] - - else: - mail_body = mail_body + "SSM is not configured or accessible!\n" - - print("Validating Restore job!") - backup_cli.put_restore_validation_result( - RestoreJobId=event['detail']['restoreJobId'], - ValidationStatus="SUCCESSFUL", - ValidationStatusMessage="" - ) - - else: - print("Validating Restore job!") - backup_cli.put_restore_validation_result( - RestoreJobId=event['detail']['restoreJobId'], - ValidationStatus="FAILED", - ValidationStatusMessage="" - ) - - mail_body = mail_body + "Instance " + ec2_instance_id + " is not running!" - - print(instance_message + mail_body) - - return { - 'statusCode': 200, - 'body': event - } diff --git a/roles/aws/aws_backup_validation/templates/RDS_validation.py.j2 b/roles/aws/aws_backup_validation/templates/RDS_validation.py.j2 deleted file mode 100644 index a5c2daab3..000000000 --- a/roles/aws/aws_backup_validation/templates/RDS_validation.py.j2 +++ /dev/null @@ -1,97 +0,0 @@ -import json -import boto3 -import socket -import time - -# Defining Clients -#s3_cli = boto3.client('s3', region_name='eu-west-2') -backup_cli = boto3.client('backup', region_name="{{ _aws_region }}") -ec2_cli = boto3.client("ec2", region_name="{{ _aws_region }}") -rds_cli = boto3.client("rds", region_name="{{ _aws_region }}") -ssm_cli = boto3.client('ssm', region_name="{{ _aws_region }}") -ses_cli = boto3.client('ses', region_name="{{ _aws_region }}") - -# Debugger -#boto3.set_stream_logger('') - -def lambda_handler(event, context): - - mail_body = "" - - print("Gathering instance id.") - rds_instance_id=event['detail']['createdResourceArn'].split(":")[-1] - - print(rds_instance_id) - - print("Gathering instance details.") - rds_instance=rds_cli.describe_db_instances( - DBInstanceIdentifier=rds_instance_id - ) - - print("Get instance name with access to RDS") - for tag in rds_instance['DBInstances'][0]['TagList']: - if tag['Key']=='Name': - ec2_with_access=tag['Value'] - - new_endpoint = rds_instance['DBInstances'][0]['Endpoint']['Address'] - - print(ec2_with_access) - - print("Get all instances") - ec2_instances=ec2_cli.describe_instances() - - instance_exist = False - - print("Iterate to get id of EC2") - for reservation in ec2_instances["Reservations"]: - for instance in reservation["Instances"]: - for tag in instance['Tags']: - if ((tag['Key'] == "Name") and (tag['Value'] == ec2_with_access)): - print(tag['Key'] + " - " + tag['Value'] + " - " + instance["InstanceId"]) - ec2_instance_id = instance["InstanceId"] - - print(ec2_instance_id) - - # Run scripts on EC2 - print("Creating script!") - script = """ - old_host={{ '$' }}(grep host /home/deploy/.mysql.creds | awk -F= '{{ '{{' }}print $2 {{ '}}' }}') - sed 's,old_host,{new_host},g' /home/deploy/.mysql.creds >> /home/deploy/.mysql.creds.tmp - mysql --defaults-file=/home/deploy/.mysql.creds.tmp -e "SELECT table_schema 'DB Name', ROUND(SUM(data_length + index_length) / 1024 / 1024, 1) 'DB Size in MB' FROM information_schema.tables GROUP BY table_schema;" - rm /home/deploy/.mysql.creds.tmp - """.format(new_host=new_endpoint) - - print("Running command!") - command_response = ssm_cli.send_command( - DocumentName ='AWS-RunShellScript', - Parameters = {'commands': [script]}, - InstanceIds = [ - ec2_instance_id - ] - ) - - print("Getting command output.") - time.sleep(30) - c_res = ssm_cli.get_command_invocation( - 
CommandId=command_response['Command']['CommandId'], - InstanceId=ec2_instance_id - ) - - print(c_res['StandardOutputContent']) - instance_message = "RDS instance " + ec2_with_access + " was restored.\n" - conn_message = "Able to get information from " + new_endpoint + ":\n" - mail_body=instance_message + conn_message + c_res['StandardOutputContent'] - - print("Validating Restore job!") - backup_cli.put_restore_validation_result( - RestoreJobId=event['detail']['restoreJobId'], - ValidationStatus="SUCCESSFUL", - ValidationStatusMessage="" - ) - - print(mail_body) - - return { - 'statusCode': 200, - 'body': json.dumps(event) - } diff --git a/roles/aws/aws_backup_validation/templates/event_document_policy.json.j2 b/roles/aws/aws_backup_validation/templates/event_document_policy.json.j2 new file mode 100644 index 000000000..f76d3d804 --- /dev/null +++ b/roles/aws/aws_backup_validation/templates/event_document_policy.json.j2 @@ -0,0 +1,19 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "TrustEventBridgeService", + "Effect": "Allow", + "Principal": { + "Service": "events.amazonaws.com" + }, + "Action": "sts:AssumeRole", + "Condition": { + "StringEquals": { + "aws:SourceArn": "arn:aws:events:{{ _aws_region }}:{{ _acc_id }}:rule/{{ aws_iam_role.source }}", + "aws:SourceAccount": "{{ _acc_id }}" + } + } + } + ] +} diff --git a/roles/aws/aws_backup_validation/templates/trusted_entitites.json.j2 b/roles/aws/aws_backup_validation/templates/trusted_entitites.json.j2 new file mode 100644 index 000000000..d6c4d434a --- /dev/null +++ b/roles/aws/aws_backup_validation/templates/trusted_entitites.json.j2 @@ -0,0 +1,18 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + }, + "Action": "sts:AssumeRole", + "Condition": { + "StringEquals": { + "aws:SourceArn": "arn:aws:lambda:{{ _aws_region }}:{{ _acc_id }}:function:{{ aws_iam_role.source }}", + "aws:SourceAccount": "{{ _acc_id }}" + } + } + } + ] +} diff --git a/roles/aws/aws_backup_validation/templates/validation_report.py.j2 b/roles/aws/aws_backup_validation/templates/validation_report.py.j2 deleted file mode 100644 index 7501db7ca..000000000 --- a/roles/aws/aws_backup_validation/templates/validation_report.py.j2 +++ /dev/null @@ -1,138 +0,0 @@ -import json -import boto3 -import socket -import time -import datetime - -# Defining Clients -backup_cli = boto3.client('backup', region_name="{{ _aws_region }}") -ses_cli = boto3.client('ses', region_name="{{ _aws_region }}") -ec2_cli = boto3.client('ec2', region_name="{{ _aws_region }}") -rds_cli = boto3.client('rds', region_name="{{ _aws_region }}") - -mail_title = "" -mail_body = "" - -# Debugger -#boto3.set_stream_logger('') - -def set_mail_body(success_restore, inst, instance_name, ami_id): - global mail_body - if success_restore: - mail_body+=instance_name + " was restored successfully from " + ami_id + ", " - else: - mail_body+=instance_name + " failed to restore from " + ami_id + ", " - - if inst['ValidationStatus'] == "SUCCESSFUL": - mail_body+= "and validation was successful! \n" - elif inst['ValidationStatus'] == "FAILED": - failed_validation=True - mail_body+= "but validation has failed with following message: \n" - mail_body+= inst['ValidationStatusMessage'] - elif inst['ValidationStatus'] == "TIMED_OUT": - failed_validation=True - mail_body+= "but validation timed out. \n" - else: - failed_validation=True - mail_body+="with unknown validation failure! 
\n" - -def lambda_handler(event, context): - instance_name = "" - success_restore = False - failed_validation = False - global mail_body - last_restore_valdation_date = (datetime.datetime.now() - datetime.timedelta(days=1)).strftime('%Y-%m-%d') - completed_jobs=[] - failed_jobs=[] - - print("Getting list of successful restoration.") - {% for plan in _testing_plans.stdout | from_json | json_query('RestoreTestingPlans') %} -completed_job = backup_cli.list_restore_jobs( - ByCreatedAfter=last_restore_valdation_date, - ByStatus='COMPLETED', - ByRestoreTestingPlanArn='{{ plan['RestoreTestingPlanArn'] }}' - ) - completed_jobs+=completed_job['RestoreJobs'] - {% endfor %} - - print("Getting instance details.") - for inst in completed_jobs: - success_restore = True - print("Getting instance arn.") - ami_arn = inst['RecoveryPointArn'].split(':')[2] - - if ami_arn == 'ec2': - ami_id = inst['RecoveryPointArn'].split('/')[-1] - instance_details = ec2_cli.describe_images( - ImageIds=[ami_id] - ) - for tag in instance_details['Images'][0]['Tags']: - if tag['Key'] == 'Name': - instance_name = tag['Value'] - - elif ami_arn == 'rds': - ami_id = inst['RecoveryPointArn'].split('snapshot:')[-1] - instance_details = rds_cli.describe_db_snapshots( - DBSnapshotIdentifier=ami_id - ) - print(instance_details) - for tag in instance_details['DBSnapshots'][0]['TagList']: - if tag['Key'] == 'Name': - instance_name = tag['Value'] - - else: - ami_id = 'Probably EFS' - instance_name = 'latest snapshot' - - set_mail_body(success_restore, inst, instance_name, ami_id) - - print(mail_body) - print("Getting list of failed restoration.") - {% for plan in _testing_plans.stdout | from_json | json_query('RestoreTestingPlans') %} -failed_job = backup_cli.list_restore_jobs( - ByCreatedAfter=last_restore_valdation_date, - ByStatus='FAILED', - ByRestoreTestingPlanArn='{{ plan['RestoreTestingPlanArn'] }}' - ) - failed_jobs += failed_job['RestoreJobs'] - {% endfor %} - - if len(failed_jobs) > 0: - mail_title = "🔴 Failed!" - else: - mail_title = "🟢 Success!" - print("Successful restore jobs:") - print(completed_jobs) - - print("Failed restore jobs:") - print(failed_jobs) - - print("Sending email!") - response = ses_cli.send_email( - Destination={ - 'BccAddresses': [ - ], - 'CcAddresses': [], - 'ToAddresses': [ - 'sysadm@codeenigma.com' - ], - }, - Message={ - 'Body': { - 'Text': { - 'Charset': 'UTF-8', - 'Data': mail_body, - }, - }, - 'Subject': { - 'Charset': 'UTF-8', - 'Data': 'Restore testing - {{ _infra_name }}: ' + mail_title, - }, - }, - Source='Lambda Backup Validation ', - ) - - return { - 'statusCode': 200, - 'body': event - } diff --git a/roles/aws/aws_iam_role/files/lambda_document_policy.json b/roles/aws/aws_iam_role/files/lambda_document_policy.json new file mode 100644 index 000000000..fd267525d --- /dev/null +++ b/roles/aws/aws_iam_role/files/lambda_document_policy.json @@ -0,0 +1,12 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "lambda.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/roles/aws/aws_iam_role/tasks/main.yml b/roles/aws/aws_iam_role/tasks/main.yml index fe75c1c50..c42d5296a 100644 --- a/roles/aws/aws_iam_role/tasks/main.yml +++ b/roles/aws/aws_iam_role/tasks/main.yml @@ -1,4 +1,4 @@ -- name: Create an IAM Managed Policy if defined. +- name: Create an inline IAM Managed Policy if defined. 
amazon.aws.iam_managed_policy: policy_name: "inline_{{ aws_iam_role.inline_policies.name }}_policy" policy: @@ -23,19 +23,19 @@ - name: Create list of strings for predefined policies. ansible.builtin.set_fact: - allowed_strings: ["ec2", "ecs", "backup"] + allowed_strings: ["ec2", "ecs", "backup", "event"] - name: Create assume role policy document if predefined string is passed. ansible.builtin.set_fact: _assume_role_policy: "{{ lookup('file', aws_iam_role.policy_document + '_document_policy.json') }}" - when: aws_iam_role.policy_document in allowed_strings + when: aws_iam_role.policy_document in allowed_strings and aws_iam_role.policy_document != "event" - name: Create assume role policy document if template is provided. ansible.builtin.set_fact: _assume_role_policy: "{{ aws_iam_role.policy_document }}" when: aws_iam_role.policy_document not in allowed_strings -- name: Create an IAM role. +- name: Create an IAM role {{ aws_iam_role.name }}. amazon.aws.iam_role: profile: "{{ aws_iam_role.aws_profile }}" name: "{{ aws_iam_role.name }}" @@ -50,6 +50,7 @@ - name: Wait for 6 seconds for IAM to be ready. ansible.builtin.wait_for: timeout: 6 + when: ansible_loop.last is defined and ansible_loop.last - name: Register aws_iam_role results. ansible.builtin.set_fact: diff --git a/roles/aws/aws_rds/tasks/main.yml b/roles/aws/aws_rds/tasks/main.yml index 602f13439..b75866e33 100644 --- a/roles/aws/aws_rds/tasks/main.yml +++ b/roles/aws/aws_rds/tasks/main.yml @@ -45,7 +45,7 @@ password: "{{ aws_rds.master_user_password }}" db_subnet_group_name: "{{ aws_rds.name }}" vpc_security_group_ids: "{{ _aws_security_group_list }}" - backup_retention_period: "{{ aws_rds.backup_retention_period | default(35) }}" + backup_retention_period: "{{ aws_rds.backup_retention_period | default(1) }}" character_set_name: "{{ aws_rds.character_set_name | default(omit) }}" tags: "{{ aws_rds.tags | combine({'Name': aws_rds.name}) }}" when: "'aurora' in aws_rds.engine" @@ -157,7 +157,7 @@ - "{{ _aws_sns_topic_info.sns_arn }}" when: - _aws_sns_topic_info is defined - - _aws_sns_topic_info.sns_arn + - _aws_sns_topic_info.sns_arn | length > 0 - name: Create RDS CloudWatch alarms. ansible.builtin.include_role: diff --git a/roles/debian/apache/defaults/main.yml b/roles/debian/apache/defaults/main.yml index c76ddc0b0..4b7779eff 100644 --- a/roles/debian/apache/defaults/main.yml +++ b/roles/debian/apache/defaults/main.yml @@ -4,6 +4,8 @@ php: - 8.1 # see https://www.php.net/supported-versions.php symfony_env: "{{ _env_type }}" apache: + # le_email: sysadm@codeenigma.com Globally defined email for Let's Encrypt notifications + # le_cron: Globally defined cron for LE renewals # Global default config for apache2.conf. user: www-data mods_enabled: diff --git a/roles/debian/nginx/defaults/main.yml b/roles/debian/nginx/defaults/main.yml index 86144f452..274656933 100644 --- a/roles/debian/nginx/defaults/main.yml +++ b/roles/debian/nginx/defaults/main.yml @@ -8,6 +8,8 @@ symfony_env: "{{ _env_type }}" # Nginx variables actually start here. nginx: # Global default config for nginx.conf. 
+ # le_email: sysadm@codeenigma.com Globally defined email for Let's Encrypt notifications + # le_cron: Globally defined cron for LE renewals user: www-data worker_processes: auto events: diff --git a/roles/debian/ssl/tasks/letsencrypt.yml b/roles/debian/ssl/tasks/letsencrypt.yml index 74350945a..9c3eb69d5 100644 --- a/roles/debian/ssl/tasks/letsencrypt.yml +++ b/roles/debian/ssl/tasks/letsencrypt.yml @@ -35,6 +35,36 @@ when: - ssl.letsencrypt.venv_install_username is defined +- name: Set LE email if defined globally for nginx. + ansible.builtin.set_fact: + _le_email: "{{ nginx.le_email }}" + when: nginx.le_email is defined + +- name: Set LE email if defined globally for apache. + ansible.builtin.set_fact: + _le_email: "{{ apache.le_email }}" + when: apache.le_email is defined + +- name: Set LE email if defined per item. + ansible.builtin.set_fact: + _le_email: "{{ ssl.email }}" + when: ssl.email is defined + +- name: Set LE cron if defined globally for nginx. + ansible.builtin.set_fact: + _le_cron: "{{ nginx.le_cron }}" + when: nginx.le_cron is defined + +- name: Set LE cron if defined globally for apache. + ansible.builtin.set_fact: + _le_cron: "{{ apache.le_cron }}" + when: apache.le_cron is defined + +- name: Set LE cron if defined per item. + ansible.builtin.set_fact: + _le_cron: "{{ ssl.on_calendar }}" + when: ssl.on_calendar is defined + # Install Python applications. - name: Manage required pip packages. ansible.builtin.include_role: @@ -101,13 +131,13 @@ when: not _letsencrypt_cert.stat.exists - name: Register certificate bypassing web server if needed - standalone. - ansible.builtin.command: "{{ _venv_path }}/bin/certbot {{ ssl.certbot_register_command }} --cert-name {{ ssl_facts[_ssl_domains[0]].domain }} --http-01-port {{ ssl.http_01_port }} -m {{ ssl.email }} --{{ _ssl_web_server }}{{ _letsencrypt_domain_string }}" + ansible.builtin.command: "{{ _venv_path }}/bin/certbot {{ ssl.certbot_register_command }} --cert-name {{ ssl_facts[_ssl_domains[0]].domain }} --http-01-port {{ ssl.http_01_port }} -m {{ _le_email }} --{{ _ssl_web_server }}{{ _letsencrypt_domain_string }}" when: - not _letsencrypt_cert.stat.exists - ssl.web_server == "standalone" - name: Register certificate bypassing web server if needed - webroot. - ansible.builtin.command: "{{ _venv_path }}/bin/certbot certonly --webroot -w {{ domain.webroot }} --cert-name {{ ssl_facts[_ssl_domains[0]].domain }} {{ _letsencrypt_domain_string }}" + ansible.builtin.command: "{{ _venv_path }}/bin/certbot certonly --webroot -w /tmp --cert-name {{ ssl_facts[_ssl_domains[0]].domain }} {{ _letsencrypt_domain_string }} -m {{ _le_email }} --non-interactive" when: - not _letsencrypt_cert.stat.exists - ssl.web_server == "webroot" @@ -134,7 +164,11 @@ - name: Build timer variables with dynamic key. ansible.builtin.set_fact: - _certbot_renewal_timer: "{'certbot_renewal':{'timer_command':'/usr/local/bin/le_cron.sh','timer_OnCalendar':'{{ ssl.on_calendar }}','timer_persistent':'true'}}" + _certbot_renewal_timer: + certbot_renewal: + timer_command: "/usr/local/bin/le_cron.sh" + timer_OnCalendar: "{{ _le_cron }}" + timer_persistent: true when: ssl.autorenew - name: Create systemd timer for certificate renewal. diff --git a/roles/debian/ssl/templates/le_cron.sh.j2 b/roles/debian/ssl/templates/le_cron.sh.j2 index 31148628a..404524aad 100644 --- a/roles/debian/ssl/templates/le_cron.sh.j2 +++ b/roles/debian/ssl/templates/le_cron.sh.j2 @@ -35,7 +35,7 @@ certbot_exit_code=$? 
if [ $certbot_exit_code -ne 0 ]; then # Certbot failed, send email notification - recipient="{{ ssl.email }}" + recipient="{{ _le_email }}" subject="Certbot Renewal Failed" hostname=$(cat /etc/hostname) body="Certbot renewal failed on server $hostname with the following output: From c5446f9800cc8e51323c0da6e6ce908b751b8f07 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 1 Oct 2025 14:03:37 +0200 Subject: [PATCH 56/61] Trying to fix AWS standalone builds. --- roles/aws/aws_ec2_with_eip/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/aws/aws_ec2_with_eip/tasks/main.yml b/roles/aws/aws_ec2_with_eip/tasks/main.yml index 7f13d4d1f..d03ddcc5c 100644 --- a/roles/aws/aws_ec2_with_eip/tasks/main.yml +++ b/roles/aws/aws_ec2_with_eip/tasks/main.yml @@ -15,7 +15,7 @@ ansible.builtin.set_fact: _aws_hostname: "{{ item }}" with_inventory_hostnames: - - "{{ aws_ec2_with_eip.hostname }}" + - "_{{ aws_ec2_with_eip.hostname|regex_replace('-', '_') }}" # Subnet ID is stored in ce-provision's data directory - name: Ensure server data directory exists. @@ -133,7 +133,7 @@ volume_type: "{{ aws_ec2_with_eip.root_volume_type }}" encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}" register: _aws_ec2_with_eip_instances - when: (_aws_hostname | length == 0) or (_aws_hostname == aws_ec2_with_eip.hostname) or aws_ec2_with_eip.force + when: (_aws_hostname|length == 0) or aws_ec2_with_eip.force # This task deliberately omits `image_id` so it cannot create a new instance, only refresh the state of an existing one. - name: Refresh EC2 instance. @@ -159,7 +159,7 @@ volume_type: "{{ aws_ec2_with_eip.root_volume_type }}" encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}" register: _aws_ec2_with_eip_instances - when: (_aws_hostname | length > 0) or (_aws_hostname != aws_ec2_with_eip.hostname) or not aws_ec2_with_eip.force + when: (_aws_hostname|length > 0) or not aws_ec2_with_eip.force - name: Check if we have an existing EIP. amazon.aws.ec2_eip_info: From 7cb9bae977bdc08c327199f97957380bd5714ebe Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 1 Oct 2025 14:15:31 +0200 Subject: [PATCH 57/61] Bug fixes pr 2.x (#2714) * Fixing installer variable bug. * Fixing tests for external PRs. * Testing with a fork. * Adding repo owner's username into installer string. * Refactoring config repo detection to simplify. * No longer permitted to use an integer as a truthy value. * No longer permitted to use existence check as a truthy value. * Can't see a reason why linotp var shouldn't be a boolean. * No longer permitted to use existence check as a truthy value. * Fixing truthy errors in ce_deploy role. * No longer permitted to use an integer as a truthy value. * Updating clamav command to use flock avoiding duplicate processes running. * More truthy length fixes. * Fixing more LDAP role truthy issues. * Slight block refactor for LDAP. * DN length check should not be negated. * Forgot to add the length filter. * Another boolean Ansible 12 error in AMI role. * ALB port must be cast as a string for RedirectAction. * Setting the correct Jinja filter, it's string, not str. * Fixing more Ansible 12 length issues in autoscale role. * Simplifying ASG role by refactoring into blocks. * Further simplifying ASG CloudFront block. * Scaling rules refactor needs work. * Scaling policies list needs to be defined in case it is empty and we try to concatenate. * Enhancing installer to accept an Ansible version and putting Ansible 12 back into GitHub Actions containers. 
* Trying a different approach to defaulting the venv username. * Removing default() filter from python_pip_packages role. * Fixing up the ce_ansible role for Ansible 12. * Removing unnecessary from_json filter from CloudFront acc ID lookup. * Trying to fix AWS standalone builds. --- roles/aws/aws_ec2_with_eip/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/aws/aws_ec2_with_eip/tasks/main.yml b/roles/aws/aws_ec2_with_eip/tasks/main.yml index 7f13d4d1f..d03ddcc5c 100644 --- a/roles/aws/aws_ec2_with_eip/tasks/main.yml +++ b/roles/aws/aws_ec2_with_eip/tasks/main.yml @@ -15,7 +15,7 @@ ansible.builtin.set_fact: _aws_hostname: "{{ item }}" with_inventory_hostnames: - - "{{ aws_ec2_with_eip.hostname }}" + - "_{{ aws_ec2_with_eip.hostname|regex_replace('-', '_') }}" # Subnet ID is stored in ce-provision's data directory - name: Ensure server data directory exists. @@ -133,7 +133,7 @@ volume_type: "{{ aws_ec2_with_eip.root_volume_type }}" encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}" register: _aws_ec2_with_eip_instances - when: (_aws_hostname | length == 0) or (_aws_hostname == aws_ec2_with_eip.hostname) or aws_ec2_with_eip.force + when: (_aws_hostname|length == 0) or aws_ec2_with_eip.force # This task deliberately omits `image_id` so it cannot create a new instance, only refresh the state of an existing one. - name: Refresh EC2 instance. @@ -159,7 +159,7 @@ volume_type: "{{ aws_ec2_with_eip.root_volume_type }}" encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}" register: _aws_ec2_with_eip_instances - when: (_aws_hostname | length > 0) or (_aws_hostname != aws_ec2_with_eip.hostname) or not aws_ec2_with_eip.force + when: (_aws_hostname|length > 0) or not aws_ec2_with_eip.force - name: Check if we have an existing EIP. amazon.aws.ec2_eip_info: From f62404c586fc54eab3c0e3f34de22055da135950 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 1 Oct 2025 15:33:29 +0200 Subject: [PATCH 58/61] Fixing standalone EC2 playbooks. --- plays/aws_ec2_standalone/ec2.yml | 2 +- plays/aws_ec2_standalone/launch.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plays/aws_ec2_standalone/ec2.yml b/plays/aws_ec2_standalone/ec2.yml index e64a9c7c7..bab077c83 100644 --- a/plays/aws_ec2_standalone/ec2.yml +++ b/plays/aws_ec2_standalone/ec2.yml @@ -1,6 +1,6 @@ --- # First step. Spin up a "blank" instance and add the controller user and Ansible via user-data. -- hosts: "{{ _aws_resource_name }}" +- hosts: "_{{ _aws_resource_name | regex_replace('-', '_') }}" connection: local become: false diff --git a/plays/aws_ec2_standalone/launch.yml b/plays/aws_ec2_standalone/launch.yml index 5f207ca44..cb1ed7373 100644 --- a/plays/aws_ec2_standalone/launch.yml +++ b/plays/aws_ec2_standalone/launch.yml @@ -25,7 +25,7 @@ - "_{{ _aws_resource_name | regex_replace('-', '_') }}" - name: If an Ansible host is not found, create it so we can execute EC2 orchestration. ansible.builtin.add_host: - name: "{{ _aws_resource_name }}" + name: "_{{ _aws_resource_name | regex_replace('-', '_') }}" groups: "_new_servers" when: _aws_hostname | length == 0 - ansible.builtin.import_role: From 2d885f00313a65db1480ba9b996503bfab47db3e Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 1 Oct 2025 15:43:56 +0200 Subject: [PATCH 59/61] Adding hostname print out for debug. 
--- roles/aws/aws_ec2_with_eip/tasks/main.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/roles/aws/aws_ec2_with_eip/tasks/main.yml b/roles/aws/aws_ec2_with_eip/tasks/main.yml index d03ddcc5c..5a50c15b0 100644 --- a/roles/aws/aws_ec2_with_eip/tasks/main.yml +++ b/roles/aws/aws_ec2_with_eip/tasks/main.yml @@ -17,6 +17,10 @@ with_inventory_hostnames: - "_{{ aws_ec2_with_eip.hostname|regex_replace('-', '_') }}" +- name: Check the hostname. + ansible.builtin.debug: + msg: "Ansible hostname set to: {{ _aws_hostname }}" + # Subnet ID is stored in ce-provision's data directory - name: Ensure server data directory exists. ansible.builtin.file: From fecad193fa871216fd9c48a438eb2c9e83dc03bd Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 1 Oct 2025 16:11:02 +0200 Subject: [PATCH 60/61] Adding back in the hostname check. --- roles/aws/aws_ec2_with_eip/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/aws/aws_ec2_with_eip/tasks/main.yml b/roles/aws/aws_ec2_with_eip/tasks/main.yml index 5a50c15b0..224d4b5c6 100644 --- a/roles/aws/aws_ec2_with_eip/tasks/main.yml +++ b/roles/aws/aws_ec2_with_eip/tasks/main.yml @@ -137,7 +137,7 @@ volume_type: "{{ aws_ec2_with_eip.root_volume_type }}" encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}" register: _aws_ec2_with_eip_instances - when: (_aws_hostname|length == 0) or aws_ec2_with_eip.force + when: (_aws_hostname|length == 0) or (_aws_hostname == aws_ec2_with_eip.hostname|regex_replace('-', '_')) or aws_ec2_with_eip.force # This task deliberately omits `image_id` so it cannot create a new instance, only refresh the state of an existing one. - name: Refresh EC2 instance. @@ -163,7 +163,7 @@ volume_type: "{{ aws_ec2_with_eip.root_volume_type }}" encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}" register: _aws_ec2_with_eip_instances - when: (_aws_hostname|length > 0) or not aws_ec2_with_eip.force + when: (_aws_hostname|length > 0) or (_aws_hostname != aws_ec2_with_eip.hostname|regex_replace('-', '_')) or not aws_ec2_with_eip.force - name: Check if we have an existing EIP. amazon.aws.ec2_eip_info: From 7f7afdaf81bc5bd6f8593ccab3e9eb14e094c367 Mon Sep 17 00:00:00 2001 From: Greg Harvey Date: Wed, 1 Oct 2025 16:17:53 +0200 Subject: [PATCH 61/61] Fixing AWS hostname variable in comparisons. --- roles/aws/aws_ec2_with_eip/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/aws/aws_ec2_with_eip/tasks/main.yml b/roles/aws/aws_ec2_with_eip/tasks/main.yml index 224d4b5c6..6ac8dee5a 100644 --- a/roles/aws/aws_ec2_with_eip/tasks/main.yml +++ b/roles/aws/aws_ec2_with_eip/tasks/main.yml @@ -137,7 +137,7 @@ volume_type: "{{ aws_ec2_with_eip.root_volume_type }}" encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}" register: _aws_ec2_with_eip_instances - when: (_aws_hostname|length == 0) or (_aws_hostname == aws_ec2_with_eip.hostname|regex_replace('-', '_')) or aws_ec2_with_eip.force + when: (_aws_hostname|length == 0) or (_aws_hostname == '_' + aws_ec2_with_eip.hostname|regex_replace('-', '_')) or aws_ec2_with_eip.force # This task deliberately omits `image_id` so it cannot create a new instance, only refresh the state of an existing one. - name: Refresh EC2 instance. 
@@ -163,7 +163,7 @@ volume_type: "{{ aws_ec2_with_eip.root_volume_type }}" encrypted: "{{ aws_ec2_with_eip.root_volume_encrypted }}" register: _aws_ec2_with_eip_instances - when: (_aws_hostname|length > 0) or (_aws_hostname != aws_ec2_with_eip.hostname|regex_replace('-', '_')) or not aws_ec2_with_eip.force + when: (_aws_hostname|length > 0) or (_aws_hostname != '_' + aws_ec2_with_eip.hostname|regex_replace('-', '_')) or not aws_ec2_with_eip.force - name: Check if we have an existing EIP. amazon.aws.ec2_eip_info:
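The last few patches (58 through 61) converge on a single naming convention: the Ansible inventory name for a standalone EC2 host is the machine hostname with a leading underscore and hyphens replaced by underscores, and both the play `hosts:` pattern and the `_aws_hostname` comparisons are updated to use it. The short standalone play below is an illustrative sketch of that transformation only; it is not part of ce-provision, and `example_hostname` is a hypothetical variable used purely for demonstration.

- name: Illustrate the inventory naming convention used in the patches above.
  hosts: localhost
  gather_facts: false
  vars:
    example_hostname: "web-01-example"  # hypothetical hostname, for illustration only
  tasks:
    - name: Print the derived inventory name (leading underscore, hyphens to underscores).
      ansible.builtin.debug:
        msg: "_{{ example_hostname | regex_replace('-', '_') }}"

Running this against localhost prints "_web_01_example". Normalising names this way is likely motivated by Ansible's handling of group and host names: hyphens are not valid in names used as variable-style identifiers, so keeping a single underscore-only form avoids the mismatches the earlier revisions of these `when:` conditions were hitting.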