From de97a90412ac057a84595fc50bebba92750cfbeb Mon Sep 17 00:00:00 2001
From: Amadeus Podvratnik
Date: Fri, 17 Feb 2023 16:14:04 +0100
Subject: [PATCH] feat: Add NAT-based networking support besides macvtap (#77)

This code change adds NAT support for the case where the KVM network mode is NAT and the installation is run from remote (e.g. from your laptop). With NAT, the bastion and cluster nodes use 192.168.x.x addresses, which are usually not accessible from remote. To access those addresses an SSH tunnel is needed, where usually the KVM host acts as the jumphost. These new features add new variables to all.yaml to specify the network mode (NAT) and the required variables for the jumphost. If the network mode is set to NAT, a section containing the bastion node and the matching jumphost will be added to the SSH config file.

In addition, many other small fixes and updates related to enabling NAT-based networking are included. Please read through these changes, as leaving the new variables undefined will most likely cause errors.

---------

Signed-off-by: Amadeus Podvratnik
Signed-off-by: Jacob Emery
Co-authored-by: Klaus Smolin <88041391+smolin-de@users.noreply.github.com>
Co-authored-by: Jacob Emery
---
 docs/prerequisites.md | 13 ++++-
 docs/set-variables-group-vars.md | 14 ++++-
 .../default/group_vars/all.yaml.template | 18 +++++-
 playbooks/0_setup.yaml | 2 +-
 playbooks/3_setup_kvm_host.yaml | 54 +++++++++++++++---
 playbooks/5_setup_bastion.yaml | 57 ++++++++++++++++++-
 roles/create_bastion/tasks/main.yaml | 7 +--
 .../templates/bastion-ks.cfg.j2 | 3 +-
 roles/create_bootstrap/tasks/main.yaml | 6 +-
 roles/create_compute_nodes/tasks/main.yaml | 32 +++++------
 roles/create_control_nodes/tasks/main.yaml | 36 ++++++------
 roles/dns/tasks/initial-resolv.yaml | 24 ++++++++
 roles/dns/tasks/main.yaml | 25 ++++----
 roles/dns/templates/initial-resolv.conf.j2 | 2 +
 roles/dns/templates/resolv.conf.j2 | 2 +-
 roles/set_inventory/tasks/main.yaml | 8 ++-
 roles/set_inventory/templates/hosts.j2 | 5 ++
 roles/ssh_add_config/tasks/main.yaml | 19 +++++++
 roles/ssh_copy_id/tasks/main.yaml | 53 ++++++++++++-----
 .../ssh_copy_id/templates/ssh-copy-id.exp.j2 | 2 +-
 .../wait_for_install_complete/tasks/main.yaml | 23 +++++---
 21 files changed, 304 insertions(+), 101 deletions(-)
 create mode 100644 roles/dns/tasks/initial-resolv.yaml
 create mode 100644 roles/dns/templates/initial-resolv.conf.j2
 create mode 100644 roles/ssh_add_config/tasks/main.yaml

diff --git a/docs/prerequisites.md b/docs/prerequisites.md
index 6ea1f961..b58e4241 100644
--- a/docs/prerequisites.md
+++ b/docs/prerequisites.md
@@ -55,7 +55,16 @@ ansible-galaxy collection install ibm.ibm_zhmc
```
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
```
- * and [Xcode](https://apps.apple.com/us/app/xcode/id497799835?mt=12):
+ * [Xcode](https://apps.apple.com/us/app/xcode/id497799835?mt=12):
```
xcode-select --install
-```
\ No newline at end of file
+```
+## Jumphost for NAT network
+* If NAT is used for the KVM network instead of macvtap, an SSH tunnel through a jumphost is required to access the OCP cluster. Configuring the SSH tunnel requires the expect package on the jumphost; expect is installed during the setup of the bastion (5_setup_bastion.yaml playbook).
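+To set up that tunnel, the 5_setup_bastion.yaml playbook adds a jumphost section to the controller's ~/.ssh/config so that the bastion is reached via ProxyJump. As a rough illustration only, using the example values from this documentation (jumphost kvm-host-01 at 192.168.10.1, bastion at 192.168.10.3, user admin; the key path is an assumption based on the example Ansible key name), the generated block would look something like:
+```
+Host kvm-host-01
+    HostName 192.168.10.1
+    User admin
+    IdentityFile ~/.ssh/ansible-ocpz
+Host 192.168.10.3
+    HostName 192.168.10.3
+    User admin
+    IdentityFile ~/.ssh/ansible-ocpz
+    ProxyJump kvm-host-01
+```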
+If the playbook cannot install additional packages there (e.g. because of missing access), install expect manually on the jumphost with the following command:
+```
+yum install expect
+```
+In addition, make sure that python3 is installed on the jumphost; otherwise Ansible might fail to run its tasks. You can install python3 manually with the following command:
+```
+yum install python3
+```
diff --git a/docs/set-variables-group-vars.md b/docs/set-variables-group-vars.md
index 9a43c650..a5bf4ecd 100644
--- a/docs/set-variables-group-vars.md
+++ b/docs/set-variables-group-vars.md
@@ -16,6 +16,7 @@ **Variable Name** | **Description** | **Example**
:--- | :--- | :---
**env.z.high_availability** | Is this cluster spread across three LPARs? If yes, mark True. If not (just in
one LPAR), mark False | True
+**env.z.ip_forward** | Specifies whether IP forwarding is enabled when the NAT network is selected. If ip_forward is set to 0, the installed OCP cluster will not be able to access external services. This setting is configured during the 3_setup_kvm_host playbook. If NAT is configured only after that playbook has already run, enable IP forwarding manually before the bastion is created, configured or reconfigured, by running the 3_setup_kvm_host playbook with the parameter --tags cfg_ip_forward. | 1
**env.z.lpar1.create** | To have Ansible create an LPAR and install RHEL on it for the KVM
host, mark True. If using a pre-existing LPAR with RHEL already
installed, mark False. | True **env.z.lpar1.hostname** | The hostname of the KVM host. | kvm-host-01 **env.z.lpar1.ip** | The IPv4 address of the KVM host. | 192.168.10.1 @@ -59,12 +60,13 @@ **env.bastion.resources.vcpu** | How many virtual CPUs would you like to allocate to the bastion? Recommended 4 or more. | 4 **env.bastion.networking.ip** | IPv4 address for the bastion. | 192.168.10.3 **env.bastion.networking.hostname** | Hostname of the bastion. Will be combined with
env.bastion.networking.base_domain to create a Fully Qualified Domain Name (FQDN). | ocpz-bastion +**env.bastion.networking.base_
domain** | Base domain that, when combined with the hostname, creates a fully-qualified
domain name (FQDN) for the bastion. | ihost.com
**env.bastion.networking.
subnetmask** | Subnet of the bastion. | 255.255.255.0 **env.bastion.networking.gateway** | IPv4 of he bastion's gateway server. | 192.168.10.0 **env.bastion.networking.name
server1** | IPv4 address of the server that resolves the bastion's hostname. | 192.168.10.200 **env.bastion.networking.name
server2** | (Optional) A second IPv4 address that resolves the bastion's hostname. | 192.168.10.201 +**env.bastion.networking.forwarder** | What IPv4 address will be used to make external DNS calls for the bastion? Can use 1.1.1.1 or 8.8.8.8 as defaults. | 8.8.8.8 **env.bastion.networking.interface** | Name of the networking interface on the bastion from Linux's perspective. Most likely enc1. | enc1 -**env.bastion.networking.base_
domain** | Base domain that, when combined with the hostname, creates a fully-qualified
domain name (FQDN) for the bastion? | ihost.com **env.bastion.access.user** | What would you like the admin's username to be on the bastion?
If root, make pass and root_pass vars the same. | admin **env.bastion.access.pass** | The password to the bastion's admin user. If using root, make
pass and root_pass vars the same. | cH4ngeM3! **env.bastion.access.root_pass** | The root password for the bastion. If using root, make
pass and root_pass vars the same. | R0OtPa$s! @@ -80,7 +82,7 @@ **env.cluster.networking.base_domain** | The site name, where is the cluster being hosted? This will be combined with the metadata_name
and hostnames to create FQDNs. | ihost.com **env.cluster.networking.nameserver1** | IPv4 address that the cluster get its hostname resolution from. If env.bastion.options.dns
is True, this should be the IP address of the bastion. | 192.168.10.200 **env.cluster.networking.nameserver2** | (Optional) A second IPv4 address will the cluster get its hostname resolution from? If env.bastion.options.dns
is True, this should be left commented out. | 192.168.10.201 -**env.cluster.networking.forwarder** | What IPv4 address will be used to make external DNS calls? Can use 1.1.1.1 or 8.8.8.8 as defaults. | 8.8.8.8 +**env.cluster.networking.forwarder** | What IPv4 address will be used to make external DNS calls for the cluster? Can use 1.1.1.1 or 8.8.8.8 as defaults. | 8.8.8.8 ## 7 - Bootstrap Node **Variable Name** | **Description** | **Example** @@ -159,4 +161,10 @@ **env.timezone** | Which timezone would you like Red Hat Enterprise Linux to use? A list of available timezone
options can be found [here](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones). | America/New_York
**env.ansible_key_name** | (Optional) Name of the SSH key that Ansible will use to connect to hosts. | ansible-ocpz
**env.ocp_key_name** | Comment to describe the SSH key used for OCP. Arbitrary value. | OCPZ-01 key
-**env.bridge_name** | (Optional) Name of the macvtap bridge that will be created on the KVM host. | macvtap-net
+**env.bridge_name** | (Optional) Name of the macvtap bridge that will be created on the KVM host or, in case of NAT, the name of the NAT network definition (usually 'default'). If NAT is used and a jumphost is needed, the parameters network_mode, jumphost.name, jumphost.user and jumphost.pass must be specified as well. When using the default (NAT) network, verify that its configured IP range does not interfere with the IPs defined for the control and compute nodes. Modify the default network's DHCP range setting to prevent issues with VMs using DHCP and OCP nodes having fixed IPs. | macvtap-net
+**env.network_mode** | (Optional) If the network mode is NAT and the installation is executed from remote (e.g. your laptop), a jumphost needs to be defined so that the installation can reach the bastion host. If macvtap is used for networking, this variable should be empty. | NAT
+**env.jumphost.name** | (Optional) If env.network_mode is set to 'NAT', the name of the jumphost (e.g. the name of the KVM host, if it is used as the jumphost). | kvm-host-01
+**env.jumphost.ip** | (Optional) The IP address of the jumphost. | 192.168.10.1
+**env.jumphost.user** | (Optional) The user name used to log in to the jumphost. | admin
+**env.jumphost.pass** | (Optional) The password for that user on the jumphost. | ch4ngeMe!
+**env.jumphost.path_to_keypair** | (Optional) The absolute path to the public key file on the jumphost that will be copied to the bastion.
| /home/admin/.ssh/id_rsa.pub diff --git a/inventories/default/group_vars/all.yaml.template b/inventories/default/group_vars/all.yaml.template index 2ce69286..00a0f6af 100644 --- a/inventories/default/group_vars/all.yaml.template +++ b/inventories/default/group_vars/all.yaml.template @@ -11,6 +11,7 @@ env: # Section 2 - LPAR(s) z: high_availability: False + ip_forward: #X lpar1: create: True hostname: #X @@ -57,12 +58,13 @@ env: networking: ip: #X hostname: #X + base_domain: #X subnetmask: #X gateway: #X nameserver1: #X # nameserver2: + forwarder: 1.1.1.1 interface: #X - base_domain: #X access: user: #X pass: #X @@ -79,6 +81,8 @@ env: networking: metadata_name: #X base_domain: #X + subnetmask: #X + gateway: #X nameserver1: #X # nameserver2: forwarder: 1.1.1.1 @@ -148,7 +152,7 @@ env: # Section 11 - (Optional) Packages pkgs: galaxy: [ ibm.ibm_zhmc, community.general, community.crypto, ansible.posix, community.libvirt ] - controller: [ openssh, expect ] + controller: [ openssh, expect, sshuttle ] kvm: [ libguestfs, libvirt-client, libvirt-daemon-config-network, libvirt-daemon-kvm, cockpit-machines, libvirt-devel, virt-top, qemu-kvm, python3-lxml, cockpit, lvm2 ] bastion: [ haproxy, httpd, bind, bind-utils, expect, firewalld, mod_ssl, python3-policycoreutils, rsync ] @@ -183,4 +187,12 @@ env: ansible_key_name: ansible-ocpz ocp_ssh_key_comment: OpenShift key bridge_name: macvtap - + network_mode: + +#jumphost if network mode is NAT + jumphost: + name: + ip: + user: + pass: + path_to_keypair: diff --git a/playbooks/0_setup.yaml b/playbooks/0_setup.yaml index 659b9387..b57662a2 100644 --- a/playbooks/0_setup.yaml +++ b/playbooks/0_setup.yaml @@ -59,7 +59,7 @@ - "{{ inventory_dir }}/group_vars/all.yaml" vars: packages: "{{ env.pkgs.controller }}" - ssh_target: [ "{{ env.ftp.ip }}", "{{ env.ftp.user }}", "{{ env.ftp.pass }}" ] + ssh_target: [ "{{ env.ftp.ip }}", "{{ env.ftp.user }}", "{{ env.ftp.pass }}", "{{ path_to_key_pair }}" ] roles: - install_packages - ssh_key_gen diff --git a/playbooks/3_setup_kvm_host.yaml b/playbooks/3_setup_kvm_host.yaml index 488b16b8..46257b94 100644 --- a/playbooks/3_setup_kvm_host.yaml +++ b/playbooks/3_setup_kvm_host.yaml @@ -9,7 +9,7 @@ vars_files: - "{{ inventory_dir }}/group_vars/all.yaml" vars: - ssh_target: ["{{ env.z.lpar1.ip }}","{{ env.z.lpar1.user }}","{{ env.z.lpar1.pass }}"] + ssh_target: ["{{ env.z.lpar1.ip }}","{{ env.z.lpar1.user }}","{{ env.z.lpar1.pass }}","{{ path_to_key_pair }}"] tasks: - name: Include vars for the KVM host. include_vars: @@ -27,7 +27,7 @@ vars_files: - "{{ inventory_dir }}/group_vars/all.yaml" vars: - ssh_target: ["{{ env.z.lpar2.ip }}","{{ env.z.lpar2.user }}","{{ env.z.lpar2.pass }}"] + ssh_target: ["{{ env.z.lpar2.ip }}","{{ env.z.lpar2.user }}","{{ env.z.lpar2.pass }}","{{ path_to_key_pair }}"] tasks: - name: Include vars for second KVM host. include_vars: @@ -48,7 +48,7 @@ vars_files: - "{{ inventory_dir }}/group_vars/all.yaml" vars: - ssh_target: ["{{ env.z.lpar3.ip }}","{{ env.z.lpar3.user }}","{{ env.z.lpar3.pass }}"] + ssh_target: ["{{ env.z.lpar3.ip }}","{{ env.z.lpar3.user }}","{{ env.z.lpar3.pass }}","{{ path_to_key_pair }}"] tasks: - name: Include vars for third KVM host. 
include_vars: @@ -67,23 +67,59 @@ gather_facts: true become: true vars: - packages: "{{ env.pkgs.kvm}}" + packages: "{{ env.pkgs.kvm }}" roles: - { role: attach_subscription, when: env.redhat.username is defined and env.redhat.password is defined } - install_packages + - httpd post_tasks: - - name: Enable cockpit console - command: systemctl enable --now cockpit.socket - + - name: Add ports to firewall + tags: firewall-libvirt + ansible.posix.firewalld: + port: 80/tcp + permanent: yes + state: enabled + - name: Start and enable libvirt - service: + tags: firewall-libvirt + ansible.builtin.service: name: libvirtd enabled: yes state: started + - name: Permit traffic in libvirt zone + tags: firewall-libvirt + ansible.posix.firewalld: + service: http + permanent: yes + state: enabled + zone: libvirt + immediate: true + + - name: Enable cockpit console + ansible.builtin.command: systemctl enable --now cockpit.socket + +- name: Configure ip_forward in case of NAT + hosts: kvm_host + tags: cfg_ip_forward, section_3 + gather_facts: true + become: true + vars_files: + - "{{ inventory_dir }}/group_vars/all.yaml" + tasks: + - name: Configure ip_forward in case of network "NAT" + tags: cfg_ip_forward + ansible.posix.sysctl: + name: net.ipv4.ip_forward + value: "{{ env.z.ip_forward }}" + sysctl_set: true + state: present + reload: true + when: env.network_mode | upper == 'NAT' + - hosts: kvm_host tags: setup, section_3 become: true roles: - configure_storage - - macvtap \ No newline at end of file + - { role: macvtap, when: env.network_mode | upper != 'NAT' } diff --git a/playbooks/5_setup_bastion.yaml b/playbooks/5_setup_bastion.yaml index 78db013b..41bbd4f4 100644 --- a/playbooks/5_setup_bastion.yaml +++ b/playbooks/5_setup_bastion.yaml @@ -1,6 +1,6 @@ --- -- name: 5 setup bastion - copy SSH key to access bastion +- name: Copy ssh key to jumphost if network is NAT and jumphost defined, and add jumphost section to ssh config. hosts: localhost tags: ssh, ssh_copy_id, section_1 connection: local @@ -9,7 +9,54 @@ vars_files: - "{{ inventory_dir }}/group_vars/all.yaml" vars: - ssh_target: ["{{ env.bastion.networking.ip }}", "{{ env.bastion.access.user }}", "{{ env.bastion.access.pass }}"] + ssh_target: ["{{ env.jumphost.ip }}", "{{ env.jumphost.user }}", "{{ env.jumphost.pass }}", "{{ path_to_key_pair }}"] + roles: + - { role: ssh_copy_id, tags: ssh_copy_id, ssh, when: (env.network_mode | upper == "NAT") and ( env.jumphost.ip is not none ) } + - { role: ssh_add_config, tags: ssh_copy_id, ssh, when: (env.network_mode | upper == "NAT") and ( env.jumphost.ip is not none ) } + +- name: Configure jumphost if network mode == 'NAT' + hosts: jumphost + tags: ssh, ssh_copy_id, section_1 + become: false + gather_facts: true + vars_files: + - "{{ inventory_dir }}/group_vars/all.yaml" + vars: + ssh_target: ["{{ env.bastion.networking.ip }}", "{{ env.bastion.access.user }}", "{{ env.bastion.access.pass }}","{{ env.jumphost.path_to_keypair }}"] + pre_tasks: + - name: Generate an OpenSSH keypair with the default values (4096 bits, RSA), if using jumphost for NAT. + become: false + tags: ssh_key_gen, ssh, section_1 + community.crypto.openssh_keypair: + path: "{{ env.jumphost.path_to_keypair.split('.')[:-1] | join('.') }}" + passphrase: "" + regenerate: never + when: (env.network_mode | upper == "NAT") and ( env.jumphost.ip is not none ) + - block: + - name: Check if 'expect' is installed on jumphost, for use in ssh-copy-id role for NAT. 
+ package_facts: + failed_when: "'expect' not in ansible_facts.packages" + when: (env.network_mode | upper == "NAT") and ( env.jumphost.ip is not none ) + rescue: + - name: Package 'expect' must be installed on the jumphost, attempting to install it. #Using 'block' and 'rescue' to avoid running the 'package' module (which requires 'sudo') unless necessary. + become: true + package: + name: expect + when: (env.network_mode | upper == "NAT") and ( env.jumphost.ip is not none ) + roles: + - { role: ssh_copy_id, ssh, when: (env.network_mode | upper == "NAT") and ( env.jumphost.ip is not none ) } + post_tasks: + - meta: clear_facts + +- name: 5 setup bastion - copy SSH key from localhost to access bastion. + hosts: localhost + tags: ssh, ssh_copy_id, section_1 + become: false + gather_facts: true + vars_files: + - "{{ inventory_dir }}/group_vars/all.yaml" + vars: + ssh_target: ["{{ env.bastion.networking.ip }}", "{{ env.bastion.access.user }}", "{{ env.bastion.access.pass }}","{{ path_to_key_pair }}"] roles: - ssh_copy_id @@ -21,6 +68,10 @@ packages: "{{ env.pkgs.bastion }}" vars_files: - "{{ inventory_dir }}/group_vars/all.yaml" + pre_tasks: + - import_role: + name: dns + tasks_from: initial-resolv.yaml roles: - { role: attach_subscription, when: env.redhat.username is defined and env.redhat.password is defined } - install_packages @@ -85,6 +136,7 @@ loop: - issued - private + when: env.z.high_availability == True - name: Copy certificates and keys from controller to KVM hosts. tags: openvpn @@ -112,6 +164,7 @@ file: state: absent path: tmp + when: env.z.high_availability == True - hosts: bastion tags: get_ocp, section_3 diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 5b1a5134..29e9a7bb 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -53,7 +53,7 @@ ansible.builtin.command: pwd register: kvm_host_home -- name: Boot and kickstart bastion (up to 3 min). To monitor, login to your KVM host and run 'virsh console ' +- name: Boot and kickstart bastion. 
To monitor, login to your KVM host and run 'virsh console ' tags: create_bastion, virt-install ansible.builtin.shell: | set -o pipefail @@ -72,8 +72,3 @@ --initrd-inject "/{{ kvm_host_home.stdout }}/{{ env.ftp.cfgs_dir }}/{{ env.bastion.networking.hostname }}/bastion-ks.cfg" \ --extra-args "inst.ks=file:/bastion-ks.cfg ip={{ env.bastion.networking.ip }}::{{ env.bastion.networking.gateway }}\ :{{ env.bastion.networking.subnetmask }}:{{ env.bastion.networking.hostname }}:enc1:none console=ttysclp0" - -- name: Waiting 1 minute for automated bastion installation and configuration to complete - tags: create_bastion, virt-install - ansible.builtin.pause: - minutes: 1 diff --git a/roles/create_bastion/templates/bastion-ks.cfg.j2 b/roles/create_bastion/templates/bastion-ks.cfg.j2 index 726e22de..88dd797c 100644 --- a/roles/create_bastion/templates/bastion-ks.cfg.j2 +++ b/roles/create_bastion/templates/bastion-ks.cfg.j2 @@ -34,7 +34,7 @@ eula --agreed # Network information network --bootproto=static --device={{ env.bastion.networking.interface }} --ip={{ env.bastion.networking.ip }} --gateway={{ env.bastion.networking.gateway }} --netmask={{ env.bastion.networking.subnetmask }} --noipv6 --nameserver={{ env.bastion.networking.nameserver1 }}{{ (',' + env.bastion.networking.nameserver2) if env.bastion.networking.nameserver2 is defined else '' }} --activate -network --hostname={{ env.bastion.networking.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} +network --hostname={{ env.bastion.networking.hostname }}.{{ env.cluster.networking.base_domain }} # Firewall and SELinux firewall --enabled --http --ftp --smtp --ssh --port=443,9090 @@ -74,6 +74,7 @@ python3-pip rsync vim wget +network-scripts %end %addon com_redhat_kdump --disable diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 4fb30009..9e8fc5bf 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -18,7 +18,7 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-{{ env.openshift.version }}-{{ env.install_config.control.architecture }},initrd=rhcos-live-initramfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img \ --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda \ coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img \ - ip={{ env.cluster.nodes.bootstrap.ip }}::{{ networking.gateway }}:{{ networking.subnetmask }}:{{ env.cluster.nodes.bootstrap.hostname }}::none:1500 \ + ip={{ env.cluster.nodes.bootstrap.ip }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ env.cluster.nodes.bootstrap.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}::none:1500 \ nameserver={{ env.cluster.networking.nameserver1 }} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} \ coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/bootstrap.ign" \ --graphics none \ @@ -28,9 +28,9 @@ - name: Set bootstrap qcow2 permissions become: true tags: create_bootstrap - command: chmod 600 /var/lib/libvirt/images/{{env.cluster.nodes.bootstrap.vm_name}}.qcow2 + command: chmod 600 /var/lib/libvirt/images/{{ env.cluster.nodes.bootstrap.vm_name }}.qcow2 - name: Set bootstrap qcow2 ownership to qemu become: true tags: 
create_bootstrap - command: chown qemu:qemu /var/lib/libvirt/images/{{env.cluster.nodes.bootstrap.vm_name}}.qcow2 + command: chown qemu:qemu /var/lib/libvirt/images/{{ env.cluster.nodes.bootstrap.vm_name }}.qcow2 diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index e389749c..1d86b398 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -7,12 +7,12 @@ --name {{ env.cluster.nodes.compute.vm_name[i] }} \ --autostart \ --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.compute.disk_size }} \ - --ram {{env.cluster.nodes.compute.ram}} \ + --ram {{ env.cluster.nodes.compute.ram }} \ --cpu host \ - --vcpus {{env.cluster.nodes.compute.vcpu}} \ - --network network={{env.bridge_name}} \ + --vcpus {{ env.cluster.nodes.compute.vcpu }} \ + --network network={{ env.bridge_name }} \ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-{{ env.openshift.version }}-{{ env.install_config.control.architecture }},initrd=rhcos-live-initramfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.bastion.networking.ip}}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img ip={{env.cluster.nodes.compute.ip[i]}}::{{networking.gateway}}:{{networking.subnetmask}}:{{env.cluster.nodes.compute.hostname[i]}}::none:1500 nameserver={{env.cluster.networking.nameserver1}} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{env.bastion.networking.ip}}:8080/ignition/worker.ign" \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img ip={{ env.cluster.nodes.compute.ip[i] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ env.cluster.nodes.compute.hostname[i] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/worker.ign" \ --wait=-1 \ --noautoconsole with_sequence: start=0 end={{ ( env.cluster.nodes.compute.hostname | length ) - 1 }} stride=1 @@ -28,12 +28,12 @@ --name {{ env.cluster.nodes.infra.vm_name[i] }} \ --autostart \ --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.infra.disk_size }} \ - --ram {{env.cluster.nodes.infra.ram}} \ + --ram {{ env.cluster.nodes.infra.ram }} \ --cpu host \ - --vcpus {{env.cluster.nodes.infra.vcpu}} \ - --network network={{env.bridge_name}} \ + --vcpus {{ env.cluster.nodes.infra.vcpu }} \ + --network network={{ env.bridge_name }} \ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-{{ env.openshift.version }}-{{ env.install_config.control.architecture }},initrd=rhcos-live-initramfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.bastion.networking.ip}}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img 
ip={{env.cluster.nodes.infra.ip[i]}}::{{networking.gateway}}:{{networking.subnetmask}}:{{env.cluster.nodes.infra.hostname[i]}}::none:1500 nameserver={{env.cluster.networking.nameserver1}} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{env.bastion.networking.ip}}:8080/ignition/worker.ign" \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img ip={{ env.cluster.nodes.infra.ip[i] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ env.cluster.nodes.infra.hostname[i] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/worker.ign" \ --wait=-1 \ --noautoconsole with_sequence: start=0 end={{ ( env.cluster.nodes.infra.hostname | length ) - 1}} stride=1 @@ -67,12 +67,12 @@ --name {{ compute_name[i] }} \ --autostart \ --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.compute.disk_size }} \ - --ram {{env.cluster.nodes.compute.ram}} \ + --ram {{ env.cluster.nodes.compute.ram }} \ --cpu host \ - --vcpus {{env.cluster.nodes.compute.vcpu}} \ - --network network={{env.bridge_name}} \ + --vcpus {{ env.cluster.nodes.compute.vcpu }} \ + --network network={{ env.bridge_name }} \ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-{{ env.openshift.version }}-{{ env.install_config.control.architecture }},initrd=rhcos-live-initramfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.bastion.networking.ip}}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img ip={{compute_ip[i]}}::{{networking.gateway}}:{{networking.subnetmask}}:{{compute_hostname[i]}}::none:1500 nameserver={{env.cluster.networking.nameserver1}} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{env.bastion.networking.ip}}:8080/ignition/worker.ign" \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img ip={{ compute_ip[i] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ compute_hostname[i] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/worker.ign" \ --wait=-1 \ --noautoconsole loop: "{{ compute_name | zip(compute_hostname, compute_ip) | list }}" @@ -88,12 +88,12 @@ --name {{ infra_name[i] }} \ --autostart \ --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.infra.disk_size }} \ - --ram {{env.cluster.nodes.infra.ram}} \ + --ram {{ env.cluster.nodes.infra.ram }} \ --cpu host \ - --vcpus {{env.cluster.nodes.infra.vcpu}} \ - 
--network network={{env.bridge_name}} \ + --vcpus {{ env.cluster.nodes.infra.vcpu }} \ + --network network={{ env.bridge_name }} \ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-{{ env.openshift.version }}-{{ env.install_config.control.architecture }},initrd=rhcos-live-initramfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.bastion.networking.ip}}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img ip={{infra_ip[i]}}::{{networking.gateway}}:{{networking.subnetmask}}:{{infra_hostname[i]}}::none:1500 nameserver={{env.cluster.networking.nameserver1}} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{env.bastion.networking.ip}}:8080/ignition/worker.ign" \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img ip={{ infra_ip[i] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ infra_hostname[i] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/worker.ign" \ --wait=-1 \ --noautoconsole loop: "{{ infra_name | zip(infra_hostname, infra_ip) | list }}" diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index 1ff99869..cf04fc52 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -7,16 +7,16 @@ --name {{ env.cluster.nodes.control.vm_name[i] }} \ --autostart \ --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.control.disk_size }} \ - --ram {{env.cluster.nodes.control.ram}} \ + --ram {{ env.cluster.nodes.control.ram }} \ --cpu host \ - --vcpus {{env.cluster.nodes.control.vcpu}} \ - --network network={{env.bridge_name}} \ + --vcpus {{ env.cluster.nodes.control.vcpu }} \ + --network network={{ env.bridge_name }} \ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-{{ env.openshift.version }}-{{ env.install_config.control.architecture }},initrd=rhcos-live-initramfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.bastion.networking.ip}}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img ip={{env.cluster.nodes.control.ip[i]}}::{{networking.gateway}}:{{networking.subnetmask}}:{{env.cluster.nodes.control.hostname[i]}}::none:1500 nameserver={{env.cluster.networking.nameserver1}} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{env.bastion.networking.ip}}:8080/ignition/master.ign" \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img ip={{ 
env.cluster.nodes.control.ip[i] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ env.cluster.nodes.control.hostname[i] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/master.ign" \ --graphics none \ --wait=-1 \ --noautoconsole - with_sequence: start=0 end={{(env.cluster.nodes.control.hostname | length) - 1}} stride=1 + with_sequence: start=0 end={{ (env.cluster.nodes.control.hostname | length) - 1 }} stride=1 loop_control: extended: yes index_var: i @@ -29,12 +29,12 @@ --name {{ env.cluster.nodes.control.vm_name[0] }} \ --autostart \ --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.control.disk_size }} \ - --ram {{env.cluster.nodes.control.ram}} \ + --ram {{ env.cluster.nodes.control.ram }} \ --cpu host \ - --vcpus {{env.cluster.nodes.control.vcpu}} \ - --network network={{env.bridge_name}} \ + --vcpus {{ env.cluster.nodes.control.vcpu }} \ + --network network={{ env.bridge_name }} \ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-{{ env.openshift.version }}-{{ env.install_config.control.architecture }},initrd=rhcos-live-initramfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.bastion.networking.ip}}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img ip={{env.cluster.nodes.control.ip[0]}}::{{networking.gateway}}:{{networking.subnetmask}}:{{env.cluster.nodes.control.hostname[0]}}::none:1500 nameserver={{env.cluster.networking.nameserver1}} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{env.bastion.networking.ip}}:8080/ignition/master.ign" \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img ip={{ env.cluster.nodes.control.ip[0] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ env.cluster.nodes.control.hostname[0] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/master.ign" \ --graphics none \ --wait=-1 \ --noautoconsole @@ -47,12 +47,12 @@ --name {{ env.cluster.nodes.control.vm_name[1] }} \ --autostart \ --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.control.disk_size }} \ - --ram {{env.cluster.nodes.control.ram}} \ + --ram {{ env.cluster.nodes.control.ram }} \ --cpu host \ - --vcpus {{env.cluster.nodes.control.vcpu}} \ - --network network={{env.bridge_name}} \ + --vcpus {{ env.cluster.nodes.control.vcpu }} \ + --network network={{ env.bridge_name }} \ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-{{ env.openshift.version }}-{{ env.install_config.control.architecture }},initrd=rhcos-live-initramfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img \ - --extra-args "rd.neednet=1 coreos.inst=yes 
coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.bastion.networking.ip}}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img ip={{env.cluster.nodes.control.ip[1]}}::{{networking.gateway}}:{{networking.subnetmask}}:{{env.cluster.nodes.control.hostname[1]}}::none:1500 nameserver={{env.cluster.networking.nameserver1}} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{env.bastion.networking.ip}}:8080/ignition/master.ign" \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img ip={{ env.cluster.nodes.control.ip[1] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ env.cluster.nodes.control.hostname[1] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/master.ign" \ --graphics none \ --wait=-1 \ --noautoconsole @@ -65,13 +65,13 @@ --name {{ env.cluster.nodes.control.vm_name[2] }} \ --autostart \ --disk pool={{ env.cluster.networking.metadata_name }}-vdisk,size={{ env.cluster.nodes.control.disk_size }} \ - --ram {{env.cluster.nodes.control.ram}} \ + --ram {{ env.cluster.nodes.control.ram }} \ --cpu host \ - --vcpus {{env.cluster.nodes.control.vcpu}} \ - --network network={{env.bridge_name}} \ + --vcpus {{ env.cluster.nodes.control.vcpu }} \ + --network network={{ env.bridge_name }} \ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-{{ env.openshift.version }}-{{ env.install_config.control.architecture }},initrd=rhcos-live-initramfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.bastion.networking.ip}}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img ip={{env.cluster.nodes.control.ip[2]}}::{{networking.gateway}}:{{networking.subnetmask}}:{{env.cluster.nodes.control.hostname[2]}}::none:1500 nameserver={{env.cluster.networking.nameserver1}} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{env.bastion.networking.ip}}:8080/ignition/master.ign" \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{ env.bastion.networking.ip }}:8080/bin/rhcos-live-rootfs-{{ env.openshift.version }}-{{ env.install_config.control.architecture }}.img ip={{ env.cluster.nodes.control.ip[2] }}::{{ env.cluster.networking.gateway }}:{{ env.cluster.networking.subnetmask }}:{{ env.cluster.nodes.control.hostname[2] }}::none:1500 nameserver={{ env.cluster.networking.nameserver1 }} {{ ('--nameserver=' + env.cluster.networking.nameserver2) if env.cluster.networking.nameserver2 is defined else '' }} coreos.inst.ignition_url=http://{{ env.bastion.networking.ip }}:8080/ignition/master.ign" \ --graphics none \ --wait=-1 \ --noautoconsole - when: env.z.high_availability == True and inventory_hostname == env.z.lpar3.hostname + when: env.z.high_availability == True and inventory_hostname == 
env.z.lpar3.hostname \ No newline at end of file diff --git a/roles/dns/tasks/initial-resolv.yaml b/roles/dns/tasks/initial-resolv.yaml new file mode 100644 index 00000000..bb5ab1b5 --- /dev/null +++ b/roles/dns/tasks/initial-resolv.yaml @@ -0,0 +1,24 @@ +- name: Template out bastion's resolv.conf file for initial installation. + tags: resolv + ansible.builtin.template: + src: initial-resolv.conf.j2 + dest: /etc/resolv.conf + owner: root + group: root + mode: "644" + +# NetworkManager modifies our /etc/resolv.conf file on next restart or reboot, we need to disable it +- name: Disable management of /etc/resolv.conf by NetworkManager + tags: resolv + ansible.builtin.copy: + src: 90-dns-none.conf + dest: /etc//NetworkManager/conf.d/90-dns-none.conf + group: root + owner: root + mode: "644" + +- name: Restart network to update changes made to /etc/resolv.conf + tags: resolv + ansible.builtin.service: + name: network + state: restarted \ No newline at end of file diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml index 2cb2ac7d..bb48cf62 100644 --- a/roles/dns/tasks/main.yaml +++ b/roles/dns/tasks/main.yaml @@ -1,4 +1,5 @@ --- + - name: Enable named tags: dns ansible.builtin.systemd: @@ -115,7 +116,13 @@ index_var: i when: env.cluster.nodes.infra.hostname is defined -- name: Template out bastion's resolv.conf file, replacing default +- name: Restart named to update changes made to DNS + tags: dns, resolv + ansible.builtin.systemd: + name: named + state: restarted + +- name: Template out bastion's resolv.conf file, replacing initial resolv.conf tags: dns, resolv ansible.builtin.template: src: resolv.conf.j2 @@ -124,18 +131,8 @@ group: root mode: "644" -- name: Restart named to update changes made to DNS +- name: Restart network to update changes made to /etc/resolv.conf tags: dns, resolv - ansible.builtin.systemd: - name: named + ansible.builtin.service: + name: network state: restarted - -# NetworkManager modifies our /etc/resolv.conf file on next restart or reboot, we need to disable it -- name: Disable management of /etc/resolv.conf by NetworkManager - tags: dns, resolv - ansible.builtin.copy: - src: 90-dns-none.conf - dest: /etc//NetworkManager/conf.d/90-dns-none.conf - group: root - owner: root - mode: "644" diff --git a/roles/dns/templates/initial-resolv.conf.j2 b/roles/dns/templates/initial-resolv.conf.j2 new file mode 100644 index 00000000..5ef84020 --- /dev/null +++ b/roles/dns/templates/initial-resolv.conf.j2 @@ -0,0 +1,2 @@ +search {{ env.bastion.networking.base_domain }} +nameserver {{ env.bastion.networking.forwarder }} \ No newline at end of file diff --git a/roles/dns/templates/resolv.conf.j2 b/roles/dns/templates/resolv.conf.j2 index 0f1ecdb8..e640d670 100644 --- a/roles/dns/templates/resolv.conf.j2 +++ b/roles/dns/templates/resolv.conf.j2 @@ -1,3 +1,3 @@ -search {{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} +search {{ env.cluster.networking.base_domain }} nameserver {{ env.bastion.networking.nameserver1 }} {{ ('nameserver ' + env.bastion.networking.nameserver2) if env.bastion.networking.nameserver2 is defined else '' }} diff --git a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml index 10f13b9e..73b0a182 100644 --- a/roles/set_inventory/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -13,7 +13,13 @@ echo "${ansible_config%/*}/" register: find_project -- name: Template out inventory with localhost, FTP, KVM host, and bastion information +- name: Fail if network_mode is NAT and jumphost 
vars are undefined. + tags: set_inventory + fail: + msg: "Error jumphost vars undefined: when env.network_mode is NAT, you must set all env.jumphost variables." + when: ( env.network_mode | upper == 'NAT' ) and (env.jumphost.name is none or env.jumphost.ip is none or env.jumphost.user is none or env.jumphost.pass is none or env.jumphost.path_to_keypair is none) + +- name: Template out inventory with localhost, FTP, KVM host, jumphost(optional) and bastion information tags: set_inventory template: src: hosts.j2 diff --git a/roles/set_inventory/templates/hosts.j2 b/roles/set_inventory/templates/hosts.j2 index 6fbfa2da..e6480a08 100644 --- a/roles/set_inventory/templates/hosts.j2 +++ b/roles/set_inventory/templates/hosts.j2 @@ -11,3 +11,8 @@ [bastion] {{ env.bastion.networking.hostname }} ansible_host={{ env.bastion.networking.ip }} ansible_user={{ env.bastion.access.user }} ansible_become_password={{ env.bastion.access.pass }} + +{% if ( env.network_mode | upper == 'NAT' ) and ( env.jumphost.name is not none ) and ( env.jumphost.ip is not none ) and ( env.jumphost.user is not none ) and ( env.jumphost.pass is not none ) -%} +{{ '[jumphost]' }} +{{ env.jumphost.name | string + ' ansible_host=' + env.jumphost.ip | string + ' ansible_user=' + env.jumphost.user | string + ' ansible_become_password=' + env.jumphost.pass | string }} +{% endif -%} diff --git a/roles/ssh_add_config/tasks/main.yaml b/roles/ssh_add_config/tasks/main.yaml new file mode 100644 index 00000000..a68b0a6d --- /dev/null +++ b/roles/ssh_add_config/tasks/main.yaml @@ -0,0 +1,19 @@ +--- + +- name: Create ssh config file (or add to an exsting file) to if network mode is NAT + tags: ssh_copy_id, ssh + ansible.builtin.blockinfile: + path: ~/.ssh/config + backup: true + create: true + mode: '0644' + block: | + Host {{ env.jumphost.name }} + HostName {{ env.jumphost.ip }} + User {{ env.jumphost.user }} + IdentityFile {{ path_to_key_pair.split('.')[:-1] | join('.') }} + Host {{ env.bastion.networking.ip }} + HostName {{ env.bastion.networking.ip }} + User {{ env.bastion.access.user }} + IdentityFile {{ path_to_key_pair.split('.')[:-1] | join('.') }} + ProxyJump {{ env.jumphost.name }} diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index f5c08259..e2012ca8 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -4,16 +4,6 @@ tags: ssh_copy_id, ssh include_vars: "{{ inventory_dir }}/group_vars/all.yaml" -- name: Get ansible public key for check in next task - tags: ssh_copy_id, ssh - set_fact: - ans_pub_key: "{{ lookup('file', '~/.ssh/{{ env.ansible_key_name }}.pub') }}" - -- name: Print Ansible public key - tags: ssh_copy_id, ssh - debug: - var: ans_pub_key - - name: Delete SSH key from known hosts if it already exists for idempotency tags: ssh_copy_id, ssh lineinfile: @@ -27,25 +17,62 @@ src: ssh-copy-id.exp.j2 dest: "{{ role_path }}/files/ssh-copy-id-expect-pass.exp" force: yes + delegate_to: 127.0.0.1 -- name: Copy SSH ID to remote host with pre-provided password +- name: Copy expect file to jumphost first, if not running on localhost. + tags: ssh_copy_id, ssh + copy: + src: "{{ role_path }}/files/ssh-copy-id-expect-pass.exp" + dest: "~/.ssh/ssh-copy-id-expect-pass.exp" + when: "inventory_hostname != '127.0.0.1'" + +- name: Print results of copying ssh id to remote host + tags: ssh_copy_id, ssh + debug: + var: ssh_copy + when: "inventory_hostname != '127.0.0.1'" + +- name: Copy SSH ID from controller to remote host with pre-provided password. 
tags: ssh_copy_id, ssh command: "expect {{ role_path }}/files/ssh-copy-id-expect-pass.exp" register: ssh_copy + when: "inventory_hostname == '127.0.0.1'" - name: Print results of copying ssh id to remote host tags: ssh_copy_id, ssh debug: var: ssh_copy + when: "inventory_hostname == '127.0.0.1'" + +- name: Copy SSH ID from jumphost to remote host with pre-provided password. + tags: ssh_copy_id, ssh + command: "expect ~/.ssh/ssh-copy-id-expect-pass.exp" + register: ssh_copy + when: "inventory_hostname != '127.0.0.1'" + +- name: Print results of copying ssh id to remote host + tags: ssh_copy_id, ssh + debug: + var: ssh_copy + when: "inventory_hostname != '127.0.0.1'" + +- name: Delete templated expect script on controller. + tags: ssh_copy_id, ssh + file: + path: "{{ role_path }}/files/ssh-copy-id-expect-pass.exp" + state: absent + delegate_to: 127.0.0.1 -- name: Delete templated expect script +- name: Delete templated expect script on jumphost. tags: ssh_copy_id, ssh file: path: "{{ role_path }}/files/ssh-copy-id-expect-pass.exp" state: absent + when: "inventory_hostname != '127.0.0.1'" -- name: Re-create ssh-copy-id files folder +- name: Ensure ssh-copy-id files folder exists for future runs. tags: ssh_copy_id, ssh file: path: "{{ role_path }}/files/" state: directory + delegate_to: 127.0.0.1 diff --git a/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 b/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 index d6e5bfa5..a80dfff1 100644 --- a/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 +++ b/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 @@ -10,7 +10,7 @@ if {$force_conservative} { } set timeout 20 -spawn ssh-copy-id -f -o StrictHostKeyChecking=no -i {{ path_to_key_pair }} {{ ssh_target[1] }}@{{ ssh_target[0] }} +spawn ssh-copy-id -f -o StrictHostKeyChecking=no -i {{ ssh_target[3] }} {{ ssh_target[1] }}@{{ ssh_target[0] }} expect { "password: " { send -- "{{ ssh_target[2] }}\r" diff --git a/roles/wait_for_install_complete/tasks/main.yaml b/roles/wait_for_install_complete/tasks/main.yaml index 232de9f2..b45a57f6 100644 --- a/roles/wait_for_install_complete/tasks/main.yaml +++ b/roles/wait_for_install_complete/tasks/main.yaml @@ -3,13 +3,15 @@ - name: Almost there! Add host info to /etc/hosts so you can login to the cluster via web browser. 
Ansible Controller sudo password required tags: wait_for_install_complete become: true - lineinfile: + blockinfile: + create: true + backup: true + marker: "# {mark} ANSIBLE MANAGED BLOCK FOR OCP CLUSTER: {{ env.cluster.networking.metadata_name }}" path: /etc/hosts - line: "{{ item }}" - with_items: - - "{{ env.bastion.networking.ip }} oauth-openshift.apps.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}" - - "{{ env.bastion.networking.ip }} console-openshift-console.apps.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}" - - "{{ env.bastion.networking.ip }} api.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}" + block: | + {{ env.bastion.networking.ip }} oauth-openshift.apps.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} + {{ env.bastion.networking.ip }} console-openshift-console.apps.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} + {{ env.bastion.networking.ip }} api.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} delegate_to: 127.0.0.1 - name: Get OCP URL @@ -23,10 +25,17 @@ register: ocp_passwd changed_when: false +- name: "Additional step, if using NAT" + tags: wait_for_install_complete + debug: + msg: "NAT USERS ONLY: Create SSH tunnel to cluster, i.e run command in terminal window from controller: 'sshuttle -r {{ env.bastion.access.user }}@{{ env.bastion.networking.ip }} 192.168.122.0/15 --dns'" + when: ( env.network_mode | upper == "NAT" ) + changed_when: false + - name: Congratulations! OpenShift installation complete. Use the information below for first-time login via web browser. tags: wait_for_install_complete command: "echo {{ item }}" - loop: + loop: - " URL: {{ ocp_url }} " - " Username: kubeadmin " - " Password: {{ ocp_passwd.stdout }} "
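For reference, the blockinfile task above writes a marked block to /etc/hosts on the controller. With the example values used in the documentation (bastion at 192.168.10.3, base domain ihost.com) and a placeholder cluster metadata name of ocpz (substitute your own env.cluster.networking.metadata_name), the resulting block would look roughly like this:
```
# BEGIN ANSIBLE MANAGED BLOCK FOR OCP CLUSTER: ocpz
192.168.10.3 oauth-openshift.apps.ocpz.ihost.com
192.168.10.3 console-openshift-console.apps.ocpz.ihost.com
192.168.10.3 api.ocpz.ihost.com
# END ANSIBLE MANAGED BLOCK FOR OCP CLUSTER: ocpz
```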