From 3a52bcf2a93cadaaaca7832d8ae5a867db65bbec Mon Sep 17 00:00:00 2001 From: Phillip Wilson Date: Thu, 17 Jun 2021 11:04:02 -0400 Subject: [PATCH 001/885] created setup user install playbook --- ansible.cfg | 5 ++++ files/sudoers_zcts | 2 ++ roles/defaults/{main.yml => placeholder.yml} | 0 setup-mgmt-user.yml | 28 ++++++++++++++++++++ test01.yml | 0 test1.yml | 11 -------- 6 files changed, 35 insertions(+), 11 deletions(-) create mode 100644 ansible.cfg create mode 100644 files/sudoers_zcts rename roles/defaults/{main.yml => placeholder.yml} (100%) create mode 100644 setup-mgmt-user.yml create mode 100644 test01.yml delete mode 100644 test1.yml diff --git a/ansible.cfg b/ansible.cfg new file mode 100644 index 00000000..5d2ee146 --- /dev/null +++ b/ansible.cfg @@ -0,0 +1,5 @@ +[defaults] +inventory = inventory +private_key_file = ~/.ssh/ansible + + diff --git a/files/sudoers_zcts b/files/sudoers_zcts new file mode 100644 index 00000000..054d9c55 --- /dev/null +++ b/files/sudoers_zcts @@ -0,0 +1,2 @@ +zcts ALL=(ALL) NOPASSWD: ALL + diff --git a/roles/defaults/main.yml b/roles/defaults/placeholder.yml similarity index 100% rename from roles/defaults/main.yml rename to roles/defaults/placeholder.yml diff --git a/setup-mgmt-user.yml b/setup-mgmt-user.yml new file mode 100644 index 00000000..54dcba13 --- /dev/null +++ b/setup-mgmt-user.yml @@ -0,0 +1,28 @@ +--- + + +- hosts: all + become: true + tasks: + + - name: create zcts user + tags: always + user: + name: zcts + groups: root + + - name: add ssh key for zcts user + tags: always + authorized_key: + user: zcts + key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKTf6OEBNCzusceF3/dTWK9rIACxOw009HMkH//AuE8h zcts default" + + - name: add sudoers file for zcts user + tags: always + copy: + src: sudoers_zcts + dest: /etc/sudoers.d/zcts + owner: root + group: root + mode: 0440 + diff --git a/test01.yml b/test01.yml new file mode 100644 index 00000000..e69de29b diff --git a/test1.yml b/test1.yml deleted file mode 100644 index 
c25b92bb..00000000 --- a/test1.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- hosts: all - become: true - tasks: - - - name add ssh key for ansible - tags: always - authorized_key: - user: ansible - key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDh3L0INDTn6OGPynIaHDuD245fAQsMY4vNjHP+edVxTOfIGwm6I77mMfT8b8RSZR6C7k+5Ye9QvNadtkxCb0iadzhZiGcwS4zlSTkOnU7HVaR8sQ24nk+Hmo57bUyEON8HlKg83hMlyorCCY2uP+pGJz8CJ+oqRPKBEDGm263+YHKIxokRLRa9cCAlNNvxqlpsyZcwid2GGYKBi/bbkJH+l/3Lc/QLfHzcHi64BzLHsiV9rsWPVqX8DNgs1+z4tOEiVMAXoI6F/rBFNQ1oofF1VyByIndp7XWyYDTbO7mrZayXCJzCGqo7W23Xn0Osx0zczYVigkEZDhBuOvIAseZET8vqzWjMf3DLQsf2Ai0RxJreOzf7MCUTs3YuvwKTsUjgVZHRaU2ZyPubbyPwgWMo0A/JjdZLRym7tQnMzszxJJcF66TS8CArX3+NbXOfm3O6LYxFUpAs1vAydc0hd5ciQ4Tb79Off9BZZ2ET7UKzXiXi9Vp2Izm91GBx88s/1crQSG5LgeQWltfCv2ak+VD88r3Y3kr4iK+kHLbCinKtp9mdRuoBpejZn01EHl3eSpJAsJIH56c1t1Pr6GOM6DX7FRh3DEQUfsRCsiVgjyo00B9i+NPruN5tKpN8OBbZB/fdEvEWpnbelj5WTf7IzGBmt7z48crD8zE+tae01Vj0Vw== ansible" From 488f8f80dd164808f71e011ec7d877205998bfd6 Mon Sep 17 00:00:00 2001 From: Phillip Wilson Date: Thu, 17 Jun 2021 18:29:20 -0400 Subject: [PATCH 002/885] added sample copy image file to test logic in playbook --- copy-image.yml | 28 ++++++++++++++++++++ files/rhel-guest-image.txt | 1 + files/test.txt | 1 + roles/defaults/{placeholder.yml => main.yml} | 5 ++-- 4 files changed, 33 insertions(+), 2 deletions(-) create mode 100644 copy-image.yml create mode 100644 files/rhel-guest-image.txt create mode 100644 files/test.txt rename roles/defaults/{placeholder.yml => main.yml} (75%) diff --git a/copy-image.yml b/copy-image.yml new file mode 100644 index 00000000..13be2c2c --- /dev/null +++ b/copy-image.yml @@ -0,0 +1,28 @@ +--- +# tasks to check if kvm image file is present and copy if it is not + +- hosts: all + become: true + tasks: + + - name: check to see if kvm image file is present + stat: + path: /tmp/rhel-guest-image-8.3-400.s390x.qcow2 + get_checksum: no + get_mime: no + get_attributes: no + register: os_disk_file + + - name: fail if image 
file exists + fail: + msg: "Image file exists" + when: os_disk_file is true + + - name: copy kvm image to kvm host(s) + copy: + src: rhel-guest-image-8.3-400.s390x.qcow2 + dest: /tmp/rhel-guest-image-8.3-400.s390x.qcow2 + owner: root + owner: root + mode: 0644 + diff --git a/files/rhel-guest-image.txt b/files/rhel-guest-image.txt new file mode 100644 index 00000000..242bf2b8 --- /dev/null +++ b/files/rhel-guest-image.txt @@ -0,0 +1 @@ +# This file is a placeholder to identify the image file without uploading to github diff --git a/files/test.txt b/files/test.txt new file mode 100644 index 00000000..16b14f5d --- /dev/null +++ b/files/test.txt @@ -0,0 +1 @@ +test file diff --git a/roles/defaults/placeholder.yml b/roles/defaults/main.yml similarity index 75% rename from roles/defaults/placeholder.yml rename to roles/defaults/main.yml index 26cb17ee..06bce909 100644 --- a/roles/defaults/placeholder.yml +++ b/roles/defaults/main.yml @@ -5,7 +5,8 @@ kvm_vm_public_ip: [] kvm_vm_root_pwd: [] kvm_vm_base_img: [] #NOTE: This should be the name of a base image in /var/lib/libvirt/images on your KVM host kvm_vm_vcpus: "1" -kvm_vm_ram: "16384" +kvm_vm_ram: "8196" +# kvm_vm_ram: "16384" kvm_vm_os_disk_name: "{{ kvm_vm_hostname }}" kvm_vm_os_disk_size: "70G" -kvm_vm_nics: [] #NOTE: see example playbook for structure \ No newline at end of file +kvm_vm_nics: [] #NOTE: see example playbook for structure From 75574536ddcd03297a9cba681dd36cccf321528a Mon Sep 17 00:00:00 2001 From: Phillip Wilson Date: Wed, 30 Jun 2021 15:04:48 -0500 Subject: [PATCH 003/885] updated readme file in lesson 3 --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 86497635..10d0a3ae 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,4 @@ # Ansible-OpenShift-Provisioning + +Phillip adding comment via lesson 3 From a8e1f3cd01b461fd9817cb26a23cd2dadfc9149d Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 9 Jul 2021 12:53:36 -0500 Subject: [PATCH 004/885] created 
roles folders for bootstrap, bastion, control nodes, and worker nodes --- roles/bastion_server/.DS_Store | Bin 0 -> 6148 bytes roles/bootstrap_server/.DS_Store | Bin 0 -> 6148 bytes roles/control_nodes/.DS_Store | Bin 0 -> 6148 bytes roles/worker_nodes/.DS_Store | Bin 0 -> 6148 bytes 4 files changed, 0 insertions(+), 0 deletions(-) create mode 100644 roles/bastion_server/.DS_Store create mode 100644 roles/bootstrap_server/.DS_Store create mode 100644 roles/control_nodes/.DS_Store create mode 100644 roles/worker_nodes/.DS_Store diff --git a/roles/bastion_server/.DS_Store b/roles/bastion_server/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..1fbd689229062191b69aea2e8fd10736d076ce3d GIT binary patch literal 6148 zcmeHK%}T>S5T4Z*f*$PAi;yP}`UbJYgCJh|0=11=F*U_1UOePA1o0Jo0FOR{UVW50 zf3%zKsyC4`12f-dcV>6y3)#&Afa~2AU4RY%B&dX~Ei@6Kane9Gv}Xs=@f@?GvKkG` zY8d!hhcRFb{67Zh+ik-+TtW$F;D5igB^;pc&sjKi&$GPf_D9uZ%Kc)z(Mgr(x(_6; zCm)Z4<2RbC;ZgkJF&zghBrt|kxQ2qWy$SMt!8QDR1OG_Yfsdbi@%jF&xkvn#75wBE zsiO_ISjP;mRx5COD;0R2k=(seKkJA4`O|{&#Y&TEnQz=9lBv8|v+KwJp4lYHwxC&K zz!)$F77Wn-kfIW1inSnb9ccIpfY`!r7Hrdx1I46@nPM%7BPdQn5hc{=6~jq5_NVHX zDb|7#4o)v0PFHq%Lvg%1&Y!Y7xJ=NjF<=Zd8Q7G^4xRsp-{1e6LH1+}7z1m?fNSOb zyoV*}*%}l_XKji)KqVo+T98A~=yA*qI*NBuS#T_+24bdI3t|t7{Rl7(W{iP9W#9|1 CY>%%1 literal 0 HcmV?d00001 diff --git a/roles/bootstrap_server/.DS_Store b/roles/bootstrap_server/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..b50ed442a4efc075cb023c35ad50912059289d56 GIT binary patch literal 6148 zcmeHKJ5Iwu5S@X5EYY~6+>SaoFp(*c=zIZiB7%eyM7M%Q7fPPdZr@aN`MtG4I&obL~QQ9n&GK> zSU2r@iK^f}Bp3(=f`MQl7}%QuKR2?Iy)z;(7zhUb3j?x0Bs9Uou`|@I1D!qrfD@Qi zpv%Qsa*|`=*crkDVM_&CD*F?IEgkmcafM@NXz9fM_+X#;RI^{ul#1DXVgZkMgti)91-qo6xS&L?o^g1p<9=3BW-1 fkyCY=KZ%aG!m%@yDq`1kU|a;0km!PeA7J1e0y8^y literal 0 HcmV?d00001 diff --git a/roles/control_nodes/.DS_Store b/roles/control_nodes/.DS_Store new file mode 100644 index 
0000000000000000000000000000000000000000..78fb190302cbe70ee2c1103346ff465fe5b0e274 GIT binary patch literal 6148 zcmeHKOG*Pl5UtV(18%Z(*;fd7fH6!Kg1cNGCL?AslL?CZIY}<#8C-h;&!De9N``5{ zm55Y9^{ejsbkBq74-xVFsh$x{iKu`IvKSeW;jHV#gU^7hH5$632YS42x=p6gUmTLP z7qq2Y+EGnM>t8qB8a^TB4SM;m0xJ{W+|QPquHCEGno-TRt17Qq)UPjx)y?x&u2$RC zKZ@ILM{^!cA7(HR3?0TIG=CBsaiwE#C{<)!(}8gjP(q>!27ZBo55y5YxBvhE literal 0 HcmV?d00001 diff --git a/roles/worker_nodes/.DS_Store b/roles/worker_nodes/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..1fbd689229062191b69aea2e8fd10736d076ce3d GIT binary patch literal 6148 zcmeHK%}T>S5T4Z*f*$PAi;yP}`UbJYgCJh|0=11=F*U_1UOePA1o0Jo0FOR{UVW50 zf3%zKsyC4`12f-dcV>6y3)#&Afa~2AU4RY%B&dX~Ei@6Kane9Gv}Xs=@f@?GvKkG` zY8d!hhcRFb{67Zh+ik-+TtW$F;D5igB^;pc&sjKi&$GPf_D9uZ%Kc)z(Mgr(x(_6; zCm)Z4<2RbC;ZgkJF&zghBrt|kxQ2qWy$SMt!8QDR1OG_Yfsdbi@%jF&xkvn#75wBE zsiO_ISjP;mRx5COD;0R2k=(seKkJA4`O|{&#Y&TEnQz=9lBv8|v+KwJp4lYHwxC&K zz!)$F77Wn-kfIW1inSnb9ccIpfY`!r7Hrdx1I46@nPM%7BPdQn5hc{=6~jq5_NVHX zDb|7#4o)v0PFHq%Lvg%1&Y!Y7xJ=NjF<=Zd8Q7G^4xRsp-{1e6LH1+}7z1m?fNSOb zyoV*}*%}l_XKji)KqVo+T98A~=yA*qI*NBuS#T_+24bdI3t|t7{Rl7(W{iP9W#9|1 CY>%%1 literal 0 HcmV?d00001 From 4fb38a9658fb69cc61df870e223ec5d994ea3aaa Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 9 Jul 2021 12:55:58 -0500 Subject: [PATCH 005/885] added roles to inventory, no IPs yet --- inventory | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/inventory b/inventory index e69d13bd..02062f0f 100644 --- a/inventory +++ b/inventory @@ -1 +1,8 @@ +[bootstrap_server] 9.60.87.132 + +[bastion_server] + +[control_nodes + +[worker_nodes] From 84edb29d959f09b0450cbcb1567f945bee0bd84a Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 9 Jul 2021 12:59:07 -0500 Subject: [PATCH 006/885] added bootstrap.yml as a template for future use --- bootstrap.yml | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 bootstrap.yml diff --git a/bootstrap.yml 
b/bootstrap.yml new file mode 100644 index 00000000..64b68c77 --- /dev/null +++ b/bootstrap.yml @@ -0,0 +1,45 @@ +--- + +- hosts: all + become: true + pre_tasks: + + - name: install updates (CentOS) + tags: always + dnf: + update_only: yes + update_cache: yes + when: ansible_distribution == "CentOS" + + - name: install updates (Ubuntu) + tags: always + apt: + upgrade: dist + update_cache: yes + when: ansible_distribution == "Ubuntu" + +- hosts: all + become: true + tasks: + + - name: create simone user + tags: always + user: + name: simone + groups: root + + - name: add ssh key for simone + tags: always + authorized_key: + user: simone + key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKHEBRxXjvVaYY8mg0S05qqUWJQaDLbzO4w5uwN8ogJ2 ansible" + + - name: add sudeoers file for simone + tags: always + copy: + src: sudoer_simone + dest: /etc/sudoers.d/simone + owner: root + group: root + mode: 0440 + From aa9eb037d07e81bd5927016a21a14abc86c09339 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 9 Jul 2021 13:26:25 -0500 Subject: [PATCH 007/885] added ip addresses to inventory file --- inventory | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/inventory b/inventory index 02062f0f..cb2ecf31 100644 --- a/inventory +++ b/inventory @@ -1,8 +1,17 @@ -[bootstrap_server] +[kvm-host] 9.60.87.132 +[bootstrap_server] +9.60.87.133 + [bastion_server] +9.60.87.139 -[control_nodes +[control_nodes] +9.60.87.138 +9.60.87.137 +9.60.87.136 [worker_nodes] +9.60.87.135 +9.60.87.134 From 88fa68cbb8fa25cd4c999ebd44c9ef010c16c49d Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 9 Jul 2021 13:56:57 -0500 Subject: [PATCH 008/885] modified inventory file --- inventory | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inventory b/inventory index cb2ecf31..521b9c44 100644 --- a/inventory +++ b/inventory @@ -1,4 +1,4 @@ -[kvm-host] +[kvm_host] 9.60.87.132 [bootstrap_server] From bf2157381ea7d231fe1ed0983368635b3dacde05 Mon Sep 17 00:00:00 2001 From: 
Jacob Emery Date: Fri, 9 Jul 2021 13:59:11 -0500 Subject: [PATCH 009/885] added kvm_host to roles folder --- roles/kvm_host/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) create mode 100644 roles/kvm_host/tasks/main.yml diff --git a/roles/kvm_host/tasks/main.yml b/roles/kvm_host/tasks/main.yml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/roles/kvm_host/tasks/main.yml @@ -0,0 +1 @@ +--- From 967527f4be824bf89c10cf432b834129011591fc Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 9 Jul 2021 14:06:48 -0500 Subject: [PATCH 010/885] added .gitignore file --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .gitignore diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..ab7b3cb1 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +.DS_Store +roles/.DS_Store From 60da86dd6809c2e1d165bc5ea47c438853a7808d Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 9 Jul 2021 14:08:29 -0500 Subject: [PATCH 011/885] simplified .gitignore --- .gitignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitignore b/.gitignore index ab7b3cb1..e43b0f98 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1 @@ .DS_Store -roles/.DS_Store From 9f5a32a6ec90661e322e14f9e352f8efaf2f4295 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Mon, 12 Jul 2021 15:22:26 -0400 Subject: [PATCH 012/885] renamed test01.yml to avoid conflits --- test01-psw.yml | 8 ++++++++ test01.yml | 0 2 files changed, 8 insertions(+) create mode 100644 test01-psw.yml delete mode 100644 test01.yml diff --git a/test01-psw.yml b/test01-psw.yml new file mode 100644 index 00000000..3d48b5e7 --- /dev/null +++ b/test01-psw.yml @@ -0,0 +1,8 @@ +--- + +- host kvm_hosts + +- name: list all VMs + command: list_vms + register: all_vms + diff --git a/test01.yml b/test01.yml deleted file mode 100644 index e69de29b..00000000 From dff021c68561634fed62e0b5e20bde1803725bf8 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Mon, 12 Jul 2021 17:03:27 -0400 Subject: [PATCH 
013/885] updated test01-psw to list running vms to test inclusion of community.libvirt functions --- test01-psw.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/test01-psw.yml b/test01-psw.yml index 3d48b5e7..4280724f 100644 --- a/test01-psw.yml +++ b/test01-psw.yml @@ -1,8 +1,10 @@ --- -- host kvm_hosts +- hosts: kvm_host -- name: list all VMs - command: list_vms - register: all_vms + tasks: + - name: list all VMs + community.libvirt.virt: + command: list_vms + register: running_vms From 5786fc518485103da634b4dfbcde09f46136af65 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 13 Jul 2021 11:35:01 -0400 Subject: [PATCH 014/885] Updated kvm_host main.yml to ensure pre-requisite packages are installed and libvirtd is started. --- roles/kvm_host/tasks/main.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/roles/kvm_host/tasks/main.yml b/roles/kvm_host/tasks/main.yml index ed97d539..0af913df 100644 --- a/roles/kvm_host/tasks/main.yml +++ b/roles/kvm_host/tasks/main.yml @@ -1 +1,22 @@ --- + +- hosts: kvm_hosts + become: true + tasks: + + - name: Ensure pre-requisite packages are installed + yum: + - libvirt + - libvirt-devel + - libvirt-daemon-kvm + - qemu-kvm + - virt-manager + - libvirt-daemon-config-network + - libvirt-client + - qemu-img + + - name: Ensure libvirtd is started + ansible.builtin.shell: + - systemctl enable --now libvirtd + - systemctl status libvirtd.service + - systemctl status libvirtd From ac5088f0415e76f56f5bdfc132baad9d673410ab Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 13 Jul 2021 11:38:02 -0400 Subject: [PATCH 015/885] added a test file for me to use and a shell_scripts folder with a shell script to start libvirtd. 
--- files/shell_scripts/start_libvirtd.sh | 4 ++++ test02-joe.yml | 25 +++++++++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 files/shell_scripts/start_libvirtd.sh create mode 100644 test02-joe.yml diff --git a/files/shell_scripts/start_libvirtd.sh b/files/shell_scripts/start_libvirtd.sh new file mode 100644 index 00000000..332d1b33 --- /dev/null +++ b/files/shell_scripts/start_libvirtd.sh @@ -0,0 +1,4 @@ +#!/bin/bash +systemctl enable --now libvirtd +systemctl status libvirtd.service +systemctl status libvirtd diff --git a/test02-joe.yml b/test02-joe.yml new file mode 100644 index 00000000..3eb2ee5f --- /dev/null +++ b/test02-joe.yml @@ -0,0 +1,25 @@ +--- + +- hosts: kvm_hosts + become: true + tasks: + + - name: Ensure pre-requisite packages are installed + yum: + names: + - libvirt + - libvirt-devel + - libvirt-daemon-kvm + - qemu-kvm + - virt-manager + - libvirt-daemon-config-network + - libvirt-client + - qemu-img + + - name: Ensure libvirtd is started + tasks: + - name: Transfer the script + copy: src=test.sh dest=/home/test_user mode=0777 + + - name: Execute the script + command: sh /home/test_user/test.sh From 9fc10a814c0ca423a1179b61e0dd0be0ec14c245 Mon Sep 17 00:00:00 2001 From: Phillip Wilson Date: Tue, 13 Jul 2021 12:17:06 -0500 Subject: [PATCH 016/885] test yml file for displaying running vms --- test01-psw.yml => list_vms.yml | 6 ++++++ 1 file changed, 6 insertions(+) rename test01-psw.yml => list_vms.yml (57%) diff --git a/test01-psw.yml b/list_vms.yml similarity index 57% rename from test01-psw.yml rename to list_vms.yml index 4280724f..951f9714 100644 --- a/test01-psw.yml +++ b/list_vms.yml @@ -8,3 +8,9 @@ command: list_vms register: running_vms + - name: Print running vms + ansible.builtin.debug: + var: running_vms + verbosity: 0 + + From 5a3ce70ffc2a7f65618405922d9fa6c74a4f01b0 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 13 Jul 2021 13:57:11 -0500 Subject: [PATCH 017/885] updated kvm_host main.yml to fix 
indentation --- roles/kvm_host/tasks/main.yml | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/roles/kvm_host/tasks/main.yml b/roles/kvm_host/tasks/main.yml index 0af913df..73c7013c 100644 --- a/roles/kvm_host/tasks/main.yml +++ b/roles/kvm_host/tasks/main.yml @@ -1,11 +1,16 @@ --- -- hosts: kvm_hosts +- host: kvm_hosts become: true tasks: + - name: update repository index + apt: + update_cache: yes + - name: Ensure pre-requisite packages are installed - yum: + yum: + name: - libvirt - libvirt-devel - libvirt-daemon-kvm @@ -14,9 +19,14 @@ - libvirt-daemon-config-network - libvirt-client - qemu-img + state: latest + update_cache: yes - name: Ensure libvirtd is started - ansible.builtin.shell: - - systemctl enable --now libvirtd - - systemctl status libvirtd.service - - systemctl status libvirtd + ansible.builtin.shell: + - systemctl enable --now libvirtd + - systemctl status libvirtd.service + - systemctl status libvirtd + + - name: get details about network interface + \ No newline at end of file From f990ecf4dcb0736f1dc316bbef82f068ff8a6aa0 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 13 Jul 2021 14:58:11 -0400 Subject: [PATCH 018/885] updated start_libvirtd.sh --- files/shell_scripts/start_libvirtd.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/files/shell_scripts/start_libvirtd.sh b/files/shell_scripts/start_libvirtd.sh index 332d1b33..ad4d21b7 100644 --- a/files/shell_scripts/start_libvirtd.sh +++ b/files/shell_scripts/start_libvirtd.sh @@ -1,3 +1,4 @@ + #!/bin/bash systemctl enable --now libvirtd systemctl status libvirtd.service From 5790f5d4682a7c4a70d53923b4771f6e94a87e47 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 13 Jul 2021 14:58:45 -0400 Subject: [PATCH 019/885] updated test yml file --- test02-joe.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/test02-joe.yml b/test02-joe.yml index 3eb2ee5f..8b689fd1 100644 --- a/test02-joe.yml +++ b/test02-joe.yml @@ 
-17,9 +17,7 @@ - qemu-img - name: Ensure libvirtd is started - tasks: - - name: Transfer the script - copy: src=test.sh dest=/home/test_user mode=0777 - - - name: Execute the script - command: sh /home/test_user/test.sh + ansible.builtin.shell: + - systemctl enable --now libvirtd + - systemctl status libvirtd.service + - systemctl status libvirtd From d3aa01a6fd34fbb8bb5c987a71012440d629d833 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 13 Jul 2021 15:51:11 -0500 Subject: [PATCH 020/885] updated kvm host main.yml with pre-requisite steps --- roles/kvm_host/tasks/main.yml | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/roles/kvm_host/tasks/main.yml b/roles/kvm_host/tasks/main.yml index 73c7013c..33cfcf2f 100644 --- a/roles/kvm_host/tasks/main.yml +++ b/roles/kvm_host/tasks/main.yml @@ -5,7 +5,7 @@ tasks: - name: update repository index - apt: + yum: update_cache: yes - name: Ensure pre-requisite packages are installed @@ -28,5 +28,24 @@ - systemctl status libvirtd.service - systemctl status libvirtd - - name: get details about network interface - \ No newline at end of file + - name: create macvtap xml file + file: + path: "~/files/macvtap.xml" + state: touch + + - name: Fill contents of macvtap xml file + copy: + dest: "~/files/macvtap.xml" + content: | + + macvtap-net + + + + + - name: Set up macvtap bridge + ansible.builtin.shell: + - virsh net-define macvtap.xml + - virsh net-start --network macvtap-net + - virsh net-autostart --network macvtap-net + - virsh net-list --all From a6af6ad8b4404472702810dfe780052ef1ed230b Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 13 Jul 2021 16:07:22 -0500 Subject: [PATCH 021/885] edited kvm_host main.yml from ansible.builtiin.shell to ansible.builtin.script --- roles/kvm_host/tasks/main.yml | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/roles/kvm_host/tasks/main.yml b/roles/kvm_host/tasks/main.yml index 33cfcf2f..1556b5d5 100644 --- 
a/roles/kvm_host/tasks/main.yml +++ b/roles/kvm_host/tasks/main.yml @@ -1,6 +1,6 @@ --- -- host: kvm_hosts +- hosts: kvm_hosts become: true tasks: @@ -23,10 +23,7 @@ update_cache: yes - name: Ensure libvirtd is started - ansible.builtin.shell: - - systemctl enable --now libvirtd - - systemctl status libvirtd.service - - systemctl status libvirtd + ansible.builtin.script: ~/files/shell_scripts/start_libvirtd.sh - name: create macvtap xml file file: From 76d0c8b1ab457dcc88c3d1cf4fc6686ba025ebda Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 13 Jul 2021 18:02:42 -0400 Subject: [PATCH 022/885] added macvtap-net.sh shell script for kvm host. Fixed syntax for kvm_host main.yml and edited shell script start_libvirtd.sh --- roles/kvm_host/tasks/main.yml | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/roles/kvm_host/tasks/main.yml b/roles/kvm_host/tasks/main.yml index 33cfcf2f..dc8862ee 100644 --- a/roles/kvm_host/tasks/main.yml +++ b/roles/kvm_host/tasks/main.yml @@ -1,6 +1,6 @@ --- -- host: kvm_hosts +- hosts: kvm_host become: true tasks: @@ -23,10 +23,7 @@ update_cache: yes - name: Ensure libvirtd is started - ansible.builtin.shell: - - systemctl enable --now libvirtd - - systemctl status libvirtd.service - - systemctl status libvirtd + script: ~/files/shell_scripts/start_libvirtd.sh - name: create macvtap xml file file: @@ -44,8 +41,4 @@ - name: Set up macvtap bridge - ansible.builtin.shell: - - virsh net-define macvtap.xml - - virsh net-start --network macvtap-net - - virsh net-autostart --network macvtap-net - - virsh net-list --all + script: ~/files/shell_scripts/macvtap-net.sh From 5935c6fa0529a3c0a7e4f0ff6dcfdef22c019615 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 13 Jul 2021 22:19:32 -0500 Subject: [PATCH 023/885] Adding playbook to download images needed for UPI install of RHOCP --- dwnload-image-files.yml | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 
dwnload-image-files.yml diff --git a/dwnload-image-files.yml b/dwnload-image-files.yml new file mode 100644 index 00000000..9563e17b --- /dev/null +++ b/dwnload-image-files.yml @@ -0,0 +1,30 @@ +--- + +hosts: kvm_host +become: true +tasks: + +- name: download RHCOS initramfs + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + dest: /var/lib/libvirt/images + mode: 0755 + +- name: download RHCOS kernel + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x + dest: /var/lib/libvirt/images + mode: 0755 + +- name: download RHCOS rootfs + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + dest: /var/lib/libvirt/images + mode: 0755 + +- name: download QCOW2 image + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz + dest: /var/lib/libvirt/images + mode: 0755 + From 35a2272598a6f54fae7036d33c6486e41ea12507 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 14 Jul 2021 09:43:01 -0500 Subject: [PATCH 024/885] adding yml to download image files to provision vm's for openshift cluster --- dwnload-image-files.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dwnload-image-files.yml b/dwnload-image-files.yml index 9563e17b..eecbe56b 100644 --- a/dwnload-image-files.yml +++ b/dwnload-image-files.yml @@ -28,3 +28,5 @@ tasks: dest: /var/lib/libvirt/images mode: 0755 + + From 650b543a728ac9d54e9cc8c9bbfd092bb237f9f3 Mon Sep 17 00:00:00 2001 From: Phillip Wilson Date: Wed, 14 Jul 2021 11:44:35 -0500 Subject: [PATCH 025/885] spacing on dwnload file --- dwnload-image-files.yml | 46 ++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/dwnload-image-files.yml b/dwnload-image-files.yml index eecbe56b..c2ecaa80 100644 --- 
a/dwnload-image-files.yml +++ b/dwnload-image-files.yml @@ -1,32 +1,32 @@ --- -hosts: kvm_host -become: true -tasks: +- hosts: kvm_host + become: true + tasks: -- name: download RHCOS initramfs - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img - dest: /var/lib/libvirt/images - mode: 0755 + - name: download RHCOS initramfs + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + dest: /var/lib/libvirt/images + mode: 0755 -- name: download RHCOS kernel - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x - dest: /var/lib/libvirt/images - mode: 0755 + - name: download RHCOS kernel + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x + dest: /var/lib/libvirt/images + mode: 0755 -- name: download RHCOS rootfs - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/lib/libvirt/images - mode: 0755 + - name: download RHCOS rootfs + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + dest: /var/lib/libvirt/images + mode: 0755 -- name: download QCOW2 image - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz - dest: /var/lib/libvirt/images - mode: 0755 + - name: download QCOW2 image + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz + dest: /var/lib/libvirt/images + mode: 0755 From 52e3f7d832a19186ec885a8deb9a0aaa02879a8f Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 14 Jul 2021 15:39:35 -0400 Subject: [PATCH 026/885] Added playbooks for bastion 
and bootstrap and their related shell scripts. Incomplete. --- files/shell_scripts/create_bootstrap.sh | 15 ++++ files/shell_scripts/create_http.sh | 22 ++++++ files/shell_scripts/dl_rhel_iso.sh | 3 + files/shell_scripts/get_ocp_installer.sh | 15 ++++ files/shell_scripts/prep_kvm_guests.sh | 4 + files/shell_scripts/start_rhel_install.sh | 6 ++ files/shell_scripts/verify_bootstrap.sh | 3 + host_vars | 12 +++ roles/bastion_server/main.yaml | 92 +++++++++++++++++++++++ roles/bootstrap_server/tasks/main.yaml | 13 ++++ roles/kvm_host/tasks/main.yml | 10 +-- 11 files changed, 186 insertions(+), 9 deletions(-) create mode 100644 files/shell_scripts/create_bootstrap.sh create mode 100644 files/shell_scripts/create_http.sh create mode 100644 files/shell_scripts/dl_rhel_iso.sh create mode 100644 files/shell_scripts/get_ocp_installer.sh create mode 100644 files/shell_scripts/prep_kvm_guests.sh create mode 100644 files/shell_scripts/start_rhel_install.sh create mode 100644 files/shell_scripts/verify_bootstrap.sh create mode 100644 host_vars create mode 100644 roles/bastion_server/main.yaml create mode 100644 roles/bootstrap_server/tasks/main.yaml diff --git a/files/shell_scripts/create_bootstrap.sh b/files/shell_scripts/create_bootstrap.sh new file mode 100644 index 00000000..597d0c37 --- /dev/null +++ b/files/shell_scripts/create_bootstrap.sh @@ -0,0 +1,15 @@ +#!bin/bash + +##create +qemu-img create -f qcow2 -F qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G + +##boot +virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 +coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://:8080/bin/rhcos-rootfs.img +coreos.inst.ignition_url=http://:8080/ignition/bootstrap.ign ip=::::::none +nameserver=’ --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/ +images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net 
--qemu-commandline="-drive +if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio- +blk,serial=ignition,drive=ignition" + + diff --git a/files/shell_scripts/create_http.sh b/files/shell_scripts/create_http.sh new file mode 100644 index 00000000..4978ee22 --- /dev/null +++ b/files/shell_scripts/create_http.sh @@ -0,0 +1,22 @@ +#!bin/bash + +##install HTTP +dnf install -y httpd + +##make folders +mkdir /var/www/html/bin /var/www/html/bootstrap + +##get mirror 1 +wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-kernel-s390x +-O /var/www/html/bin/rhcos-kernel + +##get mirror 2 +wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live- +initramfs.s390x.img -O /var/www/html/bin/rhcos-initramfs.img + +##get mirror 3 +wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live- +rootfs.s390x.img -O rhcos-rootfs.img + +##enable http +systemctl enable --now httpd; systemctl status httpd diff --git a/files/shell_scripts/dl_rhel_iso.sh b/files/shell_scripts/dl_rhel_iso.sh new file mode 100644 index 00000000..1377f15b --- /dev/null +++ b/files/shell_scripts/dl_rhel_iso.sh @@ -0,0 +1,3 @@ +#!bin/bash +wget /URL/rhel-8.3-s390x-dvd.iso +rhel-8.3-s390x-dvd.iso rhel83.iso diff --git a/files/shell_scripts/get_ocp_installer.sh b/files/shell_scripts/get_ocp_installer.sh new file mode 100644 index 00000000..a19a0abb --- /dev/null +++ b/files/shell_scripts/get_ocp_installer.sh @@ -0,0 +1,15 @@ +#!bin/bash + +##get and extract mirror 1 +wget https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/latest/openshift-client-linux.tar.gz +tar -xvzf openshift-client-linux.tar.gz + +##get and extract mirror 2 +wget https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/latest/openshift-install-linux.tar.gz +tar -xvzf openshift-client-linux.tar.gz + +##Make 
executable +chmod +x kubectl oc openshift_install + +##move installed to bin folder +mv kubectl oc openshift_install /usr/local/bin/ diff --git a/files/shell_scripts/prep_kvm_guests.sh b/files/shell_scripts/prep_kvm_guests.sh new file mode 100644 index 00000000..50b36294 --- /dev/null +++ b/files/shell_scripts/prep_kvm_guests.sh @@ -0,0 +1,4 @@ +#!bin/bash +wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz +dnf install -y gzip +gunzip rhcos-qemu.s390x.qcow2.gz /var/lib/libvirt/images/ diff --git a/files/shell_scripts/start_rhel_install.sh b/files/shell_scripts/start_rhel_install.sh new file mode 100644 index 00000000..89a907a8 --- /dev/null +++ b/files/shell_scripts/start_rhel_install.sh @@ -0,0 +1,6 @@ +#!bin/bash +virt# virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk size=20 --cdrom /var/lib/libvirt/images/rhel83.iso +--accelerate --import --network network=macvtap-net --extra-args "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none +nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive +if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" -- +noautoconsole diff --git a/files/shell_scripts/verify_bootstrap.sh b/files/shell_scripts/verify_bootstrap.sh new file mode 100644 index 00000000..fcb0845d --- /dev/null +++ b/files/shell_scripts/verify_bootstrap.sh @@ -0,0 +1,3 @@ +#!bin/bash +virsh console bootstrap +journalctl -u bootkube.service diff --git a/host_vars b/host_vars new file mode 100644 index 00000000..97570a25 --- /dev/null +++ b/host_vars @@ -0,0 +1,12 @@ +##placeholder until ready to simplify playbooks + +##list of needed variables: + +##in bastion main.yaml: +##baseDomain +##cluster_name +##pullsecret +##ssh-public-key +##installation_directory + + diff --git 
a/roles/bastion_server/main.yaml b/roles/bastion_server/main.yaml new file mode 100644 index 00000000..4c031b4a --- /dev/null +++ b/roles/bastion_server/main.yaml @@ -0,0 +1,92 @@ +--- + +- hosts: bastion_server + become: true + tasks: + + - name: update repository index + yum: + update_cache: yes + + - name: Download RHEL ISO image to RHEL KVM + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/dl_rhel_iso.sh + + - name: start install process + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_rhel_install.sh + +#there has to be a way to do this through Ansible. Step 3 page 9 + - name: complete bastion install process + +#leaving this until I meet with Filipe + - name: download software + +#leaving this until I meet with Filipe + - name: DNS requirements and configuration + +#not sure what this instruction step is trying to say. Page 13 + - name: Load Balancer + +# Need to edit this script to automate changing port to 8080 and ensure latest versions of OpenShift mirrors + - name: Create and configure the HTTP server + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/create_http.sh + + - name: Get installer and oc Client Tools + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/get_ocp_installer.sh + +##create install-config.yaml file + - name: create install-config.yaml + file: + path: "~/files/install-config.yaml" + state: touch + +##Needs variables from host_vars for baseDomain, cluster_name, pullsecret, and ssh-public-key. +##I think it also needs cidr (pod's IP range) and service network IP range. 
+ + - name: Fill contents of install-config.yaml file + copy: + dest: "~/files/macvtap.xml" + content: | + apiVersion: v1 + baseDomain: + compute: + - architecture: s390x + hyperthreading: Enabled + name: worker + replicas: 0 + controlPlane: + architecture: s390x + hyperthreading: Enabled + name: master + replicas: 3 + metadata: + name: + networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + networkType: OpenShiftSDN + serviceNetwork: + - 172.30.0.0/16 + platform: + none: {} + fips: false + pullSecret: '' + sshKey: '' + +##need to use host_vars for + - name: Generate the ignition files 1 + shell: ./openshift-install create manifests --dir= + +##also needs variable + - name: Generate the ignition files 2 + shell: ./openshift-install create ignition-configs --dir= + +##also needs variable + - name: Generate the ignition files 3 + shell: cp /*.ign /var/www/html/ignition + + - name: Generate the ignition files 4 + shell: chmod 775 /var/www/html/ignition/*.ign + + - name: Prepare the KVM OCP guests + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/prep_kvm_guests.sh diff --git a/roles/bootstrap_server/tasks/main.yaml b/roles/bootstrap_server/tasks/main.yaml new file mode 100644 index 00000000..d0e92ea8 --- /dev/null +++ b/roles/bootstrap_server/tasks/main.yaml @@ -0,0 +1,13 @@ +--- + +- hosts: bootstrap_server + become: true + tasks: + +##need to implement wait for completion logic before starting the next one + - name: create bootstrap + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/create_bootstrap.sh + +##needs to wait for previous to finish before starting + - name: verify installation + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/verify_bootstrap.sh diff --git a/roles/kvm_host/tasks/main.yml b/roles/kvm_host/tasks/main.yml index d21c3fca..f35d18d5 100644 --- a/roles/kvm_host/tasks/main.yml +++ b/roles/kvm_host/tasks/main.yml @@ -1,10 +1,6 @@ --- -<<<<<<< HEAD - hosts: kvm_host -======= 
-- hosts: kvm_hosts ->>>>>>> 600d4e0610d13ddb6a7d652ff4a2aef7775f61e6 become: true tasks: @@ -27,11 +23,7 @@ update_cache: yes - name: Ensure libvirtd is started -<<<<<<< HEAD - script: ~/files/shell_scripts/start_libvirtd.sh -======= - ansible.builtin.script: ~/files/shell_scripts/start_libvirtd.sh ->>>>>>> 600d4e0610d13ddb6a7d652ff4a2aef7775f61e6 + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_libvirtd.sh - name: create macvtap xml file file: From 81b0a6dcbb375bb90c2ea76ce7d4a6e8ea5847e0 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 15 Jul 2021 08:22:09 -0500 Subject: [PATCH 027/885] add manual script to create vms for reference --- build_script.sh | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 build_script.sh diff --git a/build_script.sh b/build_script.sh new file mode 100644 index 00000000..be51c367 --- /dev/null +++ b/build_script.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Created by Phillip + + + +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 70G +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 70G +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 70G +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-3.qcow2 70G +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/worker-1.qcow2 70G +qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/worker-2.qcow2 70G + +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign 
ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img 
coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-3 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-3.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name worker-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/worker-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + +virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name worker-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/worker-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole From c9c222bca3cb92e738a8d8bafaf7e052dcec930f Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 15 Jul 2021 10:13:50 -0500 Subject: [PATCH 028/885] changed 
install script to reflect ATG DNS naming conventions --- build_script.sh | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/build_script.sh b/build_script.sh index be51c367..a073334b 100644 --- a/build_script.sh +++ b/build_script.sh @@ -3,21 +3,21 @@ -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 70G -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 70G -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 70G -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-3.qcow2 70G -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/worker-1.qcow2 70G -qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/worker-2.qcow2 70G +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap-0.qcow2 100G +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G +qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img 
coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda 
coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot 
kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-3 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-3.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name worker-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/worker-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" 
--noautoconsole +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name worker-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/worker-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device 
virtio-blk,serial=ignition,drive=ignition" --noautoconsole From 08a1269b78cb6519ec0262b7b5ce4af666ad9901 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 15 Jul 2021 13:14:48 -0500 Subject: [PATCH 029/885] modified scripts and created RHOCP install-config yaml file --- files/shell_scripts/create_http.sh | 9 +++----- files/shell_scripts/start_rhel_install.sh | 7 ++---- install-config.yml | 26 +++++++++++++++++++++++ 3 files changed, 31 insertions(+), 11 deletions(-) create mode 100644 install-config.yml diff --git a/files/shell_scripts/create_http.sh b/files/shell_scripts/create_http.sh index 4978ee22..8650a27f 100644 --- a/files/shell_scripts/create_http.sh +++ b/files/shell_scripts/create_http.sh @@ -7,16 +7,13 @@ dnf install -y httpd mkdir /var/www/html/bin /var/www/html/bootstrap ##get mirror 1 -wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-kernel-s390x --O /var/www/html/bin/rhcos-kernel +wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-kernel-s390x-O /var/www/html/bin/rhcos-kernel ##get mirror 2 -wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live- -initramfs.s390x.img -O /var/www/html/bin/rhcos-initramfs.img +wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-initramfs.s390x.img -O /var/www/html/bin/rhcos-initramfs.img ##get mirror 3 -wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live- -rootfs.s390x.img -O rhcos-rootfs.img +wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-rootfs.s390x.img -O rhcos-rootfs.img ##enable http systemctl enable --now httpd; systemctl status httpd diff --git a/files/shell_scripts/start_rhel_install.sh b/files/shell_scripts/start_rhel_install.sh index 
89a907a8..616f7749 100644 --- a/files/shell_scripts/start_rhel_install.sh +++ b/files/shell_scripts/start_rhel_install.sh @@ -1,6 +1,3 @@ #!bin/bash -virt# virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk size=20 --cdrom /var/lib/libvirt/images/rhel83.iso ---accelerate --import --network network=macvtap-net --extra-args "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none -nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive -if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" -- -noautoconsole +virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk size=20 --cdrom /var/lib/libvirt/images/rhel83.iso +--accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.255.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/install-config.yml b/install-config.yml new file mode 100644 index 00000000..4b2caeae --- /dev/null +++ b/install-config.yml @@ -0,0 +1,26 @@ +apiVersion: v1 +baseDomain: ocpz.wsclab.endicott.ibm.com +compute: +- hyperthreading: Enabled + name: worker + replicas: 0 + architecture : s390x +controlPlane: + hyperthreading: Enabled + name: master + replicas: 3 + architecture : s390x +metadata: + name: distribution +networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + networkType: OpenShiftSDN + serviceNetwork: + - 172.30.0.0/16 +platform: + none: {} +fips: false +pullSecret: '' +sshKey: '' \ No newline at end of file From 
7672567e382ff49a6d3eb242427aff2206a8b8a5 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 15 Jul 2021 13:14:48 -0500 Subject: [PATCH 030/885] modified scripts and created RHOCP install-config yaml file --- files/shell_scripts/create_http.sh | 9 +++----- files/shell_scripts/start_rhel_install.sh | 7 ++---- install-config.yml | 26 +++++++++++++++++++++++ 3 files changed, 31 insertions(+), 11 deletions(-) create mode 100644 install-config.yml diff --git a/files/shell_scripts/create_http.sh b/files/shell_scripts/create_http.sh index 4978ee22..8650a27f 100644 --- a/files/shell_scripts/create_http.sh +++ b/files/shell_scripts/create_http.sh @@ -7,16 +7,13 @@ dnf install -y httpd mkdir /var/www/html/bin /var/www/html/bootstrap ##get mirror 1 -wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-kernel-s390x --O /var/www/html/bin/rhcos-kernel +wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-kernel-s390x-O /var/www/html/bin/rhcos-kernel ##get mirror 2 -wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live- -initramfs.s390x.img -O /var/www/html/bin/rhcos-initramfs.img +wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-initramfs.s390x.img -O /var/www/html/bin/rhcos-initramfs.img ##get mirror 3 -wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live- -rootfs.s390x.img -O rhcos-rootfs.img +wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-rootfs.s390x.img -O rhcos-rootfs.img ##enable http systemctl enable --now httpd; systemctl status httpd diff --git a/files/shell_scripts/start_rhel_install.sh b/files/shell_scripts/start_rhel_install.sh index 89a907a8..616f7749 100644 --- 
a/files/shell_scripts/start_rhel_install.sh +++ b/files/shell_scripts/start_rhel_install.sh @@ -1,6 +1,3 @@ #!bin/bash -virt# virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk size=20 --cdrom /var/lib/libvirt/images/rhel83.iso ---accelerate --import --network network=macvtap-net --extra-args "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none -nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive -if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" -- -noautoconsole +virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk size=20 --cdrom /var/lib/libvirt/images/rhel83.iso +--accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.255.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/install-config.yml b/install-config.yml new file mode 100644 index 00000000..4b2caeae --- /dev/null +++ b/install-config.yml @@ -0,0 +1,26 @@ +apiVersion: v1 +baseDomain: ocpz.wsclab.endicott.ibm.com +compute: +- hyperthreading: Enabled + name: worker + replicas: 0 + architecture : s390x +controlPlane: + hyperthreading: Enabled + name: master + replicas: 3 + architecture : s390x +metadata: + name: distribution +networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + networkType: OpenShiftSDN + serviceNetwork: + - 172.30.0.0/16 +platform: + none: {} +fips: false +pullSecret: '' +sshKey: '' \ No newline at end of file From e2e6743402db19e11af3ff5dfb0471cb14fc1d99 Mon Sep 17 
00:00:00 2001 From: pswilso2017 Date: Thu, 15 Jul 2021 14:27:44 -0500 Subject: [PATCH 031/885] remove carrige return on virt-install command --- files/shell_scripts/start_rhel_install.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/files/shell_scripts/start_rhel_install.sh b/files/shell_scripts/start_rhel_install.sh index 616f7749..d18ba2c1 100644 --- a/files/shell_scripts/start_rhel_install.sh +++ b/files/shell_scripts/start_rhel_install.sh @@ -1,3 +1,2 @@ #!bin/bash -virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk size=20 --cdrom /var/lib/libvirt/images/rhel83.iso ---accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.255.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk size=20 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.255.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole From 97ad32302848c9d6ed2209e346c649d87768b85f Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 15 Jul 2021 14:27:44 -0500 Subject: [PATCH 032/885] remove carrige return on virt-install command --- files/shell_scripts/start_rhel_install.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git 
a/files/shell_scripts/start_rhel_install.sh b/files/shell_scripts/start_rhel_install.sh index 616f7749..d18ba2c1 100644 --- a/files/shell_scripts/start_rhel_install.sh +++ b/files/shell_scripts/start_rhel_install.sh @@ -1,3 +1,2 @@ #!bin/bash -virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk size=20 --cdrom /var/lib/libvirt/images/rhel83.iso ---accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.255.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk size=20 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.255.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole From a6d7ce527526ab6dce744767986c7c8f1de321a2 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 15 Jul 2021 15:36:59 -0400 Subject: [PATCH 033/885] changed start_rhel_install.sh to start_bastion_install.sh --- .../{start_rhel_install.sh => start_bastion_install.sh} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename files/shell_scripts/{start_rhel_install.sh => start_bastion_install.sh} (100%) diff --git a/files/shell_scripts/start_rhel_install.sh b/files/shell_scripts/start_bastion_install.sh similarity index 100% rename from 
files/shell_scripts/start_rhel_install.sh rename to files/shell_scripts/start_bastion_install.sh From 6f430076fcbd5830515fca107bd5b85db1dbe9d7 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 15 Jul 2021 15:36:59 -0400 Subject: [PATCH 034/885] changed start_rhel_install.sh to start_bastion_install.sh --- .../{start_rhel_install.sh => start_bastion_install.sh} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename files/shell_scripts/{start_rhel_install.sh => start_bastion_install.sh} (100%) diff --git a/files/shell_scripts/start_rhel_install.sh b/files/shell_scripts/start_bastion_install.sh similarity index 100% rename from files/shell_scripts/start_rhel_install.sh rename to files/shell_scripts/start_bastion_install.sh From e6125967af1bd1c697dead7ce96281d106e63dfd Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 15 Jul 2021 15:37:56 -0400 Subject: [PATCH 035/885] updated the reference to that file in its playbook --- roles/bastion_server/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/bastion_server/main.yaml b/roles/bastion_server/main.yaml index 4c031b4a..c0590de0 100644 --- a/roles/bastion_server/main.yaml +++ b/roles/bastion_server/main.yaml @@ -12,7 +12,7 @@ script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/dl_rhel_iso.sh - name: start install process - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_rhel_install.sh + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_bastion_install.sh #there has to be a way to do this through Ansible. 
Step 3 page 9 - name: complete bastion install process From 2b03052dce42c1d35bf870741d324dbb673f588e Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 15 Jul 2021 15:37:56 -0400 Subject: [PATCH 036/885] updated the reference to that file in its playbook --- roles/bastion_server/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/bastion_server/main.yaml b/roles/bastion_server/main.yaml index 4c031b4a..c0590de0 100644 --- a/roles/bastion_server/main.yaml +++ b/roles/bastion_server/main.yaml @@ -12,7 +12,7 @@ script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/dl_rhel_iso.sh - name: start install process - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_rhel_install.sh + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_bastion_install.sh #there has to be a way to do this through Ansible. Step 3 page 9 - name: complete bastion install process From d235c8216b894f4174601f37d326fabaccde73b5 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 16 Jul 2021 13:24:37 -0500 Subject: [PATCH 037/885] renamed file corrected naming conventiion on file --- dwnload-image-files.yml => dwnload-image-files.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename dwnload-image-files.yml => dwnload-image-files.yaml (100%) diff --git a/dwnload-image-files.yml b/dwnload-image-files.yaml similarity index 100% rename from dwnload-image-files.yml rename to dwnload-image-files.yaml From aba146ccf8162046329106ffe100ae3773e264d3 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 16 Jul 2021 13:24:37 -0500 Subject: [PATCH 038/885] renamed file corrected naming conventiion on file --- dwnload-image-files.yml => dwnload-image-files.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename dwnload-image-files.yml => dwnload-image-files.yaml (100%) diff --git a/dwnload-image-files.yml b/dwnload-image-files.yaml similarity index 100% rename from dwnload-image-files.yml rename to 
dwnload-image-files.yaml From 9b2932a1359034db51727512d51ea876555b7ba9 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 16 Jul 2021 14:29:04 -0400 Subject: [PATCH 039/885] fixed one syntax error in list_vms and moved kvm_host main.yml to root for the repo for now. --- host_vars/9.60.87.132.yaml | 1 + host_vars => host_vars/host_vars_file | 0 list_vms.yml | 2 +- main.yml | 52 ++++++++++++++++++++++----- roles/kvm_host/tasks/main.yml | 44 ----------------------- 5 files changed, 45 insertions(+), 54 deletions(-) create mode 100644 host_vars/9.60.87.132.yaml rename host_vars => host_vars/host_vars_file (100%) delete mode 100644 roles/kvm_host/tasks/main.yml diff --git a/host_vars/9.60.87.132.yaml b/host_vars/9.60.87.132.yaml new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/host_vars/9.60.87.132.yaml @@ -0,0 +1 @@ + diff --git a/host_vars b/host_vars/host_vars_file similarity index 100% rename from host_vars rename to host_vars/host_vars_file diff --git a/list_vms.yml b/list_vms.yml index 951f9714..3c82bab5 100644 --- a/list_vms.yml +++ b/list_vms.yml @@ -1,8 +1,8 @@ --- - hosts: kvm_host +- tasks: - tasks: - name: list all VMs community.libvirt.virt: command: list_vms diff --git a/main.yml b/main.yml index 555dd273..f35d18d5 100644 --- a/main.yml +++ b/main.yml @@ -1,10 +1,44 @@ --- -# tasks file for kvm-vm creation for OCP install -- name: Check if operating system disk already exists - stat: - path: /var/lib/libvirt/images/{{ kvm_vm_os_disk_name }}.qcow2 - get_checksum: no - get_md5: no - get_mime: no - get_attributes: no - register: os_disk_file \ No newline at end of file + +- hosts: kvm_host + become: true + tasks: + + - name: update repository index + yum: + update_cache: yes + + - name: Ensure pre-requisite packages are installed + yum: + name: + - libvirt + - libvirt-devel + - libvirt-daemon-kvm + - qemu-kvm + - virt-manager + - libvirt-daemon-config-network + - libvirt-client + - qemu-img + state: latest + update_cache: yes + + - name: 
Ensure libvirtd is started + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_libvirtd.sh + + - name: create macvtap xml file + file: + path: "~/files/macvtap.xml" + state: touch + + - name: Fill contents of macvtap xml file + copy: + dest: "~/files/macvtap.xml" + content: | + + macvtap-net + + + + + - name: Set up macvtap bridge + script: ~/files/shell_scripts/macvtap-net.sh diff --git a/roles/kvm_host/tasks/main.yml b/roles/kvm_host/tasks/main.yml deleted file mode 100644 index f35d18d5..00000000 --- a/roles/kvm_host/tasks/main.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: update repository index - yum: - update_cache: yes - - - name: Ensure pre-requisite packages are installed - yum: - name: - - libvirt - - libvirt-devel - - libvirt-daemon-kvm - - qemu-kvm - - virt-manager - - libvirt-daemon-config-network - - libvirt-client - - qemu-img - state: latest - update_cache: yes - - - name: Ensure libvirtd is started - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_libvirtd.sh - - - name: create macvtap xml file - file: - path: "~/files/macvtap.xml" - state: touch - - - name: Fill contents of macvtap xml file - copy: - dest: "~/files/macvtap.xml" - content: | - - macvtap-net - - - - - - name: Set up macvtap bridge - script: ~/files/shell_scripts/macvtap-net.sh From 3aa200869649edcc1ea4844ace3273e8365553a2 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 16 Jul 2021 14:29:04 -0400 Subject: [PATCH 040/885] fixed one syntax error in list_vms and moved kvm_host main.yml to root for the repo for now. 
--- host_vars/9.60.87.132.yaml | 1 + host_vars => host_vars/host_vars_file | 0 list_vms.yml | 2 +- main.yml | 52 ++++++++++++++++++++++----- roles/kvm_host/tasks/main.yml | 44 ----------------------- 5 files changed, 45 insertions(+), 54 deletions(-) create mode 100644 host_vars/9.60.87.132.yaml rename host_vars => host_vars/host_vars_file (100%) delete mode 100644 roles/kvm_host/tasks/main.yml diff --git a/host_vars/9.60.87.132.yaml b/host_vars/9.60.87.132.yaml new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/host_vars/9.60.87.132.yaml @@ -0,0 +1 @@ + diff --git a/host_vars b/host_vars/host_vars_file similarity index 100% rename from host_vars rename to host_vars/host_vars_file diff --git a/list_vms.yml b/list_vms.yml index 951f9714..3c82bab5 100644 --- a/list_vms.yml +++ b/list_vms.yml @@ -1,8 +1,8 @@ --- - hosts: kvm_host +- tasks: - tasks: - name: list all VMs community.libvirt.virt: command: list_vms diff --git a/main.yml b/main.yml index 555dd273..f35d18d5 100644 --- a/main.yml +++ b/main.yml @@ -1,10 +1,44 @@ --- -# tasks file for kvm-vm creation for OCP install -- name: Check if operating system disk already exists - stat: - path: /var/lib/libvirt/images/{{ kvm_vm_os_disk_name }}.qcow2 - get_checksum: no - get_md5: no - get_mime: no - get_attributes: no - register: os_disk_file \ No newline at end of file + +- hosts: kvm_host + become: true + tasks: + + - name: update repository index + yum: + update_cache: yes + + - name: Ensure pre-requisite packages are installed + yum: + name: + - libvirt + - libvirt-devel + - libvirt-daemon-kvm + - qemu-kvm + - virt-manager + - libvirt-daemon-config-network + - libvirt-client + - qemu-img + state: latest + update_cache: yes + + - name: Ensure libvirtd is started + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_libvirtd.sh + + - name: create macvtap xml file + file: + path: "~/files/macvtap.xml" + state: touch + + - name: Fill contents of macvtap xml file + copy: + dest: 
"~/files/macvtap.xml" + content: | + + macvtap-net + + + + + - name: Set up macvtap bridge + script: ~/files/shell_scripts/macvtap-net.sh diff --git a/roles/kvm_host/tasks/main.yml b/roles/kvm_host/tasks/main.yml deleted file mode 100644 index f35d18d5..00000000 --- a/roles/kvm_host/tasks/main.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: update repository index - yum: - update_cache: yes - - - name: Ensure pre-requisite packages are installed - yum: - name: - - libvirt - - libvirt-devel - - libvirt-daemon-kvm - - qemu-kvm - - virt-manager - - libvirt-daemon-config-network - - libvirt-client - - qemu-img - state: latest - update_cache: yes - - - name: Ensure libvirtd is started - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_libvirtd.sh - - - name: create macvtap xml file - file: - path: "~/files/macvtap.xml" - state: touch - - - name: Fill contents of macvtap xml file - copy: - dest: "~/files/macvtap.xml" - content: | - - macvtap-net - - - - - - name: Set up macvtap bridge - script: ~/files/shell_scripts/macvtap-net.sh From c0fbe2f5e21a98d3e700da0847cf1346d1d1734e Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 16 Jul 2021 13:39:27 -0500 Subject: [PATCH 041/885] Added script directory from Felipe Scripts for creating RHOCP cluster --- scripts-naranja/._3-generate-ignitions.sh | Bin 0 -> 367 bytes scripts-naranja/._4-make-bootstrap-vm.sh | Bin 0 -> 484 bytes scripts-naranja/._4-make-master-vms.sh | Bin 0 -> 367 bytes scripts-naranja/._4-make-worker-vms.sh | Bin 0 -> 211 bytes scripts-naranja/._env | Bin 0 -> 327 bytes scripts-naranja/._install-config.yaml | Bin 0 -> 211 bytes scripts-naranja/2-generate-bin-tree.sh | 23 ++++++++++++++ scripts-naranja/3-generate-ignitions.sh | 20 +++++++++++++ scripts-naranja/4-make-bootstrap-vm.sh | 12 ++++++++ scripts-naranja/4-make-master-vms.sh | 26 ++++++++++++++++ scripts-naranja/4-make-worker-vms.sh | 26 ++++++++++++++++ scripts-naranja/env | 35 
++++++++++++++++++++++ scripts-naranja/install-config.yaml | 26 ++++++++++++++++ 13 files changed, 168 insertions(+) create mode 100644 scripts-naranja/._3-generate-ignitions.sh create mode 100644 scripts-naranja/._4-make-bootstrap-vm.sh create mode 100644 scripts-naranja/._4-make-master-vms.sh create mode 100644 scripts-naranja/._4-make-worker-vms.sh create mode 100644 scripts-naranja/._env create mode 100644 scripts-naranja/._install-config.yaml create mode 100644 scripts-naranja/2-generate-bin-tree.sh create mode 100644 scripts-naranja/3-generate-ignitions.sh create mode 100644 scripts-naranja/4-make-bootstrap-vm.sh create mode 100644 scripts-naranja/4-make-master-vms.sh create mode 100644 scripts-naranja/4-make-worker-vms.sh create mode 100644 scripts-naranja/env create mode 100644 scripts-naranja/install-config.yaml diff --git a/scripts-naranja/._3-generate-ignitions.sh b/scripts-naranja/._3-generate-ignitions.sh new file mode 100644 index 0000000000000000000000000000000000000000..09168a43131df8648f9ebb21bd754dc11c0376d7 GIT binary patch literal 367 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFfiHzX&|35A4n4c9795aAj-fx?f}`7 z(X=rG`5y`042lG5VTl$6AhRONtRsGc`K+Jk|CAE7QcF*ye; z$!G$ktQiKhnX7+IScq!?RRCZ$*h=OiX)TRU60Ia(MxndllA8ae8kxH>xNTDlk-=~_Bl OI++3$nz@*oFaQ9#bvc0m literal 0 HcmV?d00001 diff --git a/scripts-naranja/._4-make-bootstrap-vm.sh b/scripts-naranja/._4-make-bootstrap-vm.sh new file mode 100644 index 0000000000000000000000000000000000000000..afc0ba7662930e8ffead5b8fe5b4e0e8af44cc81 GIT binary patch literal 484 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFfeWc(m+1r6Ch0la103vf+z#&U{nFJ z51?sdfr|4pFo-AT=jtUE6y&7pg``%LxaKA2r)1`(gEcT311SLp1}TJ^oW$ai(&E&V zl*E!$<$z#_xs1+0+Jk|CAE7QcF*yffMmLa(WMD8yl1nW~1nN(;%Jy~fNlZ%3i7(76 z$~P`cO)Sn!%*`k(Dk>~WOin7#PfsodYA!8F)3vZRG&V6ZG`F-cSwH=90?6c@&Y6-R znwf#eqE;!F(>H5X_bk4;@O|RU4DSvYv&AX7rOt`kusS$Vqox1Ojhs@R)|o50+1L3ClDJkFfiHzX&|35A4n4c9795aAj-fx?f}`7 z(X=rG`5y`042lG5VTl$6AhRONtRsGc`K+Jk|CAE7QcF*ye; 
z$!G$ktQi$Vqox1Ojhs@R)|o50+1L3ClDI}@j@U5@h<~05x_AdBnYYuq+J^qI7A5ADWagzZ6zUroSQuHG8Kf9nSSF=d2j?UvXInd4xH(!F jI+^Gi7#cb1nz%YT=~}uN8tGa(TRNEn6`Hx2nlJzWUC18? literal 0 HcmV?d00001 diff --git a/scripts-naranja/._env b/scripts-naranja/._env new file mode 100644 index 0000000000000000000000000000000000000000..7bc8835b4714ae7fe106ea43b545fd520f707388 GIT binary patch literal 327 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFffV&X&|4`9Y_-a9795aAj-fx9st?t zXxf;8e2_Rl1A};Sey(0(K|xNcUPx+1iECbReoAH@RKt6q8UY3dDTJDw#Nv|D;?$Is z#FA9yfMB3J2>b_P51?@fb-9VjIY7bEk~CclYeQobBSUjb3zJ>*FDC#+8KmNE*g-Ti Z!@C2tj-P*76*xv4gj%xF696K literal 0 HcmV?d00001 diff --git a/scripts-naranja/._install-config.yaml b/scripts-naranja/._install-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eead508b93a824dd3af73d88dfe12c202db7afae GIT binary patch literal 211 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDI}@j@U5@h<~05x_AdBnYYuq+J^qI7A5ADWagzZ6zUroSQuHG8Kf9nSSF=d2j?UvXInd4xH(!F jI+^Gi7#cb1nz%YT=~}uN8tGa(TRNEn6`Hx2nlJzWUC18? literal 0 HcmV?d00001 diff --git a/scripts-naranja/2-generate-bin-tree.sh b/scripts-naranja/2-generate-bin-tree.sh new file mode 100644 index 00000000..5fa1edac --- /dev/null +++ b/scripts-naranja/2-generate-bin-tree.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +. 
./env + +# create ocp and ignitions directories in http server +mkdir -p /var/www/html/ocp/ignitions + +# copy binaries to http server directory +cp /root/ocpbin/rhcos-live-initramfs.s390x.img /var/www/html/${CLUSTER_NAME}/${INITRAMFS} +cp /root/ocpbin/rhcos-live-kernel-s390x /var/www/html/${CLUSTER_NAME}/${KERNEL} +cp /root/ocpbin/rhcos-live-rootfs.s390x.img /var/www/html/${CLUSTER_NAME}/${ROOTFS} + +#generating .treeinfo file to be read by --location parameter +cat << EOF >> /var/www/html/${CLUSTER_NAME}/.treeinfo +[general] +arch = ${ARCHITECTURE} +family = Red Hat CoreOS +platforms = ${ARCHITECTURE} +version = ${OCP_RELEASE} +[images-${ARCHITECTURE}] +initrd = ${INITRAMFS} +kernel = ${KERNEL} +EOF diff --git a/scripts-naranja/3-generate-ignitions.sh b/scripts-naranja/3-generate-ignitions.sh new file mode 100644 index 00000000..3319a9ec --- /dev/null +++ b/scripts-naranja/3-generate-ignitions.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +rm -rf ignitions +mkdir -p ignitions +cp install-config.yaml ignitions + +# create kubernetes manifests +openshift-install create manifests --dir=./ignitions + +# ensure masters are not schedulable +sed -i 's/mastersSchedulable: true/mastersSchedulable: false/g' ignitions/manifests/cluster-scheduler-02-config.yml + +# create ignition config files +openshift-install create ignition-configs --dir=./ignitions + +# copy ign files to http server directory +cp ./ignitions/*.ign /var/www/html/ocp/ignitions + +# setting permissions in http server directory for binaries and ignitions +chmod -R 777 /var/www/html/ocp diff --git a/scripts-naranja/4-make-bootstrap-vm.sh b/scripts-naranja/4-make-bootstrap-vm.sh new file mode 100644 index 00000000..ce239e82 --- /dev/null +++ b/scripts-naranja/4-make-bootstrap-vm.sh @@ -0,0 +1,12 @@ +#/bin/bash + +. 
./env + +echo "using LOCATION: ${LOCATION}" + + virt-install --name bootstrap \ + --disk ${VIRT_IMAGE_DIR}/bootstrap.qcow2 --ram 16000 --cpu host --vcpus 4 \ + --os-type linux --os-variant rhel8.0 \ + --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ + --location ${LOCATION} \ + --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${BOOTSTRAP_IP}::${DEFAULT_GW}:${SUBNET_MASK}:bootstrap::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/bootstrap.ign" diff --git a/scripts-naranja/4-make-master-vms.sh b/scripts-naranja/4-make-master-vms.sh new file mode 100644 index 00000000..72dedc27 --- /dev/null +++ b/scripts-naranja/4-make-master-vms.sh @@ -0,0 +1,26 @@ +#/bin/bash + +. ./env + +echo "LOCATION: ${LOCATION}" + + virt-install --name master1 \ + --disk ${VIRT_IMAGE_DIR}/master1.qcow2 --ram 16000 --cpu host --vcpus 4 \ + --os-type linux --os-variant rhel8.0 \ + --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ + --location ${LOCATION} \ + --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${MASTER1_IP}::${DEFAULT_GW}:${SUBNET_MASK}:master1::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/master.ign" + + virt-install --name master2 \ + --disk ${VIRT_IMAGE_DIR}/master2.qcow2 --ram 16000 --cpu host --vcpus 4 \ + --os-type linux --os-variant rhel8.0 \ + --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ + --location ${LOCATION} \ + --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${MASTER2_IP}::${DEFAULT_GW}:${SUBNET_MASK}:master2::none:1500 
nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/master.ign" + + virt-install --name master3 \ + --disk ${VIRT_IMAGE_DIR}/master3.qcow2 --ram 16000 --cpu host --vcpus 4 \ + --os-type linux --os-variant rhel8.0 \ + --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ + --location ${LOCATION} \ + --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${MASTER3_IP}::${DEFAULT_GW}:${SUBNET_MASK}:master3::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/master.ign" diff --git a/scripts-naranja/4-make-worker-vms.sh b/scripts-naranja/4-make-worker-vms.sh new file mode 100644 index 00000000..2556b4d5 --- /dev/null +++ b/scripts-naranja/4-make-worker-vms.sh @@ -0,0 +1,26 @@ +#/bin/bash + +. ./env + +echo "LOCATION: ${LOCATION}" + + virt-install --name worker1 \ + --disk ${VIRT_IMAGE_DIR}/worker1.qcow2 --ram 32000 --cpu host --vcpus 8 \ + --os-type linux --os-variant rhel8.0 \ + --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ + --location ${LOCATION} \ + --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${WORKER1_IP}::${DEFAULT_GW}:${SUBNET_MASK}:worker1::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/worker.ign" + + virt-install --name worker2 \ + --disk ${VIRT_IMAGE_DIR}/worker2.qcow2 --ram 32000 --cpu host --vcpus 8 \ + --os-type linux --os-variant rhel8.0 \ + --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ + --location ${LOCATION} \ + --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} 
ip=${WORKER2_IP}::${DEFAULT_GW}:${SUBNET_MASK}:worker2::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/worker.ign" + + virt-install --name worker3 \ + --disk ${VIRT_IMAGE_DIR}/worker3.qcow2 --ram 32000 --cpu host --vcpus 8 \ + --os-type linux --os-variant rhel8.0 \ + --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ + --location ${LOCATION} \ + --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${WORKER3_IP}::${DEFAULT_GW}:${SUBNET_MASK}:worker3::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/worker.ign" diff --git a/scripts-naranja/env b/scripts-naranja/env new file mode 100644 index 00000000..2ffafcf9 --- /dev/null +++ b/scripts-naranja/env @@ -0,0 +1,35 @@ +#!/bin/bash + +OCP_RELEASE="4.7.13" +ARCHITECTURE="s390x" + +# VIRT_IMAGE_DIR is the diretory where the KVM guest virtual disks will be stored +VIRT_IMAGE_DIR=/var/lib/libvirt/images + +# CLUSTER_NAME will be used as a prefix to name the KVM guests, amoung oter things +CLUSTER_NAME="ocp" + +# HOST_IP and WEB_PORT together will be the HTTP server where the images can be downloaded from +HOST_IP="10.10.195.89" +WEB_PORT="8080" +LOCATION="http://${HOST_IP}:${WEB_PORT}/${CLUSTER_NAME}" + +# VIR_NET defines the KVM network to which the KVM guests will be configured to connect +VIR_NET="macvtap" + +# The names of the files to be retrieved from LOCATION +KERNEL=vmlinuz +INITRAMFS=initramfs.img +ROOTFS=rootfs + +# Static IP addresses and other network configuration details for the KVM guests +BOOTSTRAP_IP="10.10.195.88" +MASTER1_IP="10.10.195.80" +MASTER2_IP="10.10.195.81" +MASTER3_IP="10.10.195.82" +WORKER1_IP="10.10.195.83" +WORKER2_IP="10.10.195.84" +WORKER3_IP="10.10.195.85" +DEFAULT_GW="10.10.195.1" +SUBNET_MASK="24" +NAMESERVER="10.10.195.89" diff --git a/scripts-naranja/install-config.yaml 
b/scripts-naranja/install-config.yaml new file mode 100644 index 00000000..021e7719 --- /dev/null +++ b/scripts-naranja/install-config.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +baseDomain: naranja.local +compute: +- hyperthreading: Enabled + name: worker + replicas: 0 + architecture: s390x +controlPlane: + hyperthreading: Enabled + name: master + replicas: 3 + architecture: s390x +metadata: + name: ocp +networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + networkType: OpenShiftSDN + serviceNetwork: + - 172.30.0.0/16 +platform: + none: {} +fips: false +pullSecret: '' +sshKey: '' From b5adbc503013d4fe055739ef251dbda7a0617977 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 16 Jul 2021 13:39:27 -0500 Subject: [PATCH 042/885] Added script directory from Felipe Scripts for creating RHOCP cluster --- scripts-naranja/._3-generate-ignitions.sh | Bin 0 -> 367 bytes scripts-naranja/._4-make-bootstrap-vm.sh | Bin 0 -> 484 bytes scripts-naranja/._4-make-master-vms.sh | Bin 0 -> 367 bytes scripts-naranja/._4-make-worker-vms.sh | Bin 0 -> 211 bytes scripts-naranja/._env | Bin 0 -> 327 bytes scripts-naranja/._install-config.yaml | Bin 0 -> 211 bytes scripts-naranja/2-generate-bin-tree.sh | 23 ++++++++++++++ scripts-naranja/3-generate-ignitions.sh | 20 +++++++++++++ scripts-naranja/4-make-bootstrap-vm.sh | 12 ++++++++ scripts-naranja/4-make-master-vms.sh | 26 ++++++++++++++++ scripts-naranja/4-make-worker-vms.sh | 26 ++++++++++++++++ scripts-naranja/env | 35 ++++++++++++++++++++++ scripts-naranja/install-config.yaml | 26 ++++++++++++++++ 13 files changed, 168 insertions(+) create mode 100644 scripts-naranja/._3-generate-ignitions.sh create mode 100644 scripts-naranja/._4-make-bootstrap-vm.sh create mode 100644 scripts-naranja/._4-make-master-vms.sh create mode 100644 scripts-naranja/._4-make-worker-vms.sh create mode 100644 scripts-naranja/._env create mode 100644 scripts-naranja/._install-config.yaml create mode 100644 scripts-naranja/2-generate-bin-tree.sh 
create mode 100644 scripts-naranja/3-generate-ignitions.sh create mode 100644 scripts-naranja/4-make-bootstrap-vm.sh create mode 100644 scripts-naranja/4-make-master-vms.sh create mode 100644 scripts-naranja/4-make-worker-vms.sh create mode 100644 scripts-naranja/env create mode 100644 scripts-naranja/install-config.yaml diff --git a/scripts-naranja/._3-generate-ignitions.sh b/scripts-naranja/._3-generate-ignitions.sh new file mode 100644 index 0000000000000000000000000000000000000000..09168a43131df8648f9ebb21bd754dc11c0376d7 GIT binary patch literal 367 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFfiHzX&|35A4n4c9795aAj-fx?f}`7 z(X=rG`5y`042lG5VTl$6AhRONtRsGc`K+Jk|CAE7QcF*ye; z$!G$ktQiKhnX7+IScq!?RRCZ$*h=OiX)TRU60Ia(MxndllA8ae8kxH>xNTDlk-=~_Bl OI++3$nz@*oFaQ9#bvc0m literal 0 HcmV?d00001 diff --git a/scripts-naranja/._4-make-bootstrap-vm.sh b/scripts-naranja/._4-make-bootstrap-vm.sh new file mode 100644 index 0000000000000000000000000000000000000000..afc0ba7662930e8ffead5b8fe5b4e0e8af44cc81 GIT binary patch literal 484 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFfeWc(m+1r6Ch0la103vf+z#&U{nFJ z51?sdfr|4pFo-AT=jtUE6y&7pg``%LxaKA2r)1`(gEcT311SLp1}TJ^oW$ai(&E&V zl*E!$<$z#_xs1+0+Jk|CAE7QcF*yffMmLa(WMD8yl1nW~1nN(;%Jy~fNlZ%3i7(76 z$~P`cO)Sn!%*`k(Dk>~WOin7#PfsodYA!8F)3vZRG&V6ZG`F-cSwH=90?6c@&Y6-R znwf#eqE;!F(>H5X_bk4;@O|RU4DSvYv&AX7rOt`kusS$Vqox1Ojhs@R)|o50+1L3ClDJkFfiHzX&|35A4n4c9795aAj-fx?f}`7 z(X=rG`5y`042lG5VTl$6AhRONtRsGc`K+Jk|CAE7QcF*ye; z$!G$ktQi$Vqox1Ojhs@R)|o50+1L3ClDI}@j@U5@h<~05x_AdBnYYuq+J^qI7A5ADWagzZ6zUroSQuHG8Kf9nSSF=d2j?UvXInd4xH(!F jI+^Gi7#cb1nz%YT=~}uN8tGa(TRNEn6`Hx2nlJzWUC18? 
literal 0 HcmV?d00001 diff --git a/scripts-naranja/._env b/scripts-naranja/._env new file mode 100644 index 0000000000000000000000000000000000000000..7bc8835b4714ae7fe106ea43b545fd520f707388 GIT binary patch literal 327 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFffV&X&|4`9Y_-a9795aAj-fx9st?t zXxf;8e2_Rl1A};Sey(0(K|xNcUPx+1iECbReoAH@RKt6q8UY3dDTJDw#Nv|D;?$Is z#FA9yfMB3J2>b_P51?@fb-9VjIY7bEk~CclYeQobBSUjb3zJ>*FDC#+8KmNE*g-Ti Z!@C2tj-P*76*xv4gj%xF696K literal 0 HcmV?d00001 diff --git a/scripts-naranja/._install-config.yaml b/scripts-naranja/._install-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..eead508b93a824dd3af73d88dfe12c202db7afae GIT binary patch literal 211 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDI}@j@U5@h<~05x_AdBnYYuq+J^qI7A5ADWagzZ6zUroSQuHG8Kf9nSSF=d2j?UvXInd4xH(!F jI+^Gi7#cb1nz%YT=~}uN8tGa(TRNEn6`Hx2nlJzWUC18? literal 0 HcmV?d00001 diff --git a/scripts-naranja/2-generate-bin-tree.sh b/scripts-naranja/2-generate-bin-tree.sh new file mode 100644 index 00000000..5fa1edac --- /dev/null +++ b/scripts-naranja/2-generate-bin-tree.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +. 
./env + +# create ocp and ignitions directories in http server +mkdir -p /var/www/html/ocp/ignitions + +# copy binaries to http server directory +cp /root/ocpbin/rhcos-live-initramfs.s390x.img /var/www/html/${CLUSTER_NAME}/${INITRAMFS} +cp /root/ocpbin/rhcos-live-kernel-s390x /var/www/html/${CLUSTER_NAME}/${KERNEL} +cp /root/ocpbin/rhcos-live-rootfs.s390x.img /var/www/html/${CLUSTER_NAME}/${ROOTFS} + +#generating .treeinfo file to be read by --location parameter +cat << EOF >> /var/www/html/${CLUSTER_NAME}/.treeinfo +[general] +arch = ${ARCHITECTURE} +family = Red Hat CoreOS +platforms = ${ARCHITECTURE} +version = ${OCP_RELEASE} +[images-${ARCHITECTURE}] +initrd = ${INITRAMFS} +kernel = ${KERNEL} +EOF diff --git a/scripts-naranja/3-generate-ignitions.sh b/scripts-naranja/3-generate-ignitions.sh new file mode 100644 index 00000000..3319a9ec --- /dev/null +++ b/scripts-naranja/3-generate-ignitions.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +rm -rf ignitions +mkdir -p ignitions +cp install-config.yaml ignitions + +# create kubernetes manifests +openshift-install create manifests --dir=./ignitions + +# ensure masters are not schedulable +sed -i 's/mastersSchedulable: true/mastersSchedulable: false/g' ignitions/manifests/cluster-scheduler-02-config.yml + +# create ignition config files +openshift-install create ignition-configs --dir=./ignitions + +# copy ign files to http server directory +cp ./ignitions/*.ign /var/www/html/ocp/ignitions + +# setting permissions in http server directory for binaries and ignitions +chmod -R 777 /var/www/html/ocp diff --git a/scripts-naranja/4-make-bootstrap-vm.sh b/scripts-naranja/4-make-bootstrap-vm.sh new file mode 100644 index 00000000..ce239e82 --- /dev/null +++ b/scripts-naranja/4-make-bootstrap-vm.sh @@ -0,0 +1,12 @@ +#/bin/bash + +. 
./env + +echo "using LOCATION: ${LOCATION}" + + virt-install --name bootstrap \ + --disk ${VIRT_IMAGE_DIR}/bootstrap.qcow2 --ram 16000 --cpu host --vcpus 4 \ + --os-type linux --os-variant rhel8.0 \ + --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ + --location ${LOCATION} \ + --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${BOOTSTRAP_IP}::${DEFAULT_GW}:${SUBNET_MASK}:bootstrap::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/bootstrap.ign" diff --git a/scripts-naranja/4-make-master-vms.sh b/scripts-naranja/4-make-master-vms.sh new file mode 100644 index 00000000..72dedc27 --- /dev/null +++ b/scripts-naranja/4-make-master-vms.sh @@ -0,0 +1,26 @@ +#/bin/bash + +. ./env + +echo "LOCATION: ${LOCATION}" + + virt-install --name master1 \ + --disk ${VIRT_IMAGE_DIR}/master1.qcow2 --ram 16000 --cpu host --vcpus 4 \ + --os-type linux --os-variant rhel8.0 \ + --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ + --location ${LOCATION} \ + --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${MASTER1_IP}::${DEFAULT_GW}:${SUBNET_MASK}:master1::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/master.ign" + + virt-install --name master2 \ + --disk ${VIRT_IMAGE_DIR}/master2.qcow2 --ram 16000 --cpu host --vcpus 4 \ + --os-type linux --os-variant rhel8.0 \ + --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ + --location ${LOCATION} \ + --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${MASTER2_IP}::${DEFAULT_GW}:${SUBNET_MASK}:master2::none:1500 
nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/master.ign" + + virt-install --name master3 \ + --disk ${VIRT_IMAGE_DIR}/master3.qcow2 --ram 16000 --cpu host --vcpus 4 \ + --os-type linux --os-variant rhel8.0 \ + --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ + --location ${LOCATION} \ + --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${MASTER3_IP}::${DEFAULT_GW}:${SUBNET_MASK}:master3::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/master.ign" diff --git a/scripts-naranja/4-make-worker-vms.sh b/scripts-naranja/4-make-worker-vms.sh new file mode 100644 index 00000000..2556b4d5 --- /dev/null +++ b/scripts-naranja/4-make-worker-vms.sh @@ -0,0 +1,26 @@ +#/bin/bash + +. ./env + +echo "LOCATION: ${LOCATION}" + + virt-install --name worker1 \ + --disk ${VIRT_IMAGE_DIR}/worker1.qcow2 --ram 32000 --cpu host --vcpus 8 \ + --os-type linux --os-variant rhel8.0 \ + --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ + --location ${LOCATION} \ + --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${WORKER1_IP}::${DEFAULT_GW}:${SUBNET_MASK}:worker1::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/worker.ign" + + virt-install --name worker2 \ + --disk ${VIRT_IMAGE_DIR}/worker2.qcow2 --ram 32000 --cpu host --vcpus 8 \ + --os-type linux --os-variant rhel8.0 \ + --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ + --location ${LOCATION} \ + --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} 
ip=${WORKER2_IP}::${DEFAULT_GW}:${SUBNET_MASK}:worker2::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/worker.ign" + + virt-install --name worker3 \ + --disk ${VIRT_IMAGE_DIR}/worker3.qcow2 --ram 32000 --cpu host --vcpus 8 \ + --os-type linux --os-variant rhel8.0 \ + --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ + --location ${LOCATION} \ + --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${WORKER3_IP}::${DEFAULT_GW}:${SUBNET_MASK}:worker3::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/worker.ign" diff --git a/scripts-naranja/env b/scripts-naranja/env new file mode 100644 index 00000000..2ffafcf9 --- /dev/null +++ b/scripts-naranja/env @@ -0,0 +1,35 @@ +#!/bin/bash + +OCP_RELEASE="4.7.13" +ARCHITECTURE="s390x" + +# VIRT_IMAGE_DIR is the diretory where the KVM guest virtual disks will be stored +VIRT_IMAGE_DIR=/var/lib/libvirt/images + +# CLUSTER_NAME will be used as a prefix to name the KVM guests, amoung oter things +CLUSTER_NAME="ocp" + +# HOST_IP and WEB_PORT together will be the HTTP server where the images can be downloaded from +HOST_IP="10.10.195.89" +WEB_PORT="8080" +LOCATION="http://${HOST_IP}:${WEB_PORT}/${CLUSTER_NAME}" + +# VIR_NET defines the KVM network to which the KVM guests will be configured to connect +VIR_NET="macvtap" + +# The names of the files to be retrieved from LOCATION +KERNEL=vmlinuz +INITRAMFS=initramfs.img +ROOTFS=rootfs + +# Static IP addresses and other network configuration details for the KVM guests +BOOTSTRAP_IP="10.10.195.88" +MASTER1_IP="10.10.195.80" +MASTER2_IP="10.10.195.81" +MASTER3_IP="10.10.195.82" +WORKER1_IP="10.10.195.83" +WORKER2_IP="10.10.195.84" +WORKER3_IP="10.10.195.85" +DEFAULT_GW="10.10.195.1" +SUBNET_MASK="24" +NAMESERVER="10.10.195.89" diff --git a/scripts-naranja/install-config.yaml 
b/scripts-naranja/install-config.yaml new file mode 100644 index 00000000..021e7719 --- /dev/null +++ b/scripts-naranja/install-config.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +baseDomain: naranja.local +compute: +- hyperthreading: Enabled + name: worker + replicas: 0 + architecture: s390x +controlPlane: + hyperthreading: Enabled + name: master + replicas: 3 + architecture: s390x +metadata: + name: ocp +networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + networkType: OpenShiftSDN + serviceNetwork: + - 172.30.0.0/16 +platform: + none: {} +fips: false +pullSecret: '' +sshKey: '' From 8908ef802dc9bc5914ae863174614bfec70184d4 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 16 Jul 2021 13:42:43 -0500 Subject: [PATCH 043/885] Remove binary files remove unneeded files --- scripts-naranja/._3-generate-ignitions.sh | Bin 367 -> 0 bytes scripts-naranja/._4-make-bootstrap-vm.sh | Bin 484 -> 0 bytes scripts-naranja/._4-make-master-vms.sh | Bin 367 -> 0 bytes scripts-naranja/._4-make-worker-vms.sh | Bin 211 -> 0 bytes scripts-naranja/._env | Bin 327 -> 0 bytes scripts-naranja/._install-config.yaml | Bin 211 -> 0 bytes 6 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 scripts-naranja/._3-generate-ignitions.sh delete mode 100644 scripts-naranja/._4-make-bootstrap-vm.sh delete mode 100644 scripts-naranja/._4-make-master-vms.sh delete mode 100644 scripts-naranja/._4-make-worker-vms.sh delete mode 100644 scripts-naranja/._env delete mode 100644 scripts-naranja/._install-config.yaml diff --git a/scripts-naranja/._3-generate-ignitions.sh b/scripts-naranja/._3-generate-ignitions.sh deleted file mode 100644 index 09168a43131df8648f9ebb21bd754dc11c0376d7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 367 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFfiHzX&|35A4n4c9795aAj-fx?f}`7 z(X=rG`5y`042lG5VTl$6AhRONtRsGc`K+Jk|CAE7QcF*ye; z$!G$ktQiKhnX7+IScq!?RRCZ$*h=OiX)TRU60Ia(MxndllA8ae8kxH>xNTDlk-=~_Bl 
OI++3$nz@*oFaQ9#bvc0m diff --git a/scripts-naranja/._4-make-bootstrap-vm.sh b/scripts-naranja/._4-make-bootstrap-vm.sh deleted file mode 100644 index afc0ba7662930e8ffead5b8fe5b4e0e8af44cc81..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 484 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFfeWc(m+1r6Ch0la103vf+z#&U{nFJ z51?sdfr|4pFo-AT=jtUE6y&7pg``%LxaKA2r)1`(gEcT311SLp1}TJ^oW$ai(&E&V zl*E!$<$z#_xs1+0+Jk|CAE7QcF*yffMmLa(WMD8yl1nW~1nN(;%Jy~fNlZ%3i7(76 z$~P`cO)Sn!%*`k(Dk>~WOin7#PfsodYA!8F)3vZRG&V6ZG`F-cSwH=90?6c@&Y6-R znwf#eqE;!F(>H5X_bk4;@O|RU4DSvYv&AX7rOt`kusS$Vqox1Ojhs@R)|o50+1L3ClDJkFfiHzX&|35A4n4c9795aAj-fx?f}`7 z(X=rG`5y`042lG5VTl$6AhRONtRsGc`K+Jk|CAE7QcF*ye; z$!G$ktQi$Vqox1Ojhs@R)|o50+1L3ClDI}@j@U5@h<~05x_AdBnYYuq+J^qI7A5ADWagzZ6zUroSQuHG8Kf9nSSF=d2j?UvXInd4xH(!F jI+^Gi7#cb1nz%YT=~}uN8tGa(TRNEn6`Hx2nlJzWUC18? diff --git a/scripts-naranja/._env b/scripts-naranja/._env deleted file mode 100644 index 7bc8835b4714ae7fe106ea43b545fd520f707388..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 327 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFffV&X&|4`9Y_-a9795aAj-fx9st?t zXxf;8e2_Rl1A};Sey(0(K|xNcUPx+1iECbReoAH@RKt6q8UY3dDTJDw#Nv|D;?$Is z#FA9yfMB3J2>b_P51?@fb-9VjIY7bEk~CclYeQobBSUjb3zJ>*FDC#+8KmNE*g-Ti Z!@C2tj-P*76*xv4gj%xF696K diff --git a/scripts-naranja/._install-config.yaml b/scripts-naranja/._install-config.yaml deleted file mode 100644 index eead508b93a824dd3af73d88dfe12c202db7afae..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 211 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDI}@j@U5@h<~05x_AdBnYYuq+J^qI7A5ADWagzZ6zUroSQuHG8Kf9nSSF=d2j?UvXInd4xH(!F jI+^Gi7#cb1nz%YT=~}uN8tGa(TRNEn6`Hx2nlJzWUC18? 
From 5420a9964264d28a03d3cd8d7b01b4e8782579c3 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 16 Jul 2021 13:42:43 -0500 Subject: [PATCH 044/885] Remove binary files remove unneeded files --- scripts-naranja/._3-generate-ignitions.sh | Bin 367 -> 0 bytes scripts-naranja/._4-make-bootstrap-vm.sh | Bin 484 -> 0 bytes scripts-naranja/._4-make-master-vms.sh | Bin 367 -> 0 bytes scripts-naranja/._4-make-worker-vms.sh | Bin 211 -> 0 bytes scripts-naranja/._env | Bin 327 -> 0 bytes scripts-naranja/._install-config.yaml | Bin 211 -> 0 bytes 6 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 scripts-naranja/._3-generate-ignitions.sh delete mode 100644 scripts-naranja/._4-make-bootstrap-vm.sh delete mode 100644 scripts-naranja/._4-make-master-vms.sh delete mode 100644 scripts-naranja/._4-make-worker-vms.sh delete mode 100644 scripts-naranja/._env delete mode 100644 scripts-naranja/._install-config.yaml diff --git a/scripts-naranja/._3-generate-ignitions.sh b/scripts-naranja/._3-generate-ignitions.sh deleted file mode 100644 index 09168a43131df8648f9ebb21bd754dc11c0376d7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 367 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFfiHzX&|35A4n4c9795aAj-fx?f}`7 z(X=rG`5y`042lG5VTl$6AhRONtRsGc`K+Jk|CAE7QcF*ye; z$!G$ktQiKhnX7+IScq!?RRCZ$*h=OiX)TRU60Ia(MxndllA8ae8kxH>xNTDlk-=~_Bl OI++3$nz@*oFaQ9#bvc0m diff --git a/scripts-naranja/._4-make-bootstrap-vm.sh b/scripts-naranja/._4-make-bootstrap-vm.sh deleted file mode 100644 index afc0ba7662930e8ffead5b8fe5b4e0e8af44cc81..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 484 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFfeWc(m+1r6Ch0la103vf+z#&U{nFJ z51?sdfr|4pFo-AT=jtUE6y&7pg``%LxaKA2r)1`(gEcT311SLp1}TJ^oW$ai(&E&V zl*E!$<$z#_xs1+0+Jk|CAE7QcF*yffMmLa(WMD8yl1nW~1nN(;%Jy~fNlZ%3i7(76 z$~P`cO)Sn!%*`k(Dk>~WOin7#PfsodYA!8F)3vZRG&V6ZG`F-cSwH=90?6c@&Y6-R 
znwf#eqE;!F(>H5X_bk4;@O|RU4DSvYv&AX7rOt`kusS$Vqox1Ojhs@R)|o50+1L3ClDJkFfiHzX&|35A4n4c9795aAj-fx?f}`7 z(X=rG`5y`042lG5VTl$6AhRONtRsGc`K+Jk|CAE7QcF*ye; z$!G$ktQi$Vqox1Ojhs@R)|o50+1L3ClDI}@j@U5@h<~05x_AdBnYYuq+J^qI7A5ADWagzZ6zUroSQuHG8Kf9nSSF=d2j?UvXInd4xH(!F jI+^Gi7#cb1nz%YT=~}uN8tGa(TRNEn6`Hx2nlJzWUC18? diff --git a/scripts-naranja/._env b/scripts-naranja/._env deleted file mode 100644 index 7bc8835b4714ae7fe106ea43b545fd520f707388..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 327 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDJkFffV&X&|4`9Y_-a9795aAj-fx9st?t zXxf;8e2_Rl1A};Sey(0(K|xNcUPx+1iECbReoAH@RKt6q8UY3dDTJDw#Nv|D;?$Is z#FA9yfMB3J2>b_P51?@fb-9VjIY7bEk~CclYeQobBSUjb3zJ>*FDC#+8KmNE*g-Ti Z!@C2tj-P*76*xv4gj%xF696K diff --git a/scripts-naranja/._install-config.yaml b/scripts-naranja/._install-config.yaml deleted file mode 100644 index eead508b93a824dd3af73d88dfe12c202db7afae..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 211 zcmZQz6=P>$Vqox1Ojhs@R)|o50+1L3ClDI}@j@U5@h<~05x_AdBnYYuq+J^qI7A5ADWagzZ6zUroSQuHG8Kf9nSSF=d2j?UvXInd4xH(!F jI+^Gi7#cb1nz%YT=~}uN8tGa(TRNEn6`Hx2nlJzWUC18? From b13c9a37aef10d4d1951103a93e6485a6dbcc3aa Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 16 Jul 2021 17:30:36 -0400 Subject: [PATCH 045/885] define_macvtap playbook successful. macvtap.xml edited and ported to j2 format also. --- define_macvtap.yaml | 24 ++++++++++++++ main.yml | 44 ------------------------- roles/kvm_host/templates/macvtap.xml.j2 | 6 ++++ test02-joe.yml | 4 +++ 4 files changed, 34 insertions(+), 44 deletions(-) create mode 100644 define_macvtap.yaml delete mode 100644 main.yml create mode 100644 roles/kvm_host/templates/macvtap.xml.j2 diff --git a/define_macvtap.yaml b/define_macvtap.yaml new file mode 100644 index 00000000..8f858c6e --- /dev/null +++ b/define_macvtap.yaml @@ -0,0 +1,24 @@ +## Playbook works, need to change absolute path to relative or variable. 
Need to move to roles file for kvm_host +--- + +- hosts: kvm_host + become: true + tasks: + + - name: Set up macvtap bridge + community.libvirt.virt_net: + command: define + name: macvtap-net + autostart: true + xml: "{{ lookup ('template', '/home/jacob/.git/Ansible-OpenShift-Provisioning/roles/kvm_host/templates/macvtap.xml.j2') }}" + + - name: Start macvtap-net + community.libvirt.virt_net: + autostart: yes + command: start + name: macvtap-net + + - name: Set autostart for macvtap-net + community.libvirt.virt_net: + autostart: yes + name: macvtap-net diff --git a/main.yml b/main.yml deleted file mode 100644 index f35d18d5..00000000 --- a/main.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: update repository index - yum: - update_cache: yes - - - name: Ensure pre-requisite packages are installed - yum: - name: - - libvirt - - libvirt-devel - - libvirt-daemon-kvm - - qemu-kvm - - virt-manager - - libvirt-daemon-config-network - - libvirt-client - - qemu-img - state: latest - update_cache: yes - - - name: Ensure libvirtd is started - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_libvirtd.sh - - - name: create macvtap xml file - file: - path: "~/files/macvtap.xml" - state: touch - - - name: Fill contents of macvtap xml file - copy: - dest: "~/files/macvtap.xml" - content: | - - macvtap-net - - - - - - name: Set up macvtap bridge - script: ~/files/shell_scripts/macvtap-net.sh diff --git a/roles/kvm_host/templates/macvtap.xml.j2 b/roles/kvm_host/templates/macvtap.xml.j2 new file mode 100644 index 00000000..388477ea --- /dev/null +++ b/roles/kvm_host/templates/macvtap.xml.j2 @@ -0,0 +1,6 @@ + + macvtap-net + + + + diff --git a/test02-joe.yml b/test02-joe.yml index 8b689fd1..c73a4dde 100644 --- a/test02-joe.yml +++ b/test02-joe.yml @@ -16,6 +16,10 @@ - libvirt-client - qemu-img +- name: update repository index + yum: + update_cache: yes + - name: Ensure libvirtd is started ansible.builtin.shell: - 
systemctl enable --now libvirtd From 3f999e09b8c303107755c6fd5da4143169ee6528 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 16 Jul 2021 17:30:36 -0400 Subject: [PATCH 046/885] define_macvtap playbook successful. macvtap.xml edited and ported to j2 format also. --- define_macvtap.yaml | 24 ++++++++++++++ main.yml | 44 ------------------------- roles/kvm_host/templates/macvtap.xml.j2 | 6 ++++ test02-joe.yml | 4 +++ 4 files changed, 34 insertions(+), 44 deletions(-) create mode 100644 define_macvtap.yaml delete mode 100644 main.yml create mode 100644 roles/kvm_host/templates/macvtap.xml.j2 diff --git a/define_macvtap.yaml b/define_macvtap.yaml new file mode 100644 index 00000000..8f858c6e --- /dev/null +++ b/define_macvtap.yaml @@ -0,0 +1,24 @@ +## Playbook works, need to change absolute path to relative or variable. Need to move to roles file for kvm_host +--- + +- hosts: kvm_host + become: true + tasks: + + - name: Set up macvtap bridge + community.libvirt.virt_net: + command: define + name: macvtap-net + autostart: true + xml: "{{ lookup ('template', '/home/jacob/.git/Ansible-OpenShift-Provisioning/roles/kvm_host/templates/macvtap.xml.j2') }}" + + - name: Start macvtap-net + community.libvirt.virt_net: + autostart: yes + command: start + name: macvtap-net + + - name: Set autostart for macvtap-net + community.libvirt.virt_net: + autostart: yes + name: macvtap-net diff --git a/main.yml b/main.yml deleted file mode 100644 index f35d18d5..00000000 --- a/main.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: update repository index - yum: - update_cache: yes - - - name: Ensure pre-requisite packages are installed - yum: - name: - - libvirt - - libvirt-devel - - libvirt-daemon-kvm - - qemu-kvm - - virt-manager - - libvirt-daemon-config-network - - libvirt-client - - qemu-img - state: latest - update_cache: yes - - - name: Ensure libvirtd is started - script: 
~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_libvirtd.sh - - - name: create macvtap xml file - file: - path: "~/files/macvtap.xml" - state: touch - - - name: Fill contents of macvtap xml file - copy: - dest: "~/files/macvtap.xml" - content: | - - macvtap-net - - - - - - name: Set up macvtap bridge - script: ~/files/shell_scripts/macvtap-net.sh diff --git a/roles/kvm_host/templates/macvtap.xml.j2 b/roles/kvm_host/templates/macvtap.xml.j2 new file mode 100644 index 00000000..388477ea --- /dev/null +++ b/roles/kvm_host/templates/macvtap.xml.j2 @@ -0,0 +1,6 @@ + + macvtap-net + + + + diff --git a/test02-joe.yml b/test02-joe.yml index 8b689fd1..c73a4dde 100644 --- a/test02-joe.yml +++ b/test02-joe.yml @@ -16,6 +16,10 @@ - libvirt-client - qemu-img +- name: update repository index + yum: + update_cache: yes + - name: Ensure libvirtd is started ansible.builtin.shell: - systemctl enable --now libvirtd From 9c17a3f914ce0c151f1ecfe97fe482cee33316a0 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 19 Jul 2021 16:45:14 -0400 Subject: [PATCH 047/885] Started working on the bastion install playbook --- .../{main.yaml => bastion.yaml} | 18 ++++++++++------- .../bastion_server/start_bastion_install.yaml | 20 +++++++++++++++++++ 2 files changed, 31 insertions(+), 7 deletions(-) rename roles/bastion_server/{main.yaml => bastion.yaml} (79%) create mode 100644 roles/bastion_server/start_bastion_install.yaml diff --git a/roles/bastion_server/main.yaml b/roles/bastion_server/bastion.yaml similarity index 79% rename from roles/bastion_server/main.yaml rename to roles/bastion_server/bastion.yaml index c0590de0..c0cd3073 100644 --- a/roles/bastion_server/main.yaml +++ b/roles/bastion_server/bastion.yaml @@ -4,15 +4,19 @@ become: true tasks: - - name: update repository index - yum: - update_cache: yes + - name: start bastion install process + community.libvirt.virt: + command: + virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk 
size=20 --cdrom /var/lib/libvirt/images/rhel83.iso + --accelerate --import --network network=macvtap-net --extra-args "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive + if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" + --noautoconsole + + + + - - name: Download RHEL ISO image to RHEL KVM - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/dl_rhel_iso.sh - - name: start install process - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_bastion_install.sh #there has to be a way to do this through Ansible. Step 3 page 9 - name: complete bastion install process diff --git a/roles/bastion_server/start_bastion_install.yaml b/roles/bastion_server/start_bastion_install.yaml new file mode 100644 index 00000000..5667dae4 --- /dev/null +++ b/roles/bastion_server/start_bastion_install.yaml @@ -0,0 +1,20 @@ +--- + +- hosts: bastion_server + become: true + tasks: + + - name: start bastion install process + community.libvirt.virt: + name: bastion + memory: 4096 + vcpus: 2 + disk size: 20 + cdrom: /var/lib/libvirt/images/rhel83.iso + accelerate: yes + import: yes + network: network=macvtap-net + extra-args: "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" + location: /rhcos-install + qemu-commandline: "-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" + noautoconsole: yes From e4ceeebc260fbe9652c973baf66efbe923292910 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 19 Jul 2021 16:45:14 -0400 Subject: [PATCH 048/885] Started working on the bastion install playbook --- .../{main.yaml => 
bastion.yaml} | 18 ++++++++++------- .../bastion_server/start_bastion_install.yaml | 20 +++++++++++++++++++ 2 files changed, 31 insertions(+), 7 deletions(-) rename roles/bastion_server/{main.yaml => bastion.yaml} (79%) create mode 100644 roles/bastion_server/start_bastion_install.yaml diff --git a/roles/bastion_server/main.yaml b/roles/bastion_server/bastion.yaml similarity index 79% rename from roles/bastion_server/main.yaml rename to roles/bastion_server/bastion.yaml index c0590de0..c0cd3073 100644 --- a/roles/bastion_server/main.yaml +++ b/roles/bastion_server/bastion.yaml @@ -4,15 +4,19 @@ become: true tasks: - - name: update repository index - yum: - update_cache: yes + - name: start bastion install process + community.libvirt.virt: + command: + virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk size=20 --cdrom /var/lib/libvirt/images/rhel83.iso + --accelerate --import --network network=macvtap-net --extra-args "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive + if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" + --noautoconsole + + + + - - name: Download RHEL ISO image to RHEL KVM - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/dl_rhel_iso.sh - - name: start install process - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/start_bastion_install.sh #there has to be a way to do this through Ansible. 
Step 3 page 9 - name: complete bastion install process diff --git a/roles/bastion_server/start_bastion_install.yaml b/roles/bastion_server/start_bastion_install.yaml new file mode 100644 index 00000000..5667dae4 --- /dev/null +++ b/roles/bastion_server/start_bastion_install.yaml @@ -0,0 +1,20 @@ +--- + +- hosts: bastion_server + become: true + tasks: + + - name: start bastion install process + community.libvirt.virt: + name: bastion + memory: 4096 + vcpus: 2 + disk size: 20 + cdrom: /var/lib/libvirt/images/rhel83.iso + accelerate: yes + import: yes + network: network=macvtap-net + extra-args: "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" + location: /rhcos-install + qemu-commandline: "-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" + noautoconsole: yes From 007c7ae333c4f0350e5afac93926a410fa810719 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 20 Jul 2021 16:27:23 -0400 Subject: [PATCH 049/885] created x86 versions of some bastion and kvm host files. Filled out http_setup.yaml for bastion completely. 
--- .../bastion_s390x/tasks/bastion.yaml | 43 +++++++++++++ .../tasks/fill_install_config.yaml | 44 +++++++++++++ .../bastion_s390x/tasks/http_setup.yaml | 61 +++++++++++++++++++ .../tasks}/start_bastion_install.yaml | 0 .../{ => bastion_x86/tasks}/bastion.yaml | 47 -------------- .../tasks/fill_install_config.yaml | 44 +++++++++++++ .../bastion_x86/tasks/http_setup.yaml.save | 23 +++++++ .../tasks/start_bastion_install.yaml | 20 ++++++ .../kvm_host_s390x/tasks/define_macvtap.yaml | 2 +- .../tasks/dwnload-image-files.yaml | 0 .../templates/macvtap.xml.j2 | 0 roles/kvm_host/kvm_host_x86/tasks/main.yml | 41 +++++++++++++ .../kvm_host_x86/templates/macvtap.xml.j2 | 6 ++ 13 files changed, 283 insertions(+), 48 deletions(-) create mode 100644 roles/bastion_server/bastion_s390x/tasks/bastion.yaml create mode 100644 roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml create mode 100644 roles/bastion_server/bastion_s390x/tasks/http_setup.yaml rename roles/bastion_server/{ => bastion_s390x/tasks}/start_bastion_install.yaml (100%) rename roles/bastion_server/{ => bastion_x86/tasks}/bastion.yaml (50%) create mode 100644 roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml create mode 100644 roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save create mode 100644 roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml rename define_macvtap.yaml => roles/kvm_host/kvm_host_s390x/tasks/define_macvtap.yaml (88%) rename dwnload-image-files.yaml => roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml (100%) rename roles/kvm_host/{ => kvm_host_s390x}/templates/macvtap.xml.j2 (100%) create mode 100644 roles/kvm_host/kvm_host_x86/tasks/main.yml create mode 100644 roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 diff --git a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml new file mode 100644 index 00000000..1fc0ccb3 --- /dev/null +++ 
b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml @@ -0,0 +1,43 @@ +--- + +- hosts: bastion_server + become: true + tasks: + + +#there has to be a way to do this through Ansible. Step 3 page 9 + - name: complete bastion install process + +#leaving this until I meet with Filipe + - name: download software + +#leaving this until I meet with Filipe + - name: DNS requirements and configuration + +#not sure what this instruction step is trying to say. Page 13 + - name: Load Balancer + +# Need to edit this script to automate changing port to 8080 and ensure latest versions of OpenShift mirrors + - name: Create and configure the HTTP server + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/create_http.sh + + - name: Get installer and oc Client Tools + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/get_ocp_installer.sh + +##need to use host_vars for + - name: Generate the ignition files 1 + shell: ./openshift-install create manifests --dir= + +##also needs variable + - name: Generate the ignition files 2 + shell: ./openshift-install create ignition-configs --dir= + +##also needs variable + - name: Generate the ignition files 3 + shell: cp /*.ign /var/www/html/ignition + + - name: Generate the ignition files 4 + shell: chmod 775 /var/www/html/ignition/*.ign + + - name: Prepare the KVM OCP guests + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/prep_kvm_guests.sh diff --git a/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml b/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml new file mode 100644 index 00000000..ef798242 --- /dev/null +++ b/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml @@ -0,0 +1,44 @@ +##Needs variables from host_vars for baseDomain, cluster_name, pullsecret, and ssh-public-key. +##I think it also needs cidr (pod's IP range) and service network IP range. 
+##Ensure PATHs are correct +--- + +- hosts: bastion_server + become: true + tasks: + + - name: create install-config.yaml + file: + path: "~/files/install-config.yaml" + state: touch + + - name: Fill contents of install-config.yaml file + copy: + dest: "~/files/macvtap.xml" + content: | + apiVersion: v1 + baseDomain: + compute: + - architecture: s390x + hyperthreading: Enabled + name: worker + replicas: 0 + controlPlane: + architecture: s390x + hyperthreading: Enabled + name: master + replicas: 3 + metadata: + name: + networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + networkType: OpenShiftSDN + serviceNetwork: + - 172.30.0.0/16 + platform: + none: {} + fips: false + pullSecret: '' + sshKey: '' diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml new file mode 100644 index 00000000..329395ba --- /dev/null +++ b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml @@ -0,0 +1,61 @@ +--- + +- hosts: bastion_server + become: true + tasks: + + - name: update repository index + dnf: + update_cache: yes + + - name: install httpd + dnf: + name: httpd + +##this may not work, and is definitely not idempotent. 
If having problems, go to: +## https://dmsimard.com/2016/03/15/changing-the-ssh-port-with-ansible/ + - name: change default port to 8080 + set_fact: + ansible_port: 8080 + + - name: create directory bin for mirrors + file: + path: /var/www/html/bin + state: directory + mode: '0755' + + - name: create directory bootstrap for mirrors + file: + path: /var/www/html/bootstrap + state: directory + mode: '0755' + + - name: get mirrors 1 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-kernel-s390x + dest: /var/www/html/bin/rhcos-kernel + mode: '0440' + + - name: get mirrors 2 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-initramfs.s390x.img + dest: /var/www/html/bin/rhcos-initramfs.img + mode: '0440' + +##I think the destination path given from Filipe's document mistakenly left out the sub-directories, so I added them in here. + - name: get mirrors 3 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-rootfs.s390x.img + dest: /var/www/html/bin/rhcos-rootfs.img + + - name: enable httpd + systemd: + name: httpd + enable: yes + + - name: check httpd status + systemd: + state: started + name: httpd + + diff --git a/roles/bastion_server/start_bastion_install.yaml b/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml similarity index 100% rename from roles/bastion_server/start_bastion_install.yaml rename to roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml diff --git a/roles/bastion_server/bastion.yaml b/roles/bastion_server/bastion_x86/tasks/bastion.yaml similarity index 50% rename from roles/bastion_server/bastion.yaml rename to roles/bastion_server/bastion_x86/tasks/bastion.yaml index c0cd3073..ac86bf27 100644 --- a/roles/bastion_server/bastion.yaml +++ b/roles/bastion_server/bastion_x86/tasks/bastion.yaml @@ -4,19 
+4,6 @@ become: true tasks: - - name: start bastion install process - community.libvirt.virt: - command: - virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk size=20 --cdrom /var/lib/libvirt/images/rhel83.iso - --accelerate --import --network network=macvtap-net --extra-args "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive - if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" - --noautoconsole - - - - - - #there has to be a way to do this through Ansible. Step 3 page 9 - name: complete bastion install process @@ -43,40 +30,6 @@ path: "~/files/install-config.yaml" state: touch -##Needs variables from host_vars for baseDomain, cluster_name, pullsecret, and ssh-public-key. -##I think it also needs cidr (pod's IP range) and service network IP range. 
- - - name: Fill contents of install-config.yaml file - copy: - dest: "~/files/macvtap.xml" - content: | - apiVersion: v1 - baseDomain: - compute: - - architecture: s390x - hyperthreading: Enabled - name: worker - replicas: 0 - controlPlane: - architecture: s390x - hyperthreading: Enabled - name: master - replicas: 3 - metadata: - name: - networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - networkType: OpenShiftSDN - serviceNetwork: - - 172.30.0.0/16 - platform: - none: {} - fips: false - pullSecret: '' - sshKey: '' - ##need to use host_vars for - name: Generate the ignition files 1 shell: ./openshift-install create manifests --dir= diff --git a/roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml b/roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml new file mode 100644 index 00000000..24709068 --- /dev/null +++ b/roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml @@ -0,0 +1,44 @@ +##Needs variables from host_vars for baseDomain, cluster_name, pullsecret, and ssh-public-key. +##I think it also needs cidr (pod's IP range) and service network IP range. 
+## Ensure PATH references are correct +--- + +- hosts: bastion_server + become: true + tasks: + + - name: create install-config.yaml + file: + path: "~/files/install-config.yaml" + state: touch + + - name: Fill contents of install-config.yaml file + copy: + dest: "~/files/macvtap.xml" + content: | + apiVersion: v1 + baseDomain: + compute: + - architecture: x86 + hyperthreading: Enabled + name: worker + replicas: 0 + controlPlane: + architecture: x86 + hyperthreading: Enabled + name: master + replicas: 3 + metadata: + name: + networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + networkType: OpenShiftSDN + serviceNetwork: + - 172.30.0.0/16 + platform: + none: {} + fips: false + pullSecret: '' + sshKey: '' diff --git a/roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save b/roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save new file mode 100644 index 00000000..f5887aa8 --- /dev/null +++ b/roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save @@ -0,0 +1,23 @@ +--- + +- hosts: bastion_server + become: true + tasks: + + - name: update repository index + dnf: + update_cache: yes + + - name: install httpd + dnf: + name: httpd + state: latest + +## Not sure if this will work, especially after running once. 
Check this page out if encountering problems: +##https://dmsimard.com/2016/03/15/changing-the-ssh-port-with-ansible/ + + - name: Change ssh port to 8080 + set_fact: + ansible_port: 8080 + + diff --git a/roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml b/roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml new file mode 100644 index 00000000..5667dae4 --- /dev/null +++ b/roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml @@ -0,0 +1,20 @@ +--- + +- hosts: bastion_server + become: true + tasks: + + - name: start bastion install process + community.libvirt.virt: + name: bastion + memory: 4096 + vcpus: 2 + disk size: 20 + cdrom: /var/lib/libvirt/images/rhel83.iso + accelerate: yes + import: yes + network: network=macvtap-net + extra-args: "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" + location: /rhcos-install + qemu-commandline: "-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" + noautoconsole: yes diff --git a/define_macvtap.yaml b/roles/kvm_host/kvm_host_s390x/tasks/define_macvtap.yaml similarity index 88% rename from define_macvtap.yaml rename to roles/kvm_host/kvm_host_s390x/tasks/define_macvtap.yaml index 8f858c6e..57a66471 100644 --- a/define_macvtap.yaml +++ b/roles/kvm_host/kvm_host_s390x/tasks/define_macvtap.yaml @@ -10,7 +10,7 @@ command: define name: macvtap-net autostart: true - xml: "{{ lookup ('template', '/home/jacob/.git/Ansible-OpenShift-Provisioning/roles/kvm_host/templates/macvtap.xml.j2') }}" + xml: "{{ lookup ('template', '/home/jacob/.git/Ansible-OpenShift-Provisioning/roles/kvm_host/kvm_host_s90x/templates/macvtap.xml.j2') }}" - name: Start macvtap-net community.libvirt.virt_net: diff --git a/dwnload-image-files.yaml b/roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml similarity index 
100% rename from dwnload-image-files.yaml rename to roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml diff --git a/roles/kvm_host/templates/macvtap.xml.j2 b/roles/kvm_host/kvm_host_s390x/templates/macvtap.xml.j2 similarity index 100% rename from roles/kvm_host/templates/macvtap.xml.j2 rename to roles/kvm_host/kvm_host_s390x/templates/macvtap.xml.j2 diff --git a/roles/kvm_host/kvm_host_x86/tasks/main.yml b/roles/kvm_host/kvm_host_x86/tasks/main.yml new file mode 100644 index 00000000..ed7b03a3 --- /dev/null +++ b/roles/kvm_host/kvm_host_x86/tasks/main.yml @@ -0,0 +1,41 @@ +--- + +- hosts: kvm_hosts + become: true + tasks: + + - name: Ensure pre-requisite packages are installed + yum: + names: + - libvirt + - libvirt-devel + - libvirt-daemon-kvm + - qemu-kvm + - virt-manager + - libvirt-daemon-config-network + - libvirt-client + - qemu-img + + - name: update repository index + yum: + update_cache: yes + +## Playbook works, need to change absolute path to relative or variable. Need to move to roles file for kvm_host + + - name: Set up macvtap bridge + community.libvirt.virt_net: + command: define + name: macvtap-net + autostart: true + xml: "{{ lookup ('template', '/home/jacob/.git/Ansible-OpenShift-Provisioning/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2') }}" + + - name: Start macvtap-net + community.libvirt.virt_net: + autostart: yes + command: start + name: macvtap-net + + - name: Set autostart for macvtap-net + community.libvirt.virt_net: + autostart: yes + name: macvtap-net diff --git a/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 b/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 new file mode 100644 index 00000000..388477ea --- /dev/null +++ b/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 @@ -0,0 +1,6 @@ + + macvtap-net + + + + From a0da73a924dd750096a3b83dc6d93049580a862d Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 20 Jul 2021 16:27:23 -0400 Subject: [PATCH 050/885] created x86 versions of some bastion and kvm 
host files. Filled out http_setup.yaml for bastion completely. --- .../bastion_s390x/tasks/bastion.yaml | 43 +++++++++++++ .../tasks/fill_install_config.yaml | 44 +++++++++++++ .../bastion_s390x/tasks/http_setup.yaml | 61 +++++++++++++++++++ .../tasks}/start_bastion_install.yaml | 0 .../{ => bastion_x86/tasks}/bastion.yaml | 47 -------------- .../tasks/fill_install_config.yaml | 44 +++++++++++++ .../bastion_x86/tasks/http_setup.yaml.save | 23 +++++++ .../tasks/start_bastion_install.yaml | 20 ++++++ .../kvm_host_s390x/tasks/define_macvtap.yaml | 2 +- .../tasks/dwnload-image-files.yaml | 0 .../templates/macvtap.xml.j2 | 0 roles/kvm_host/kvm_host_x86/tasks/main.yml | 41 +++++++++++++ .../kvm_host_x86/templates/macvtap.xml.j2 | 6 ++ 13 files changed, 283 insertions(+), 48 deletions(-) create mode 100644 roles/bastion_server/bastion_s390x/tasks/bastion.yaml create mode 100644 roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml create mode 100644 roles/bastion_server/bastion_s390x/tasks/http_setup.yaml rename roles/bastion_server/{ => bastion_s390x/tasks}/start_bastion_install.yaml (100%) rename roles/bastion_server/{ => bastion_x86/tasks}/bastion.yaml (50%) create mode 100644 roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml create mode 100644 roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save create mode 100644 roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml rename define_macvtap.yaml => roles/kvm_host/kvm_host_s390x/tasks/define_macvtap.yaml (88%) rename dwnload-image-files.yaml => roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml (100%) rename roles/kvm_host/{ => kvm_host_s390x}/templates/macvtap.xml.j2 (100%) create mode 100644 roles/kvm_host/kvm_host_x86/tasks/main.yml create mode 100644 roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 diff --git a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml new file mode 100644 index 
00000000..1fc0ccb3 --- /dev/null +++ b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml @@ -0,0 +1,43 @@ +--- + +- hosts: bastion_server + become: true + tasks: + + +#there has to be a way to do this through Ansible. Step 3 page 9 + - name: complete bastion install process + +#leaving this until I meet with Filipe + - name: download software + +#leaving this until I meet with Filipe + - name: DNS requirements and configuration + +#not sure what this instruction step is trying to say. Page 13 + - name: Load Balancer + +# Need to edit this script to automate changing port to 8080 and ensure latest versions of OpenShift mirrors + - name: Create and configure the HTTP server + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/create_http.sh + + - name: Get installer and oc Client Tools + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/get_ocp_installer.sh + +##need to use host_vars for + - name: Generate the ignition files 1 + shell: ./openshift-install create manifests --dir= + +##also needs variable + - name: Generate the ignition files 2 + shell: ./openshift-install create ignition-configs --dir= + +##also needs variable + - name: Generate the ignition files 3 + shell: cp /*.ign /var/www/html/ignition + + - name: Generate the ignition files 4 + shell: chmod 775 /var/www/html/ignition/*.ign + + - name: Prepare the KVM OCP guests + script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/prep_kvm_guests.sh diff --git a/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml b/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml new file mode 100644 index 00000000..ef798242 --- /dev/null +++ b/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml @@ -0,0 +1,44 @@ +##Needs variables from host_vars for baseDomain, cluster_name, pullsecret, and ssh-public-key. +##I think it also needs cidr (pod's IP range) and service network IP range. 
+##Ensure PATHs are correct +--- + +- hosts: bastion_server + become: true + tasks: + + - name: create install-config.yaml + file: + path: "~/files/install-config.yaml" + state: touch + + - name: Fill contents of install-config.yaml file + copy: + dest: "~/files/macvtap.xml" + content: | + apiVersion: v1 + baseDomain: + compute: + - architecture: s390x + hyperthreading: Enabled + name: worker + replicas: 0 + controlPlane: + architecture: s390x + hyperthreading: Enabled + name: master + replicas: 3 + metadata: + name: + networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + networkType: OpenShiftSDN + serviceNetwork: + - 172.30.0.0/16 + platform: + none: {} + fips: false + pullSecret: '' + sshKey: '' diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml new file mode 100644 index 00000000..329395ba --- /dev/null +++ b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml @@ -0,0 +1,61 @@ +--- + +- hosts: bastion_server + become: true + tasks: + + - name: update repository index + dnf: + update_cache: yes + + - name: install httpd + dnf: + name: httpd + +##this may not work, and is definitely not idempotent. 
If having problems, go to: +## https://dmsimard.com/2016/03/15/changing-the-ssh-port-with-ansible/ + - name: change default port to 8080 + set_fact: + ansible_port: 8080 + + - name: create directory bin for mirrors + file: + path: /var/www/html/bin + state: directory + mode: '0755' + + - name: create directory bootstrap for mirrors + file: + path: /var/www/html/bootstrap + state: directory + mode: '0755' + + - name: get mirrors 1 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-kernel-s390x + dest: /var/www/html/bin/rhcos-kernel + mode: '0440' + + - name: get mirrors 2 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-initramfs.s390x.img + dest: /var/www/html/bin/rhcos-initramfs.img + mode: '0440' + +##I think the destination path given from Filipe's document mistakenly left out the sub-directories, so I added them in here. + - name: get mirrors 3 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-rootfs.s390x.img + dest: /var/www/html/bin/rhcos-rootfs.img + + - name: enable httpd + systemd: + name: httpd + enable: yes + + - name: check httpd status + systemd: + state: started + name: httpd + + diff --git a/roles/bastion_server/start_bastion_install.yaml b/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml similarity index 100% rename from roles/bastion_server/start_bastion_install.yaml rename to roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml diff --git a/roles/bastion_server/bastion.yaml b/roles/bastion_server/bastion_x86/tasks/bastion.yaml similarity index 50% rename from roles/bastion_server/bastion.yaml rename to roles/bastion_server/bastion_x86/tasks/bastion.yaml index c0cd3073..ac86bf27 100644 --- a/roles/bastion_server/bastion.yaml +++ b/roles/bastion_server/bastion_x86/tasks/bastion.yaml @@ -4,19 
+4,6 @@ become: true tasks: - - name: start bastion install process - community.libvirt.virt: - command: - virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk size=20 --cdrom /var/lib/libvirt/images/rhel83.iso - --accelerate --import --network network=macvtap-net --extra-args "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive - if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" - --noautoconsole - - - - - - #there has to be a way to do this through Ansible. Step 3 page 9 - name: complete bastion install process @@ -43,40 +30,6 @@ path: "~/files/install-config.yaml" state: touch -##Needs variables from host_vars for baseDomain, cluster_name, pullsecret, and ssh-public-key. -##I think it also needs cidr (pod's IP range) and service network IP range. 
- - - name: Fill contents of install-config.yaml file - copy: - dest: "~/files/macvtap.xml" - content: | - apiVersion: v1 - baseDomain: - compute: - - architecture: s390x - hyperthreading: Enabled - name: worker - replicas: 0 - controlPlane: - architecture: s390x - hyperthreading: Enabled - name: master - replicas: 3 - metadata: - name: - networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - networkType: OpenShiftSDN - serviceNetwork: - - 172.30.0.0/16 - platform: - none: {} - fips: false - pullSecret: '' - sshKey: '' - ##need to use host_vars for - name: Generate the ignition files 1 shell: ./openshift-install create manifests --dir= diff --git a/roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml b/roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml new file mode 100644 index 00000000..24709068 --- /dev/null +++ b/roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml @@ -0,0 +1,44 @@ +##Needs variables from host_vars for baseDomain, cluster_name, pullsecret, and ssh-public-key. +##I think it also needs cidr (pod's IP range) and service network IP range. 
+## Ensure PATH references are correct +--- + +- hosts: bastion_server + become: true + tasks: + + - name: create install-config.yaml + file: + path: "~/files/install-config.yaml" + state: touch + + - name: Fill contents of install-config.yaml file + copy: + dest: "~/files/macvtap.xml" + content: | + apiVersion: v1 + baseDomain: + compute: + - architecture: x86 + hyperthreading: Enabled + name: worker + replicas: 0 + controlPlane: + architecture: x86 + hyperthreading: Enabled + name: master + replicas: 3 + metadata: + name: + networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + networkType: OpenShiftSDN + serviceNetwork: + - 172.30.0.0/16 + platform: + none: {} + fips: false + pullSecret: '' + sshKey: '' diff --git a/roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save b/roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save new file mode 100644 index 00000000..f5887aa8 --- /dev/null +++ b/roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save @@ -0,0 +1,23 @@ +--- + +- hosts: bastion_server + become: true + tasks: + + - name: update repository index + dnf: + update_cache: yes + + - name: install httpd + dnf: + name: httpd + state: latest + +## Not sure if this will work, especially after running once. 
Check this page out if encountering problems: +##https://dmsimard.com/2016/03/15/changing-the-ssh-port-with-ansible/ + + - name: Change ssh port to 8080 + set_fact: + ansible_port: 8080 + + diff --git a/roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml b/roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml new file mode 100644 index 00000000..5667dae4 --- /dev/null +++ b/roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml @@ -0,0 +1,20 @@ +--- + +- hosts: bastion_server + become: true + tasks: + + - name: start bastion install process + community.libvirt.virt: + name: bastion + memory: 4096 + vcpus: 2 + disk size: 20 + cdrom: /var/lib/libvirt/images/rhel83.iso + accelerate: yes + import: yes + network: network=macvtap-net + extra-args: "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" + location: /rhcos-install + qemu-commandline: "-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" + noautoconsole: yes diff --git a/define_macvtap.yaml b/roles/kvm_host/kvm_host_s390x/tasks/define_macvtap.yaml similarity index 88% rename from define_macvtap.yaml rename to roles/kvm_host/kvm_host_s390x/tasks/define_macvtap.yaml index 8f858c6e..57a66471 100644 --- a/define_macvtap.yaml +++ b/roles/kvm_host/kvm_host_s390x/tasks/define_macvtap.yaml @@ -10,7 +10,7 @@ command: define name: macvtap-net autostart: true - xml: "{{ lookup ('template', '/home/jacob/.git/Ansible-OpenShift-Provisioning/roles/kvm_host/templates/macvtap.xml.j2') }}" + xml: "{{ lookup ('template', '/home/jacob/.git/Ansible-OpenShift-Provisioning/roles/kvm_host/kvm_host_s90x/templates/macvtap.xml.j2') }}" - name: Start macvtap-net community.libvirt.virt_net: diff --git a/dwnload-image-files.yaml b/roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml similarity index 
100% rename from dwnload-image-files.yaml rename to roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml diff --git a/roles/kvm_host/templates/macvtap.xml.j2 b/roles/kvm_host/kvm_host_s390x/templates/macvtap.xml.j2 similarity index 100% rename from roles/kvm_host/templates/macvtap.xml.j2 rename to roles/kvm_host/kvm_host_s390x/templates/macvtap.xml.j2 diff --git a/roles/kvm_host/kvm_host_x86/tasks/main.yml b/roles/kvm_host/kvm_host_x86/tasks/main.yml new file mode 100644 index 00000000..ed7b03a3 --- /dev/null +++ b/roles/kvm_host/kvm_host_x86/tasks/main.yml @@ -0,0 +1,41 @@ +--- + +- hosts: kvm_hosts + become: true + tasks: + + - name: Ensure pre-requisite packages are installed + yum: + names: + - libvirt + - libvirt-devel + - libvirt-daemon-kvm + - qemu-kvm + - virt-manager + - libvirt-daemon-config-network + - libvirt-client + - qemu-img + + - name: update repository index + yum: + update_cache: yes + +## Playbook works, need to change absolute path to relative or variable. Need to move to roles file for kvm_host + + - name: Set up macvtap bridge + community.libvirt.virt_net: + command: define + name: macvtap-net + autostart: true + xml: "{{ lookup ('template', '/home/jacob/.git/Ansible-OpenShift-Provisioning/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2') }}" + + - name: Start macvtap-net + community.libvirt.virt_net: + autostart: yes + command: start + name: macvtap-net + + - name: Set autostart for macvtap-net + community.libvirt.virt_net: + autostart: yes + name: macvtap-net diff --git a/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 b/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 new file mode 100644 index 00000000..388477ea --- /dev/null +++ b/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 @@ -0,0 +1,6 @@ + + macvtap-net + + + + From a6a77a3871845a2173105ad2639ff83ed4319ec8 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 20 Jul 2021 16:45:29 -0500 Subject: [PATCH 051/885] adding haproxy config file for use --- 
haproxy.cfg | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 haproxy.cfg diff --git a/haproxy.cfg b/haproxy.cfg new file mode 100644 index 00000000..fcec60f0 --- /dev/null +++ b/haproxy.cfg @@ -0,0 +1,37 @@ +global + daemon + maxconn 256 + +defaults + mode http + timeout connect 10s + timeout client 1m + timeout server 1m + +listen ingress-http + bind *:80 + mode tcp + server compute-0 9.60.87.135:80 check + server compute-1 9.60.87.134:80 check + +listen ingress-https + bind *:443 + mode tcp + server compute-0 9.60.87.135:443 check + server compute-1 9.60.87.134:443 check + +listen api + bind *:6443 + mode tcp + server bootstrap 9.60.87.133:6443 check + server control-0 9.60.87.138:6443 check + server control-1 9.60.87.137:6443 check + server control-2 9.60.87.136:6443 check + +listen api-int + bind *:22623 + mode tcp + server bootstrap 9.60.87.133:22623 check + server control-0 9.60.87.138:22623 check + server control-1 9.60.87.137:22623 check + server control-2 9.60.87.136:22623 check From b9b43da65b78f08227d93084d0d893113a0488a3 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 20 Jul 2021 16:45:29 -0500 Subject: [PATCH 052/885] adding haproxy config file for use --- haproxy.cfg | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 haproxy.cfg diff --git a/haproxy.cfg b/haproxy.cfg new file mode 100644 index 00000000..fcec60f0 --- /dev/null +++ b/haproxy.cfg @@ -0,0 +1,37 @@ +global + daemon + maxconn 256 + +defaults + mode http + timeout connect 10s + timeout client 1m + timeout server 1m + +listen ingress-http + bind *:80 + mode tcp + server compute-0 9.60.87.135:80 check + server compute-1 9.60.87.134:80 check + +listen ingress-https + bind *:443 + mode tcp + server compute-0 9.60.87.135:443 check + server compute-1 9.60.87.134:443 check + +listen api + bind *:6443 + mode tcp + server bootstrap 9.60.87.133:6443 check + server control-0 9.60.87.138:6443 check + server 
control-1 9.60.87.137:6443 check + server control-2 9.60.87.136:6443 check + +listen api-int + bind *:22623 + mode tcp + server bootstrap 9.60.87.133:22623 check + server control-0 9.60.87.138:22623 check + server control-1 9.60.87.137:22623 check + server control-2 9.60.87.136:22623 check From 2d746c20c9ad4287b753337ab8b735381e8b5664 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 21 Jul 2021 13:31:56 -0500 Subject: [PATCH 053/885] Small updates Update to bootstrap name in build script. Added pull secret from my redhat .com login --- build_script.sh | 2 +- pull-secret.txt | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 pull-secret.txt diff --git a/build_script.sh b/build_script.sh index a073334b..863090b0 100644 --- a/build_script.sh +++ b/build_script.sh @@ -3,7 +3,7 @@ -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap-0.qcow2 100G +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G diff --git a/pull-secret.txt b/pull-secret.txt new file mode 100644 index 00000000..ee0cd16d --- /dev/null +++ b/pull-secret.txt @@ -0,0 +1 @@ 
+{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}} From ab761ea7389ccae213a26e91b7c6523159257327 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 21 Jul 2021 13:31:56 -0500 Subject: [PATCH 054/885] Small updates Update to bootstrap name in build script. Added pull secret from my redhat .com login --- build_script.sh | 2 +- pull-secret.txt | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 pull-secret.txt diff --git a/build_script.sh b/build_script.sh index a073334b..863090b0 100644 --- a/build_script.sh +++ b/build_script.sh @@ -3,7 +3,7 @@ -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap-0.qcow2 100G +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G diff --git a/pull-secret.txt b/pull-secret.txt new file mode 100644 index 00000000..ee0cd16d --- /dev/null +++ b/pull-secret.txt @@ 
-0,0 +1 @@ +{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z
3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}} From 544ecc83410f4f8ff4730f0d152d00562da21624 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 21 Jul 2021 13:57:13 -0500 Subject: [PATCH 055/885] update bastion roles Updated bastion playbooks - added ansible public and private key for our use. Should be deleted in future. 
--- ansible | 49 +++++++++++++++++++ ansible.pub | 1 + .../tasks/fill_install_config.yaml | 8 +-- .../tasks/start_bastion_install.yaml | 4 +- 4 files changed, 56 insertions(+), 6 deletions(-) create mode 100644 ansible create mode 100644 ansible.pub diff --git a/ansible b/ansible new file mode 100644 index 00000000..b1c29d16 --- /dev/null +++ b/ansible @@ -0,0 +1,49 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn +NhAAAAAwEAAQAAAgEA4dy9CDQ05+jhj8pyGhw7g9uOXwELDGOLzYxz/nnVcUznyBsJuiO+ +5jH0/G/EUmUegu5PuWHvULzWnbZMQm9Imnc4WYhnMEuM5Uk5Dp1Ox1WkfLENuJ5Ph5qOe2 +1MhDjfB5SoPN4TJcqKwgmNrj/qRic/AifqKkTygRAxptut/mByiMaJES0WvXAgJTTb8apa +bMmXMIndhhmCgYv225CR/pf9y3P0C3x83B4uuAcyx7Ilfa7Fj1al/AzYLNfs+LThIlTAF6 +COhf6wRTUNaKHxdVcgciJ3ae11smA02zu5q2WslwicwhqqO1tt159DrMdM3M2FYoJBGQ4Q +bjryALHmRE/L6s1ozH9wy0LH9gItEcSa3js3+zAlE7N2Lr8Ck7FI4FWR0WlNmcj7m28j8I +FjKNAPyY3WS0cpu7UJzM7M8SSXBeuk0vAgK19/jW1zn5tzui2MRVKQLNbwMnXNIXeXIkOE +2+/Tn3/QWWdhE+1Cs14l4vVadiM5vdRgcfPLP9XK0EhuS4HkFpbXwr9mpPlQ/PK92N5K+I +ivpBy2wopyrafZnUbqAaXo2Z9NRB5d3kqSQLCSB+enNbdT6+hjjOg1+xUYdwxEFH7EQrIl +YI8qNNAfYvjT67jebSqTfDgW2Qf33RLxFqZ23pY+Vk3+yMxgZre8+PHKw/MxPrWntNVY9F +cAAAdAT0bHFU9GxxUAAAAHc3NoLXJzYQAAAgEA4dy9CDQ05+jhj8pyGhw7g9uOXwELDGOL +zYxz/nnVcUznyBsJuiO+5jH0/G/EUmUegu5PuWHvULzWnbZMQm9Imnc4WYhnMEuM5Uk5Dp +1Ox1WkfLENuJ5Ph5qOe21MhDjfB5SoPN4TJcqKwgmNrj/qRic/AifqKkTygRAxptut/mBy +iMaJES0WvXAgJTTb8apabMmXMIndhhmCgYv225CR/pf9y3P0C3x83B4uuAcyx7Ilfa7Fj1 +al/AzYLNfs+LThIlTAF6COhf6wRTUNaKHxdVcgciJ3ae11smA02zu5q2WslwicwhqqO1tt +159DrMdM3M2FYoJBGQ4QbjryALHmRE/L6s1ozH9wy0LH9gItEcSa3js3+zAlE7N2Lr8Ck7 +FI4FWR0WlNmcj7m28j8IFjKNAPyY3WS0cpu7UJzM7M8SSXBeuk0vAgK19/jW1zn5tzui2M +RVKQLNbwMnXNIXeXIkOE2+/Tn3/QWWdhE+1Cs14l4vVadiM5vdRgcfPLP9XK0EhuS4HkFp +bXwr9mpPlQ/PK92N5K+IivpBy2wopyrafZnUbqAaXo2Z9NRB5d3kqSQLCSB+enNbdT6+hj +jOg1+xUYdwxEFH7EQrIlYI8qNNAfYvjT67jebSqTfDgW2Qf33RLxFqZ23pY+Vk3+yMxgZr +e8+PHKw/MxPrWntNVY9FcAAAADAQABAAACAQChPzolVUBAIN6IDXwQb/99T44A/XmpypfQ 
+k2lsUAa18LW5ArSbE1ICfb/tSM3cFR0HVKPOMcDs3yRJTBB9jX1D6cFtChjBPe7E8tWtPV +k0rkvUSYMGPt+AbDM0ov9xljiD7Pl/GvyIimUtyixALplOpE4zd9ORgc+vnKQV/9fkN28N +fs/0V0BY1mfcqDQt//S2neU2Xfas+iFV41rFvhy8qj4ithGQ8bsiWtn3wAvcza4+YDvi7X +Vq0LC0syMXG+tu2xqYw6N8viuR/SIphRQhNaaQVVLzaPgR9OVHbwhK+TeSTC+909OctNVi +ju19uHuDV6bWgS+Ad637RFHpdRhybRNZYwn+0vxEEljzIcW1f7bYSSPXCzWctL0Hdhn51/ +IVUj+SLsAAi/ZlBghKh7LrThBc2blwcDrQCm5N/ZZUP22HCGbE8H5kmSYYWNo9Q+p/weYM +w4Jv341N6N6PBJdZj2tQMitYlprsB2c0YixWW4BbNzwj4QcqLLIQtjbMkhjOdcYqo1DWov +PL8nB+SSf8mefqo1TKkRKzswuAGp5O2Q3mcD3+tGnkhQi6EPE3cGsLQUpN1Ot2y2peEitv ++z73FgFS/IGgfF0tfZms9DvDWnVQGu+BfBqYGooBb035pGF9yv/XOlZkpVYdSNrgcW7Ig3 +cP6Bd6Bg8w+eCmp2kr4QAAAQEAnprftFCahU3kWYtaGKTgL1Km0etJd2V/HEzSyIqIK9mW +wjpOlS49zbaka0UNIPNKWsMSVLNZdRn+31VeiEc4PuyfixP8o0pomtB7VB1dg05nYZ2RRi +MI8T+kFIcmLRWapQ+AYz2JEdQMi0pvJBIBqiIYygG3qub5bgATsOlMl+sayeCShClAhVbm +Zebj3HvW6eWOoiVSOdNsZVNmbvnx2TgTUqR6+LE978fU7cQDZqwdqsAOlct3tndkKcyBOm +lqlaz7ZaW+w0vLTtmv+RMxjJSJUhEWT6jXRmCwse8Z+lgGMMbR9e7zhRzC96AH57m6WXe1 +c+HQwP68fjQYaKpRAQAAAQEA+BYfwUWWOLOCc0HsFRkBSDoW1dFO/ta6n4536a45mLT2vL +CjnLxDQqbfW7rUih+Yo/tXzL8qYgMd2lDAPYuHcF+az1ORL7gbeUpJSbRQBIaMiAqfMUWE +zLQhNQ/+T5xXyM5XL/l7uzShKrma6Od/5O84acnd9gSy70pcl9If8E1qsKrCm4SOQuymmr +tsvhqeQ3e+NmWbQLzmi541nu1ck3CDZb2nHwqMIMfkh+NKitQ9vnakSuoiKxwopPUD86TK +rzfik1QyGRkRh2+DK09XLwnysrSSFU4s166/7qwLEnTh0qmKqGKJc/DRUIJd3Bkrvge1zb +bA/one9km0l1nn/QAAAQEA6REhs+WSK4pF/rRA+eu7iebUkk9zXCe4PzU7r5GAaGVxEcXR +sLeCzNpz0/S9EYTl8BefrnPvW6rZnj03T672xW5Ws81XxdwYb/GK9+VFfQPJzPT5Lb/Fgm +0Era89iIymJ8hFKaEiNBN8scUmt+H+ZqOv6NMLh/OboKAtFQWEySlwdL4ror+Z50Nhy0Ml +4cCGucEfQYnZTG2J0Z6854CgBp5JZ3aNggc/CJIdrhkB6ffyBqvXWXQ90M1SvqnP8E4eI8 +eJecPuk6RafoQrcmIfiZ5Mns51FgbObFA9TBzGouABugWobqx9QJ8jx0YLwT/jT1QGwyBz +/PxXLyxyv8fr4wAAAAdhbnNpYmxlAQI= +-----END OPENSSH PRIVATE KEY----- diff --git a/ansible.pub b/ansible.pub new file mode 100644 index 00000000..7ccf5c0b --- /dev/null +++ b/ansible.pub @@ -0,0 +1 @@ +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDh3L0INDTn6OGPynIaHDuD245fAQsMY4vNjHP+edVxTOfIGwm6I77mMfT8b8RSZR6C7k+5Ye9QvNadtkxCb0iadzhZiGcwS4zlSTkOnU7HVaR8sQ24nk+Hmo57bUyEON8HlKg83hMlyorCCY2uP+pGJz8CJ+oqRPKBEDGm263+YHKIxokRLRa9cCAlNNvxqlpsyZcwid2GGYKBi/bbkJH+l/3Lc/QLfHzcHi64BzLHsiV9rsWPVqX8DNgs1+z4tOEiVMAXoI6F/rBFNQ1oofF1VyByIndp7XWyYDTbO7mrZayXCJzCGqo7W23Xn0Osx0zczYVigkEZDhBuOvIAseZET8vqzWjMf3DLQsf2Ai0RxJreOzf7MCUTs3YuvwKTsUjgVZHRaU2ZyPubbyPwgWMo0A/JjdZLRym7tQnMzszxJJcF66TS8CArX3+NbXOfm3O6LYxFUpAs1vAydc0hd5ciQ4Tb79Off9BZZ2ET7UKzXiXi9Vp2Izm91GBx88s/1crQSG5LgeQWltfCv2ak+VD88r3Y3kr4iK+kHLbCinKtp9mdRuoBpejZn01EHl3eSpJAsJIH56c1t1Pr6GOM6DX7FRh3DEQUfsRCsiVgjyo00B9i+NPruN5tKpN8OBbZB/fdEvEWpnbelj5WTf7IzGBmt7z48crD8zE+tae01Vj0Vw== ansible diff --git a/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml b/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml index ef798242..1b4156b6 100644 --- a/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml @@ -14,10 +14,10 @@ - name: Fill contents of install-config.yaml file copy: - dest: "~/files/macvtap.xml" + dest: "~/files/install-config.yaml" content: | apiVersion: v1 - baseDomain: + baseDomain: ocpz.wsclab.endicott.ibm.com compute: - architecture: s390x hyperthreading: Enabled @@ -29,7 +29,7 @@ name: master replicas: 3 metadata: - name: + name: distribution networking: clusterNetwork: - cidr: 10.128.0.0/14 @@ -40,5 +40,5 @@ platform: none: {} fips: false - pullSecret: '' + pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' sshKey: '' diff --git a/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml b/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml index 5667dae4..4e81c48b 100644 --- a/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml @@ -9,12 +9,12 @@ name: bastion memory: 4096 vcpus: 2 - disk size: 20 + disk size: 30 cdrom: /var/lib/libvirt/images/rhel83.iso accelerate: yes import: yes network: network=macvtap-net - extra-args: "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" + extra-args: ""ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" location: /rhcos-install qemu-commandline: "-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" noautoconsole: yes From 82b2d454e04828e3ac8b4f70661d2fa1aefa6771 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 21 Jul 2021 13:57:13 -0500 Subject: [PATCH 056/885] 
update bastion roles Updated bastion playbooks - added ansible public and private key for our use. Should be deleted in future. --- ansible | 49 +++++++++++++++++++ ansible.pub | 1 + .../tasks/fill_install_config.yaml | 8 +-- .../tasks/start_bastion_install.yaml | 4 +- 4 files changed, 56 insertions(+), 6 deletions(-) create mode 100644 ansible create mode 100644 ansible.pub diff --git a/ansible b/ansible new file mode 100644 index 00000000..b1c29d16 --- /dev/null +++ b/ansible @@ -0,0 +1,49 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn +NhAAAAAwEAAQAAAgEA4dy9CDQ05+jhj8pyGhw7g9uOXwELDGOLzYxz/nnVcUznyBsJuiO+ +5jH0/G/EUmUegu5PuWHvULzWnbZMQm9Imnc4WYhnMEuM5Uk5Dp1Ox1WkfLENuJ5Ph5qOe2 +1MhDjfB5SoPN4TJcqKwgmNrj/qRic/AifqKkTygRAxptut/mByiMaJES0WvXAgJTTb8apa +bMmXMIndhhmCgYv225CR/pf9y3P0C3x83B4uuAcyx7Ilfa7Fj1al/AzYLNfs+LThIlTAF6 +COhf6wRTUNaKHxdVcgciJ3ae11smA02zu5q2WslwicwhqqO1tt159DrMdM3M2FYoJBGQ4Q +bjryALHmRE/L6s1ozH9wy0LH9gItEcSa3js3+zAlE7N2Lr8Ck7FI4FWR0WlNmcj7m28j8I +FjKNAPyY3WS0cpu7UJzM7M8SSXBeuk0vAgK19/jW1zn5tzui2MRVKQLNbwMnXNIXeXIkOE +2+/Tn3/QWWdhE+1Cs14l4vVadiM5vdRgcfPLP9XK0EhuS4HkFpbXwr9mpPlQ/PK92N5K+I +ivpBy2wopyrafZnUbqAaXo2Z9NRB5d3kqSQLCSB+enNbdT6+hjjOg1+xUYdwxEFH7EQrIl +YI8qNNAfYvjT67jebSqTfDgW2Qf33RLxFqZ23pY+Vk3+yMxgZre8+PHKw/MxPrWntNVY9F +cAAAdAT0bHFU9GxxUAAAAHc3NoLXJzYQAAAgEA4dy9CDQ05+jhj8pyGhw7g9uOXwELDGOL +zYxz/nnVcUznyBsJuiO+5jH0/G/EUmUegu5PuWHvULzWnbZMQm9Imnc4WYhnMEuM5Uk5Dp +1Ox1WkfLENuJ5Ph5qOe21MhDjfB5SoPN4TJcqKwgmNrj/qRic/AifqKkTygRAxptut/mBy +iMaJES0WvXAgJTTb8apabMmXMIndhhmCgYv225CR/pf9y3P0C3x83B4uuAcyx7Ilfa7Fj1 +al/AzYLNfs+LThIlTAF6COhf6wRTUNaKHxdVcgciJ3ae11smA02zu5q2WslwicwhqqO1tt +159DrMdM3M2FYoJBGQ4QbjryALHmRE/L6s1ozH9wy0LH9gItEcSa3js3+zAlE7N2Lr8Ck7 +FI4FWR0WlNmcj7m28j8IFjKNAPyY3WS0cpu7UJzM7M8SSXBeuk0vAgK19/jW1zn5tzui2M +RVKQLNbwMnXNIXeXIkOE2+/Tn3/QWWdhE+1Cs14l4vVadiM5vdRgcfPLP9XK0EhuS4HkFp +bXwr9mpPlQ/PK92N5K+IivpBy2wopyrafZnUbqAaXo2Z9NRB5d3kqSQLCSB+enNbdT6+hj 
+jOg1+xUYdwxEFH7EQrIlYI8qNNAfYvjT67jebSqTfDgW2Qf33RLxFqZ23pY+Vk3+yMxgZr +e8+PHKw/MxPrWntNVY9FcAAAADAQABAAACAQChPzolVUBAIN6IDXwQb/99T44A/XmpypfQ +k2lsUAa18LW5ArSbE1ICfb/tSM3cFR0HVKPOMcDs3yRJTBB9jX1D6cFtChjBPe7E8tWtPV +k0rkvUSYMGPt+AbDM0ov9xljiD7Pl/GvyIimUtyixALplOpE4zd9ORgc+vnKQV/9fkN28N +fs/0V0BY1mfcqDQt//S2neU2Xfas+iFV41rFvhy8qj4ithGQ8bsiWtn3wAvcza4+YDvi7X +Vq0LC0syMXG+tu2xqYw6N8viuR/SIphRQhNaaQVVLzaPgR9OVHbwhK+TeSTC+909OctNVi +ju19uHuDV6bWgS+Ad637RFHpdRhybRNZYwn+0vxEEljzIcW1f7bYSSPXCzWctL0Hdhn51/ +IVUj+SLsAAi/ZlBghKh7LrThBc2blwcDrQCm5N/ZZUP22HCGbE8H5kmSYYWNo9Q+p/weYM +w4Jv341N6N6PBJdZj2tQMitYlprsB2c0YixWW4BbNzwj4QcqLLIQtjbMkhjOdcYqo1DWov +PL8nB+SSf8mefqo1TKkRKzswuAGp5O2Q3mcD3+tGnkhQi6EPE3cGsLQUpN1Ot2y2peEitv ++z73FgFS/IGgfF0tfZms9DvDWnVQGu+BfBqYGooBb035pGF9yv/XOlZkpVYdSNrgcW7Ig3 +cP6Bd6Bg8w+eCmp2kr4QAAAQEAnprftFCahU3kWYtaGKTgL1Km0etJd2V/HEzSyIqIK9mW +wjpOlS49zbaka0UNIPNKWsMSVLNZdRn+31VeiEc4PuyfixP8o0pomtB7VB1dg05nYZ2RRi +MI8T+kFIcmLRWapQ+AYz2JEdQMi0pvJBIBqiIYygG3qub5bgATsOlMl+sayeCShClAhVbm +Zebj3HvW6eWOoiVSOdNsZVNmbvnx2TgTUqR6+LE978fU7cQDZqwdqsAOlct3tndkKcyBOm +lqlaz7ZaW+w0vLTtmv+RMxjJSJUhEWT6jXRmCwse8Z+lgGMMbR9e7zhRzC96AH57m6WXe1 +c+HQwP68fjQYaKpRAQAAAQEA+BYfwUWWOLOCc0HsFRkBSDoW1dFO/ta6n4536a45mLT2vL +CjnLxDQqbfW7rUih+Yo/tXzL8qYgMd2lDAPYuHcF+az1ORL7gbeUpJSbRQBIaMiAqfMUWE +zLQhNQ/+T5xXyM5XL/l7uzShKrma6Od/5O84acnd9gSy70pcl9If8E1qsKrCm4SOQuymmr +tsvhqeQ3e+NmWbQLzmi541nu1ck3CDZb2nHwqMIMfkh+NKitQ9vnakSuoiKxwopPUD86TK +rzfik1QyGRkRh2+DK09XLwnysrSSFU4s166/7qwLEnTh0qmKqGKJc/DRUIJd3Bkrvge1zb +bA/one9km0l1nn/QAAAQEA6REhs+WSK4pF/rRA+eu7iebUkk9zXCe4PzU7r5GAaGVxEcXR +sLeCzNpz0/S9EYTl8BefrnPvW6rZnj03T672xW5Ws81XxdwYb/GK9+VFfQPJzPT5Lb/Fgm +0Era89iIymJ8hFKaEiNBN8scUmt+H+ZqOv6NMLh/OboKAtFQWEySlwdL4ror+Z50Nhy0Ml +4cCGucEfQYnZTG2J0Z6854CgBp5JZ3aNggc/CJIdrhkB6ffyBqvXWXQ90M1SvqnP8E4eI8 +eJecPuk6RafoQrcmIfiZ5Mns51FgbObFA9TBzGouABugWobqx9QJ8jx0YLwT/jT1QGwyBz +/PxXLyxyv8fr4wAAAAdhbnNpYmxlAQI= +-----END OPENSSH PRIVATE KEY----- diff --git a/ansible.pub b/ansible.pub new file mode 
100644 index 00000000..7ccf5c0b --- /dev/null +++ b/ansible.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDh3L0INDTn6OGPynIaHDuD245fAQsMY4vNjHP+edVxTOfIGwm6I77mMfT8b8RSZR6C7k+5Ye9QvNadtkxCb0iadzhZiGcwS4zlSTkOnU7HVaR8sQ24nk+Hmo57bUyEON8HlKg83hMlyorCCY2uP+pGJz8CJ+oqRPKBEDGm263+YHKIxokRLRa9cCAlNNvxqlpsyZcwid2GGYKBi/bbkJH+l/3Lc/QLfHzcHi64BzLHsiV9rsWPVqX8DNgs1+z4tOEiVMAXoI6F/rBFNQ1oofF1VyByIndp7XWyYDTbO7mrZayXCJzCGqo7W23Xn0Osx0zczYVigkEZDhBuOvIAseZET8vqzWjMf3DLQsf2Ai0RxJreOzf7MCUTs3YuvwKTsUjgVZHRaU2ZyPubbyPwgWMo0A/JjdZLRym7tQnMzszxJJcF66TS8CArX3+NbXOfm3O6LYxFUpAs1vAydc0hd5ciQ4Tb79Off9BZZ2ET7UKzXiXi9Vp2Izm91GBx88s/1crQSG5LgeQWltfCv2ak+VD88r3Y3kr4iK+kHLbCinKtp9mdRuoBpejZn01EHl3eSpJAsJIH56c1t1Pr6GOM6DX7FRh3DEQUfsRCsiVgjyo00B9i+NPruN5tKpN8OBbZB/fdEvEWpnbelj5WTf7IzGBmt7z48crD8zE+tae01Vj0Vw== ansible diff --git a/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml b/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml index ef798242..1b4156b6 100644 --- a/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml @@ -14,10 +14,10 @@ - name: Fill contents of install-config.yaml file copy: - dest: "~/files/macvtap.xml" + dest: "~/files/install-config.yaml" content: | apiVersion: v1 - baseDomain: + baseDomain: ocpz.wsclab.endicott.ibm.com compute: - architecture: s390x hyperthreading: Enabled @@ -29,7 +29,7 @@ name: master replicas: 3 metadata: - name: + name: distribution networking: clusterNetwork: - cidr: 10.128.0.0/14 @@ -40,5 +40,5 @@ platform: none: {} fips: false - pullSecret: '' + pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' sshKey: '' diff --git a/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml b/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml index 5667dae4..4e81c48b 100644 --- a/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml @@ -9,12 +9,12 @@ name: bastion memory: 4096 vcpus: 2 - disk size: 20 + disk size: 30 cdrom: /var/lib/libvirt/images/rhel83.iso accelerate: yes import: yes network: network=macvtap-net - extra-args: "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" + extra-args: ""ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" location: /rhcos-install qemu-commandline: "-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" noautoconsole: yes From 2bdc97353f885ac517ce700e87cd57669a0b5c36 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 21 Jul 2021 15:34:05 -0400 Subject: [PATCH 057/885] 
deleted ansible keys. Made small change to http_setup.yaml --- ansible | 49 ------------------- ansible.pub | 1 - .../bastion_s390x/tasks/http_setup.yaml | 7 +-- 3 files changed, 4 insertions(+), 53 deletions(-) delete mode 100644 ansible delete mode 100644 ansible.pub diff --git a/ansible b/ansible deleted file mode 100644 index b1c29d16..00000000 --- a/ansible +++ /dev/null @@ -1,49 +0,0 @@ ------BEGIN OPENSSH PRIVATE KEY----- -b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn -NhAAAAAwEAAQAAAgEA4dy9CDQ05+jhj8pyGhw7g9uOXwELDGOLzYxz/nnVcUznyBsJuiO+ -5jH0/G/EUmUegu5PuWHvULzWnbZMQm9Imnc4WYhnMEuM5Uk5Dp1Ox1WkfLENuJ5Ph5qOe2 -1MhDjfB5SoPN4TJcqKwgmNrj/qRic/AifqKkTygRAxptut/mByiMaJES0WvXAgJTTb8apa -bMmXMIndhhmCgYv225CR/pf9y3P0C3x83B4uuAcyx7Ilfa7Fj1al/AzYLNfs+LThIlTAF6 -COhf6wRTUNaKHxdVcgciJ3ae11smA02zu5q2WslwicwhqqO1tt159DrMdM3M2FYoJBGQ4Q -bjryALHmRE/L6s1ozH9wy0LH9gItEcSa3js3+zAlE7N2Lr8Ck7FI4FWR0WlNmcj7m28j8I -FjKNAPyY3WS0cpu7UJzM7M8SSXBeuk0vAgK19/jW1zn5tzui2MRVKQLNbwMnXNIXeXIkOE -2+/Tn3/QWWdhE+1Cs14l4vVadiM5vdRgcfPLP9XK0EhuS4HkFpbXwr9mpPlQ/PK92N5K+I -ivpBy2wopyrafZnUbqAaXo2Z9NRB5d3kqSQLCSB+enNbdT6+hjjOg1+xUYdwxEFH7EQrIl -YI8qNNAfYvjT67jebSqTfDgW2Qf33RLxFqZ23pY+Vk3+yMxgZre8+PHKw/MxPrWntNVY9F -cAAAdAT0bHFU9GxxUAAAAHc3NoLXJzYQAAAgEA4dy9CDQ05+jhj8pyGhw7g9uOXwELDGOL -zYxz/nnVcUznyBsJuiO+5jH0/G/EUmUegu5PuWHvULzWnbZMQm9Imnc4WYhnMEuM5Uk5Dp -1Ox1WkfLENuJ5Ph5qOe21MhDjfB5SoPN4TJcqKwgmNrj/qRic/AifqKkTygRAxptut/mBy -iMaJES0WvXAgJTTb8apabMmXMIndhhmCgYv225CR/pf9y3P0C3x83B4uuAcyx7Ilfa7Fj1 -al/AzYLNfs+LThIlTAF6COhf6wRTUNaKHxdVcgciJ3ae11smA02zu5q2WslwicwhqqO1tt -159DrMdM3M2FYoJBGQ4QbjryALHmRE/L6s1ozH9wy0LH9gItEcSa3js3+zAlE7N2Lr8Ck7 -FI4FWR0WlNmcj7m28j8IFjKNAPyY3WS0cpu7UJzM7M8SSXBeuk0vAgK19/jW1zn5tzui2M -RVKQLNbwMnXNIXeXIkOE2+/Tn3/QWWdhE+1Cs14l4vVadiM5vdRgcfPLP9XK0EhuS4HkFp -bXwr9mpPlQ/PK92N5K+IivpBy2wopyrafZnUbqAaXo2Z9NRB5d3kqSQLCSB+enNbdT6+hj -jOg1+xUYdwxEFH7EQrIlYI8qNNAfYvjT67jebSqTfDgW2Qf33RLxFqZ23pY+Vk3+yMxgZr 
-e8+PHKw/MxPrWntNVY9FcAAAADAQABAAACAQChPzolVUBAIN6IDXwQb/99T44A/XmpypfQ -k2lsUAa18LW5ArSbE1ICfb/tSM3cFR0HVKPOMcDs3yRJTBB9jX1D6cFtChjBPe7E8tWtPV -k0rkvUSYMGPt+AbDM0ov9xljiD7Pl/GvyIimUtyixALplOpE4zd9ORgc+vnKQV/9fkN28N -fs/0V0BY1mfcqDQt//S2neU2Xfas+iFV41rFvhy8qj4ithGQ8bsiWtn3wAvcza4+YDvi7X -Vq0LC0syMXG+tu2xqYw6N8viuR/SIphRQhNaaQVVLzaPgR9OVHbwhK+TeSTC+909OctNVi -ju19uHuDV6bWgS+Ad637RFHpdRhybRNZYwn+0vxEEljzIcW1f7bYSSPXCzWctL0Hdhn51/ -IVUj+SLsAAi/ZlBghKh7LrThBc2blwcDrQCm5N/ZZUP22HCGbE8H5kmSYYWNo9Q+p/weYM -w4Jv341N6N6PBJdZj2tQMitYlprsB2c0YixWW4BbNzwj4QcqLLIQtjbMkhjOdcYqo1DWov -PL8nB+SSf8mefqo1TKkRKzswuAGp5O2Q3mcD3+tGnkhQi6EPE3cGsLQUpN1Ot2y2peEitv -+z73FgFS/IGgfF0tfZms9DvDWnVQGu+BfBqYGooBb035pGF9yv/XOlZkpVYdSNrgcW7Ig3 -cP6Bd6Bg8w+eCmp2kr4QAAAQEAnprftFCahU3kWYtaGKTgL1Km0etJd2V/HEzSyIqIK9mW -wjpOlS49zbaka0UNIPNKWsMSVLNZdRn+31VeiEc4PuyfixP8o0pomtB7VB1dg05nYZ2RRi -MI8T+kFIcmLRWapQ+AYz2JEdQMi0pvJBIBqiIYygG3qub5bgATsOlMl+sayeCShClAhVbm -Zebj3HvW6eWOoiVSOdNsZVNmbvnx2TgTUqR6+LE978fU7cQDZqwdqsAOlct3tndkKcyBOm -lqlaz7ZaW+w0vLTtmv+RMxjJSJUhEWT6jXRmCwse8Z+lgGMMbR9e7zhRzC96AH57m6WXe1 -c+HQwP68fjQYaKpRAQAAAQEA+BYfwUWWOLOCc0HsFRkBSDoW1dFO/ta6n4536a45mLT2vL -CjnLxDQqbfW7rUih+Yo/tXzL8qYgMd2lDAPYuHcF+az1ORL7gbeUpJSbRQBIaMiAqfMUWE -zLQhNQ/+T5xXyM5XL/l7uzShKrma6Od/5O84acnd9gSy70pcl9If8E1qsKrCm4SOQuymmr -tsvhqeQ3e+NmWbQLzmi541nu1ck3CDZb2nHwqMIMfkh+NKitQ9vnakSuoiKxwopPUD86TK -rzfik1QyGRkRh2+DK09XLwnysrSSFU4s166/7qwLEnTh0qmKqGKJc/DRUIJd3Bkrvge1zb -bA/one9km0l1nn/QAAAQEA6REhs+WSK4pF/rRA+eu7iebUkk9zXCe4PzU7r5GAaGVxEcXR -sLeCzNpz0/S9EYTl8BefrnPvW6rZnj03T672xW5Ws81XxdwYb/GK9+VFfQPJzPT5Lb/Fgm -0Era89iIymJ8hFKaEiNBN8scUmt+H+ZqOv6NMLh/OboKAtFQWEySlwdL4ror+Z50Nhy0Ml -4cCGucEfQYnZTG2J0Z6854CgBp5JZ3aNggc/CJIdrhkB6ffyBqvXWXQ90M1SvqnP8E4eI8 -eJecPuk6RafoQrcmIfiZ5Mns51FgbObFA9TBzGouABugWobqx9QJ8jx0YLwT/jT1QGwyBz -/PxXLyxyv8fr4wAAAAdhbnNpYmxlAQI= ------END OPENSSH PRIVATE KEY----- diff --git a/ansible.pub b/ansible.pub deleted file mode 100644 index 7ccf5c0b..00000000 --- a/ansible.pub +++ /dev/null @@ -1 
+0,0 @@ -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDh3L0INDTn6OGPynIaHDuD245fAQsMY4vNjHP+edVxTOfIGwm6I77mMfT8b8RSZR6C7k+5Ye9QvNadtkxCb0iadzhZiGcwS4zlSTkOnU7HVaR8sQ24nk+Hmo57bUyEON8HlKg83hMlyorCCY2uP+pGJz8CJ+oqRPKBEDGm263+YHKIxokRLRa9cCAlNNvxqlpsyZcwid2GGYKBi/bbkJH+l/3Lc/QLfHzcHi64BzLHsiV9rsWPVqX8DNgs1+z4tOEiVMAXoI6F/rBFNQ1oofF1VyByIndp7XWyYDTbO7mrZayXCJzCGqo7W23Xn0Osx0zczYVigkEZDhBuOvIAseZET8vqzWjMf3DLQsf2Ai0RxJreOzf7MCUTs3YuvwKTsUjgVZHRaU2ZyPubbyPwgWMo0A/JjdZLRym7tQnMzszxJJcF66TS8CArX3+NbXOfm3O6LYxFUpAs1vAydc0hd5ciQ4Tb79Off9BZZ2ET7UKzXiXi9Vp2Izm91GBx88s/1crQSG5LgeQWltfCv2ak+VD88r3Y3kr4iK+kHLbCinKtp9mdRuoBpejZn01EHl3eSpJAsJIH56c1t1Pr6GOM6DX7FRh3DEQUfsRCsiVgjyo00B9i+NPruN5tKpN8OBbZB/fdEvEWpnbelj5WTf7IzGBmt7z48crD8zE+tae01Vj0Vw== ansible diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml index 329395ba..1f36284d 100644 --- a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml @@ -11,6 +11,7 @@ - name: install httpd dnf: name: httpd + state: latest ##this may not work, and is definitely not idempotent. If having problems, go to: ## https://dmsimard.com/2016/03/15/changing-the-ssh-port-with-ansible/ @@ -34,19 +35,19 @@ get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-kernel-s390x dest: /var/www/html/bin/rhcos-kernel - mode: '0440' + mode: '0755' - name: get mirrors 2 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-initramfs.s390x.img dest: /var/www/html/bin/rhcos-initramfs.img - mode: '0440' + mode: '0755' -##I think the destination path given from Filipe's document mistakenly left out the sub-directories, so I added them in here. 
- name: get mirrors 3 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-rootfs.s390x.img dest: /var/www/html/bin/rhcos-rootfs.img + mode: '0755' - name: enable httpd systemd: From 7d7fbdfb4f335e6115fe17f573477a6fe0134d72 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 21 Jul 2021 15:34:05 -0400 Subject: [PATCH 058/885] deleted ansible keys. Made small change to http_setup.yaml --- ansible | 49 ------------------- ansible.pub | 1 - .../bastion_s390x/tasks/http_setup.yaml | 7 +-- 3 files changed, 4 insertions(+), 53 deletions(-) delete mode 100644 ansible delete mode 100644 ansible.pub diff --git a/ansible b/ansible deleted file mode 100644 index b1c29d16..00000000 --- a/ansible +++ /dev/null @@ -1,49 +0,0 @@ ------BEGIN OPENSSH PRIVATE KEY----- -b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn -NhAAAAAwEAAQAAAgEA4dy9CDQ05+jhj8pyGhw7g9uOXwELDGOLzYxz/nnVcUznyBsJuiO+ -5jH0/G/EUmUegu5PuWHvULzWnbZMQm9Imnc4WYhnMEuM5Uk5Dp1Ox1WkfLENuJ5Ph5qOe2 -1MhDjfB5SoPN4TJcqKwgmNrj/qRic/AifqKkTygRAxptut/mByiMaJES0WvXAgJTTb8apa -bMmXMIndhhmCgYv225CR/pf9y3P0C3x83B4uuAcyx7Ilfa7Fj1al/AzYLNfs+LThIlTAF6 -COhf6wRTUNaKHxdVcgciJ3ae11smA02zu5q2WslwicwhqqO1tt159DrMdM3M2FYoJBGQ4Q -bjryALHmRE/L6s1ozH9wy0LH9gItEcSa3js3+zAlE7N2Lr8Ck7FI4FWR0WlNmcj7m28j8I -FjKNAPyY3WS0cpu7UJzM7M8SSXBeuk0vAgK19/jW1zn5tzui2MRVKQLNbwMnXNIXeXIkOE -2+/Tn3/QWWdhE+1Cs14l4vVadiM5vdRgcfPLP9XK0EhuS4HkFpbXwr9mpPlQ/PK92N5K+I -ivpBy2wopyrafZnUbqAaXo2Z9NRB5d3kqSQLCSB+enNbdT6+hjjOg1+xUYdwxEFH7EQrIl -YI8qNNAfYvjT67jebSqTfDgW2Qf33RLxFqZ23pY+Vk3+yMxgZre8+PHKw/MxPrWntNVY9F -cAAAdAT0bHFU9GxxUAAAAHc3NoLXJzYQAAAgEA4dy9CDQ05+jhj8pyGhw7g9uOXwELDGOL -zYxz/nnVcUznyBsJuiO+5jH0/G/EUmUegu5PuWHvULzWnbZMQm9Imnc4WYhnMEuM5Uk5Dp -1Ox1WkfLENuJ5Ph5qOe21MhDjfB5SoPN4TJcqKwgmNrj/qRic/AifqKkTygRAxptut/mBy -iMaJES0WvXAgJTTb8apabMmXMIndhhmCgYv225CR/pf9y3P0C3x83B4uuAcyx7Ilfa7Fj1 -al/AzYLNfs+LThIlTAF6COhf6wRTUNaKHxdVcgciJ3ae11smA02zu5q2WslwicwhqqO1tt 
-159DrMdM3M2FYoJBGQ4QbjryALHmRE/L6s1ozH9wy0LH9gItEcSa3js3+zAlE7N2Lr8Ck7 -FI4FWR0WlNmcj7m28j8IFjKNAPyY3WS0cpu7UJzM7M8SSXBeuk0vAgK19/jW1zn5tzui2M -RVKQLNbwMnXNIXeXIkOE2+/Tn3/QWWdhE+1Cs14l4vVadiM5vdRgcfPLP9XK0EhuS4HkFp -bXwr9mpPlQ/PK92N5K+IivpBy2wopyrafZnUbqAaXo2Z9NRB5d3kqSQLCSB+enNbdT6+hj -jOg1+xUYdwxEFH7EQrIlYI8qNNAfYvjT67jebSqTfDgW2Qf33RLxFqZ23pY+Vk3+yMxgZr -e8+PHKw/MxPrWntNVY9FcAAAADAQABAAACAQChPzolVUBAIN6IDXwQb/99T44A/XmpypfQ -k2lsUAa18LW5ArSbE1ICfb/tSM3cFR0HVKPOMcDs3yRJTBB9jX1D6cFtChjBPe7E8tWtPV -k0rkvUSYMGPt+AbDM0ov9xljiD7Pl/GvyIimUtyixALplOpE4zd9ORgc+vnKQV/9fkN28N -fs/0V0BY1mfcqDQt//S2neU2Xfas+iFV41rFvhy8qj4ithGQ8bsiWtn3wAvcza4+YDvi7X -Vq0LC0syMXG+tu2xqYw6N8viuR/SIphRQhNaaQVVLzaPgR9OVHbwhK+TeSTC+909OctNVi -ju19uHuDV6bWgS+Ad637RFHpdRhybRNZYwn+0vxEEljzIcW1f7bYSSPXCzWctL0Hdhn51/ -IVUj+SLsAAi/ZlBghKh7LrThBc2blwcDrQCm5N/ZZUP22HCGbE8H5kmSYYWNo9Q+p/weYM -w4Jv341N6N6PBJdZj2tQMitYlprsB2c0YixWW4BbNzwj4QcqLLIQtjbMkhjOdcYqo1DWov -PL8nB+SSf8mefqo1TKkRKzswuAGp5O2Q3mcD3+tGnkhQi6EPE3cGsLQUpN1Ot2y2peEitv -+z73FgFS/IGgfF0tfZms9DvDWnVQGu+BfBqYGooBb035pGF9yv/XOlZkpVYdSNrgcW7Ig3 -cP6Bd6Bg8w+eCmp2kr4QAAAQEAnprftFCahU3kWYtaGKTgL1Km0etJd2V/HEzSyIqIK9mW -wjpOlS49zbaka0UNIPNKWsMSVLNZdRn+31VeiEc4PuyfixP8o0pomtB7VB1dg05nYZ2RRi -MI8T+kFIcmLRWapQ+AYz2JEdQMi0pvJBIBqiIYygG3qub5bgATsOlMl+sayeCShClAhVbm -Zebj3HvW6eWOoiVSOdNsZVNmbvnx2TgTUqR6+LE978fU7cQDZqwdqsAOlct3tndkKcyBOm -lqlaz7ZaW+w0vLTtmv+RMxjJSJUhEWT6jXRmCwse8Z+lgGMMbR9e7zhRzC96AH57m6WXe1 -c+HQwP68fjQYaKpRAQAAAQEA+BYfwUWWOLOCc0HsFRkBSDoW1dFO/ta6n4536a45mLT2vL -CjnLxDQqbfW7rUih+Yo/tXzL8qYgMd2lDAPYuHcF+az1ORL7gbeUpJSbRQBIaMiAqfMUWE -zLQhNQ/+T5xXyM5XL/l7uzShKrma6Od/5O84acnd9gSy70pcl9If8E1qsKrCm4SOQuymmr -tsvhqeQ3e+NmWbQLzmi541nu1ck3CDZb2nHwqMIMfkh+NKitQ9vnakSuoiKxwopPUD86TK -rzfik1QyGRkRh2+DK09XLwnysrSSFU4s166/7qwLEnTh0qmKqGKJc/DRUIJd3Bkrvge1zb -bA/one9km0l1nn/QAAAQEA6REhs+WSK4pF/rRA+eu7iebUkk9zXCe4PzU7r5GAaGVxEcXR -sLeCzNpz0/S9EYTl8BefrnPvW6rZnj03T672xW5Ws81XxdwYb/GK9+VFfQPJzPT5Lb/Fgm 
-0Era89iIymJ8hFKaEiNBN8scUmt+H+ZqOv6NMLh/OboKAtFQWEySlwdL4ror+Z50Nhy0Ml -4cCGucEfQYnZTG2J0Z6854CgBp5JZ3aNggc/CJIdrhkB6ffyBqvXWXQ90M1SvqnP8E4eI8 -eJecPuk6RafoQrcmIfiZ5Mns51FgbObFA9TBzGouABugWobqx9QJ8jx0YLwT/jT1QGwyBz -/PxXLyxyv8fr4wAAAAdhbnNpYmxlAQI= ------END OPENSSH PRIVATE KEY----- diff --git a/ansible.pub b/ansible.pub deleted file mode 100644 index 7ccf5c0b..00000000 --- a/ansible.pub +++ /dev/null @@ -1 +0,0 @@ -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDh3L0INDTn6OGPynIaHDuD245fAQsMY4vNjHP+edVxTOfIGwm6I77mMfT8b8RSZR6C7k+5Ye9QvNadtkxCb0iadzhZiGcwS4zlSTkOnU7HVaR8sQ24nk+Hmo57bUyEON8HlKg83hMlyorCCY2uP+pGJz8CJ+oqRPKBEDGm263+YHKIxokRLRa9cCAlNNvxqlpsyZcwid2GGYKBi/bbkJH+l/3Lc/QLfHzcHi64BzLHsiV9rsWPVqX8DNgs1+z4tOEiVMAXoI6F/rBFNQ1oofF1VyByIndp7XWyYDTbO7mrZayXCJzCGqo7W23Xn0Osx0zczYVigkEZDhBuOvIAseZET8vqzWjMf3DLQsf2Ai0RxJreOzf7MCUTs3YuvwKTsUjgVZHRaU2ZyPubbyPwgWMo0A/JjdZLRym7tQnMzszxJJcF66TS8CArX3+NbXOfm3O6LYxFUpAs1vAydc0hd5ciQ4Tb79Off9BZZ2ET7UKzXiXi9Vp2Izm91GBx88s/1crQSG5LgeQWltfCv2ak+VD88r3Y3kr4iK+kHLbCinKtp9mdRuoBpejZn01EHl3eSpJAsJIH56c1t1Pr6GOM6DX7FRh3DEQUfsRCsiVgjyo00B9i+NPruN5tKpN8OBbZB/fdEvEWpnbelj5WTf7IzGBmt7z48crD8zE+tae01Vj0Vw== ansible diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml index 329395ba..1f36284d 100644 --- a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml @@ -11,6 +11,7 @@ - name: install httpd dnf: name: httpd + state: latest ##this may not work, and is definitely not idempotent. 
If having problems, go to: ## https://dmsimard.com/2016/03/15/changing-the-ssh-port-with-ansible/ @@ -34,19 +35,19 @@ get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-kernel-s390x dest: /var/www/html/bin/rhcos-kernel - mode: '0440' + mode: '0755' - name: get mirrors 2 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-initramfs.s390x.img dest: /var/www/html/bin/rhcos-initramfs.img - mode: '0440' + mode: '0755' -##I think the destination path given from Filipe's document mistakenly left out the sub-directories, so I added them in here. - name: get mirrors 3 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-rootfs.s390x.img dest: /var/www/html/bin/rhcos-rootfs.img + mode: '0755' - name: enable httpd systemd: From d57b5ea384e1154048739c64e182f2670826823f Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 21 Jul 2021 16:30:59 -0500 Subject: [PATCH 059/885] update get_ocp to download OCP installer and client --- get_ocp.installer.yaml | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 get_ocp.installer.yaml diff --git a/get_ocp.installer.yaml b/get_ocp.installer.yaml new file mode 100644 index 00000000..f7330a4e --- /dev/null +++ b/get_ocp.installer.yaml @@ -0,0 +1,30 @@ +- hosts: bastion_server + become: true + tasks: + + - name: Download OCP Client + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz + dest: /ocpinst/ + mode: '0755' + + - name: Unzip OCP Client + ansible.builtin.unarchive: + src: /ocpinst/openshift-client-linux.tar.gz + dest: /ocpinst/ + + - name: Download OCP Installer + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + dest: /ocpinst/ + mode: '0755' + + - name: 
Unzip OCP Installer + ansible.builtin.unarchive: + src: /ocpinst/openshift-client-linux.tar.gz + dest: /ocpinst/ + + - name: Change file permissions + + - name: Copy kubectl, oc client, and install program to /usr/local/bin + \ No newline at end of file From c9f4f5e14592c25be8ec3acf780cf86ca33f497a Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 21 Jul 2021 16:30:59 -0500 Subject: [PATCH 060/885] update get_ocp to download OCP installer and client --- get_ocp.installer.yaml | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 get_ocp.installer.yaml diff --git a/get_ocp.installer.yaml b/get_ocp.installer.yaml new file mode 100644 index 00000000..f7330a4e --- /dev/null +++ b/get_ocp.installer.yaml @@ -0,0 +1,30 @@ +- hosts: bastion_server + become: true + tasks: + + - name: Download OCP Client + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz + dest: /ocpinst/ + mode: '0755' + + - name: Unzip OCP Client + ansible.builtin.unarchive: + src: /ocpinst/openshift-client-linux.tar.gz + dest: /ocpinst/ + + - name: Download OCP Installer + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + dest: /ocpinst/ + mode: '0755' + + - name: Unzip OCP Installer + ansible.builtin.unarchive: + src: /ocpinst/openshift-client-linux.tar.gz + dest: /ocpinst/ + + - name: Change file permissions + + - name: Copy kubectl, oc client, and install program to /usr/local/bin + \ No newline at end of file From fd4efcf0da8e5299f857b68ba21deaa78756f356 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 21 Jul 2021 21:27:28 -0500 Subject: [PATCH 061/885] renamed haproxy config file to J2 for template --- haproxy.cfg => haproxy.cfg.j2 | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename haproxy.cfg => haproxy.cfg.j2 (100%) diff --git a/haproxy.cfg b/haproxy.cfg.j2 similarity index 100% rename from haproxy.cfg rename to 
haproxy.cfg.j2 From aaa98f1850c8233887c8224170a154da0560b391 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 21 Jul 2021 21:27:28 -0500 Subject: [PATCH 062/885] renamed haproxy config file to J2 for template --- haproxy.cfg => haproxy.cfg.j2 | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename haproxy.cfg => haproxy.cfg.j2 (100%) diff --git a/haproxy.cfg b/haproxy.cfg.j2 similarity index 100% rename from haproxy.cfg rename to haproxy.cfg.j2 From 521754e2b91f00f5f37c5aaceb6c8d1ea280bc13 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 22 Jul 2021 13:17:00 -0400 Subject: [PATCH 063/885] Fixed http_setup.yaml so that it successfully changes the default port on bastion to listen to 8080 instead of 80. --- .../bastion_s390x/tasks/http_setup.yaml | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml index 1f36284d..d750f828 100644 --- a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml @@ -4,20 +4,23 @@ become: true tasks: - - name: update repository index - dnf: - update_cache: yes - - - name: install httpd - dnf: - name: httpd - state: latest + ## - name: update repository index + ## dnf: + ## update_cache: yes + + ## - name: install httpd + ## dnf: + ## name: httpd + ## state: latest ##this may not work, and is definitely not idempotent. 
If having problems, go to: ## https://dmsimard.com/2016/03/15/changing-the-ssh-port-with-ansible/ - - name: change default port to 8080 - set_fact: - ansible_port: 8080 + - name: Ensure the default Apache port is 8080 + lineinfile: + path: /etc/httpd/conf/httpd.conf + regexp: '^Listen ' + insertafter: '^#Listen ' + line: Listen 8080 - name: create directory bin for mirrors file: From e33643fda9c88a38a869bd3a08947d68f9a433f7 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 22 Jul 2021 13:17:00 -0400 Subject: [PATCH 064/885] Fixed http_setup.yaml so that it successfully changes the default port on bastion to listen to 8080 instead of 80. --- .../bastion_s390x/tasks/http_setup.yaml | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml index 1f36284d..d750f828 100644 --- a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml @@ -4,20 +4,23 @@ become: true tasks: - - name: update repository index - dnf: - update_cache: yes - - - name: install httpd - dnf: - name: httpd - state: latest + ## - name: update repository index + ## dnf: + ## update_cache: yes + + ## - name: install httpd + ## dnf: + ## name: httpd + ## state: latest ##this may not work, and is definitely not idempotent. 
If having problems, go to: ## https://dmsimard.com/2016/03/15/changing-the-ssh-port-with-ansible/ - - name: change default port to 8080 - set_fact: - ansible_port: 8080 + - name: Ensure the default Apache port is 8080 + lineinfile: + path: /etc/httpd/conf/httpd.conf + regexp: '^Listen ' + insertafter: '^#Listen ' + line: Listen 8080 - name: create directory bin for mirrors file: From 612c46272c6d624b2bd7e8ef4524e355424533d9 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 22 Jul 2021 13:24:07 -0400 Subject: [PATCH 065/885] Copied the working playbook section from http_test.yaml to the main playbook http_setup.yaml. --- .../bastion_server/bastion_s390x/tasks/http_setup.yaml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml index d750f828..91f60519 100644 --- a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml @@ -13,14 +13,12 @@ ## name: httpd ## state: latest -##this may not work, and is definitely not idempotent. If having problems, go to: -## https://dmsimard.com/2016/03/15/changing-the-ssh-port-with-ansible/ - name: Ensure the default Apache port is 8080 - lineinfile: + replace: path: /etc/httpd/conf/httpd.conf - regexp: '^Listen ' - insertafter: '^#Listen ' - line: Listen 8080 + regexp: '^Listen 80' + replace: 'Listen 8080' + backup: yes - name: create directory bin for mirrors file: From daaaf6b601fc72eccc53dbc2ed03b8d027684074 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 22 Jul 2021 13:24:07 -0400 Subject: [PATCH 066/885] Copied the working playbook section from http_test.yaml to the main playbook http_setup.yaml. 
--- .../bastion_server/bastion_s390x/tasks/http_setup.yaml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml index d750f828..91f60519 100644 --- a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml @@ -13,14 +13,12 @@ ## name: httpd ## state: latest -##this may not work, and is definitely not idempotent. If having problems, go to: -## https://dmsimard.com/2016/03/15/changing-the-ssh-port-with-ansible/ - name: Ensure the default Apache port is 8080 - lineinfile: + replace: path: /etc/httpd/conf/httpd.conf - regexp: '^Listen ' - insertafter: '^#Listen ' - line: Listen 8080 + regexp: '^Listen 80' + replace: 'Listen 8080' + backup: yes - name: create directory bin for mirrors file: From 17a220d1ac07bc4cf6f64f45cd2007b086c3bc32 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 22 Jul 2021 14:38:35 -0500 Subject: [PATCH 067/885] added download and copy of OCP client and installer playbook --- get-ocp.yml | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 get-ocp.yml diff --git a/get-ocp.yml b/get-ocp.yml new file mode 100644 index 00000000..d43f0074 --- /dev/null +++ b/get-ocp.yml @@ -0,0 +1,48 @@ +- hosts: kvm_host + become: true + tasks: + + - name: create OCP download landing directory + file: + path: /ocpinst/ + state: directory + + - name: Unzip OCP Client + ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + + - name: Unzip OCP Installer + ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + + - name: Copy kubectl file + ansible.builtin.copy: + src: 
/ocpinst/kubectl + dest: /usr/local/bin/kubectl + remote_src: yes + owner: root + group: root + mode: '0755' + + - name: Copy oc file + ansible.builtin.copy: + src: /ocpinst/oc + dest: /usr/local/bin/oc + remote_src: yes + owner: root + group: root + mode: '0755' + + - name: Copy openshift-install file + ansible.builtin.copy: + src: /ocpinst/openshift-install + dest: /usr/local/bin/openshift-install + remote_src: yes + owner: root + group: root + mode: '0755' + From 54f0e07f5da4f450358c6824c6bbcb7bdd414f04 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 22 Jul 2021 14:38:35 -0500 Subject: [PATCH 068/885] added download and copy of OCP client and installer playbook --- get-ocp.yml | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 get-ocp.yml diff --git a/get-ocp.yml b/get-ocp.yml new file mode 100644 index 00000000..d43f0074 --- /dev/null +++ b/get-ocp.yml @@ -0,0 +1,48 @@ +- hosts: kvm_host + become: true + tasks: + + - name: create OCP download landing directory + file: + path: /ocpinst/ + state: directory + + - name: Unzip OCP Client + ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + + - name: Unzip OCP Installer + ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + + - name: Copy kubectl file + ansible.builtin.copy: + src: /ocpinst/kubectl + dest: /usr/local/bin/kubectl + remote_src: yes + owner: root + group: root + mode: '0755' + + - name: Copy oc file + ansible.builtin.copy: + src: /ocpinst/oc + dest: /usr/local/bin/oc + remote_src: yes + owner: root + group: root + mode: '0755' + + - name: Copy openshift-install file + ansible.builtin.copy: + src: /ocpinst/openshift-install + dest: /usr/local/bin/openshift-install + remote_src: yes + owner: root + group: 
root + mode: '0755' + From ff3e09468deee9f89cdfd69be7acd73ed6b3861c Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 22 Jul 2021 14:55:47 -0500 Subject: [PATCH 069/885] putting copy images file back --- roles/bastion_server/get-images.yml | 30 +++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 roles/bastion_server/get-images.yml diff --git a/roles/bastion_server/get-images.yml b/roles/bastion_server/get-images.yml new file mode 100644 index 00000000..9563e17b --- /dev/null +++ b/roles/bastion_server/get-images.yml @@ -0,0 +1,30 @@ +--- + +hosts: kvm_host +become: true +tasks: + +- name: download RHCOS initramfs + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + dest: /var/lib/libvirt/images + mode: 0755 + +- name: download RHCOS kernel + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x + dest: /var/lib/libvirt/images + mode: 0755 + +- name: download RHCOS rootfs + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + dest: /var/lib/libvirt/images + mode: 0755 + +- name: download QCOW2 image + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz + dest: /var/lib/libvirt/images + mode: 0755 + From 787247e9f6356eb9c265fcc7dcd467a7cc688ddb Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 22 Jul 2021 14:55:47 -0500 Subject: [PATCH 070/885] putting copy images file back --- roles/bastion_server/get-images.yml | 30 +++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 roles/bastion_server/get-images.yml diff --git a/roles/bastion_server/get-images.yml b/roles/bastion_server/get-images.yml new file mode 100644 index 00000000..9563e17b --- /dev/null +++ b/roles/bastion_server/get-images.yml @@ -0,0 
+1,30 @@ +--- + +hosts: kvm_host +become: true +tasks: + +- name: download RHCOS initramfs + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + dest: /var/lib/libvirt/images + mode: 0755 + +- name: download RHCOS kernel + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x + dest: /var/lib/libvirt/images + mode: 0755 + +- name: download RHCOS rootfs + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + dest: /var/lib/libvirt/images + mode: 0755 + +- name: download QCOW2 image + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz + dest: /var/lib/libvirt/images + mode: 0755 + From fe9455aed686db0485911ad2fdaa3a2244196c04 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 22 Jul 2021 15:57:39 -0400 Subject: [PATCH 071/885] Made some changes to http_setup.yaml and deleted get_ocp.installer.yaml --- get_ocp.installer.yaml | 30 ------------------- .../bastion_s390x/tasks/http_setup.yaml | 16 ++++++++-- .../bastion_s390x/tasks/http_test.yaml | 6 ++++ 3 files changed, 20 insertions(+), 32 deletions(-) delete mode 100644 get_ocp.installer.yaml create mode 100644 roles/bastion_server/bastion_s390x/tasks/http_test.yaml diff --git a/get_ocp.installer.yaml b/get_ocp.installer.yaml deleted file mode 100644 index f7330a4e..00000000 --- a/get_ocp.installer.yaml +++ /dev/null @@ -1,30 +0,0 @@ -- hosts: bastion_server - become: true - tasks: - - - name: Download OCP Client - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz - dest: /ocpinst/ - mode: '0755' - - - name: Unzip OCP Client - ansible.builtin.unarchive: - src: /ocpinst/openshift-client-linux.tar.gz - dest: /ocpinst/ - - - name: Download OCP 
Installer - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz - dest: /ocpinst/ - mode: '0755' - - - name: Unzip OCP Installer - ansible.builtin.unarchive: - src: /ocpinst/openshift-client-linux.tar.gz - dest: /ocpinst/ - - - name: Change file permissions - - - name: Copy kubectl, oc client, and install program to /usr/local/bin - \ No newline at end of file diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml index 91f60519..42520d51 100644 --- a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml @@ -20,6 +20,18 @@ replace: 'Listen 8080' backup: yes + - name: Ensure the SSL default port is 4443 + replace: + path: /etc/httpd/conf.d/ssl.conf + regexp: '^Listen 443 https' + replace: 'Listen 4443 https' + backup: yes + + - name: restart httpd to reflect changes to port + service: + name: httpd + state: restarted + - name: create directory bin for mirrors file: path: /var/www/html/bin @@ -51,12 +63,12 @@ mode: '0755' - name: enable httpd - systemd: + service: name: httpd enable: yes - name: check httpd status - systemd: + service: state: started name: httpd diff --git a/roles/bastion_server/bastion_s390x/tasks/http_test.yaml b/roles/bastion_server/bastion_s390x/tasks/http_test.yaml new file mode 100644 index 00000000..4014775f --- /dev/null +++ b/roles/bastion_server/bastion_s390x/tasks/http_test.yaml @@ -0,0 +1,6 @@ +--- + +- hosts: bastion_server + become: true + tasks: + From 524b585f815c68f01217301cb8e91c9c4497bf0b Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 22 Jul 2021 15:57:39 -0400 Subject: [PATCH 072/885] Made some changes to http_setup.yaml and deleted get_ocp.installer.yaml --- get_ocp.installer.yaml | 30 ------------------- .../bastion_s390x/tasks/http_setup.yaml | 16 ++++++++-- .../bastion_s390x/tasks/http_test.yaml | 6 ++++ 3 files 
changed, 20 insertions(+), 32 deletions(-) delete mode 100644 get_ocp.installer.yaml create mode 100644 roles/bastion_server/bastion_s390x/tasks/http_test.yaml diff --git a/get_ocp.installer.yaml b/get_ocp.installer.yaml deleted file mode 100644 index f7330a4e..00000000 --- a/get_ocp.installer.yaml +++ /dev/null @@ -1,30 +0,0 @@ -- hosts: bastion_server - become: true - tasks: - - - name: Download OCP Client - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz - dest: /ocpinst/ - mode: '0755' - - - name: Unzip OCP Client - ansible.builtin.unarchive: - src: /ocpinst/openshift-client-linux.tar.gz - dest: /ocpinst/ - - - name: Download OCP Installer - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz - dest: /ocpinst/ - mode: '0755' - - - name: Unzip OCP Installer - ansible.builtin.unarchive: - src: /ocpinst/openshift-client-linux.tar.gz - dest: /ocpinst/ - - - name: Change file permissions - - - name: Copy kubectl, oc client, and install program to /usr/local/bin - \ No newline at end of file diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml index 91f60519..42520d51 100644 --- a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml @@ -20,6 +20,18 @@ replace: 'Listen 8080' backup: yes + - name: Ensure the SSL default port is 4443 + replace: + path: /etc/httpd/conf.d/ssl.conf + regexp: '^Listen 443 https' + replace: 'Listen 4443 https' + backup: yes + + - name: restart httpd to reflect changes to port + service: + name: httpd + state: restarted + - name: create directory bin for mirrors file: path: /var/www/html/bin @@ -51,12 +63,12 @@ mode: '0755' - name: enable httpd - systemd: + service: name: httpd enable: yes - name: check httpd status - systemd: + service: state: started name: httpd diff 
--git a/roles/bastion_server/bastion_s390x/tasks/http_test.yaml b/roles/bastion_server/bastion_s390x/tasks/http_test.yaml new file mode 100644 index 00000000..4014775f --- /dev/null +++ b/roles/bastion_server/bastion_s390x/tasks/http_test.yaml @@ -0,0 +1,6 @@ +--- + +- hosts: bastion_server + become: true + tasks: + From 45dc56c980060518fa3acc29fd604cad8f3fbdef Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 22 Jul 2021 15:24:33 -0500 Subject: [PATCH 073/885] corrected dwnload image file playbook for qemu image --- .../tasks/dwnload-image-files.yaml | 26 +++---------------- 1 file changed, 4 insertions(+), 22 deletions(-) diff --git a/roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml b/roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml index c2ecaa80..28538f24 100644 --- a/roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml +++ b/roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml @@ -4,29 +4,11 @@ become: true tasks: - - name: download RHCOS initramfs - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + - name: Unzip OCP Client + ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images - mode: 0755 - - - name: download RHCOS kernel - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x - dest: /var/lib/libvirt/images - mode: 0755 - - - name: download RHCOS rootfs - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/lib/libvirt/images - mode: 0755 - - - name: download QCOW2 image - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz - dest: /var/lib/libvirt/images - mode: 0755 + remote_src: 
yes From 13e24db9aa707a40efe3e7c26c4e95fcfb8f223f Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 22 Jul 2021 15:24:33 -0500 Subject: [PATCH 074/885] corrected dwnload image file playbook for qemu image --- .../tasks/dwnload-image-files.yaml | 26 +++---------------- 1 file changed, 4 insertions(+), 22 deletions(-) diff --git a/roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml b/roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml index c2ecaa80..28538f24 100644 --- a/roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml +++ b/roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml @@ -4,29 +4,11 @@ become: true tasks: - - name: download RHCOS initramfs - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + - name: Unzip OCP Client + ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images - mode: 0755 - - - name: download RHCOS kernel - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x - dest: /var/lib/libvirt/images - mode: 0755 - - - name: download RHCOS rootfs - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/lib/libvirt/images - mode: 0755 - - - name: download QCOW2 image - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz - dest: /var/lib/libvirt/images - mode: 0755 + remote_src: yes From 14c6dc12f90bc11e95a7dcba4112ffcd26aa51e1 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 22 Jul 2021 16:28:43 -0400 Subject: [PATCH 075/885] Changed mirror links for http on bastion --- roles/bastion_server/bastion_s390x/tasks/http_setup.yaml | 7 +++---- 1 file changed, 3 insertions(+), 4 
deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml index 42520d51..62fb7e58 100644 --- a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml @@ -46,19 +46,19 @@ - name: get mirrors 1 get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-kernel-s390x + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/www/html/bin/rhcos-kernel mode: '0755' - name: get mirrors 2 get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-initramfs.s390x.img + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/www/html/bin/rhcos-initramfs.img mode: '0755' - name: get mirrors 3 get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-rootfs.s390x.img + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin/rhcos-rootfs.img mode: '0755' @@ -72,4 +72,3 @@ state: started name: httpd - From ade6d67151780f8060c4ca621c1ad3ed1466858a Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 22 Jul 2021 16:28:43 -0400 Subject: [PATCH 076/885] Changed mirror links for http on bastion --- roles/bastion_server/bastion_s390x/tasks/http_setup.yaml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml index 42520d51..62fb7e58 100644 --- a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml @@ -46,19 +46,19 @@ - 
name: get mirrors 1 get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-kernel-s390x + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/www/html/bin/rhcos-kernel mode: '0755' - name: get mirrors 2 get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-initramfs.s390x.img + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/www/html/bin/rhcos-initramfs.img mode: '0755' - name: get mirrors 3 get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-rootfs.s390x.img + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin/rhcos-rootfs.img mode: '0755' @@ -72,4 +72,3 @@ state: started name: httpd - From 02af79f424d60ba21936c4edd31a318625096751 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 23 Jul 2021 09:24:50 -0500 Subject: [PATCH 077/885] added pullsecret and ansible key to ocp install-config file --- install-config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/install-config.yml b/install-config.yml index 4b2caeae..ba837ca2 100644 --- a/install-config.yml +++ b/install-config.yml @@ -22,5 +22,5 @@ networking: platform: none: {} fips: false -pullSecret: '' -sshKey: '' \ No newline at end of file +pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' +sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDh3L0INDTn6OGPynIaHDuD245fAQsMY4vNjHP+edVxTOfIGwm6I77mMfT8b8RSZR6C7k+5Ye9QvNadtkxCb0iadzhZiGcwS4zlSTkOnU7HVaR8sQ24nk+Hmo57bUyEON8HlKg83hMlyorCCY2uP+pGJz8CJ+oqRPKBEDGm263+YHKIxokRLRa9cCAlNNvxqlpsyZcwid2GGYKBi/bbkJH+l/3Lc/QLfHzcHi64BzLHsiV9rsWPVqX8DNgs1+z4tOEiVMAXoI6F/rBFNQ1oofF1VyByIndp7XWyYDTbO7mrZayXCJzCGqo7W23Xn0Osx0zczYVigkEZDhBuOvIAseZET8vqzWjMf3DLQsf2Ai0RxJreOzf7MCUTs3YuvwKTsUjgVZHRaU2ZyPubbyPwgWMo0A/JjdZLRym7tQnMzszxJJcF66TS8CArX3+NbXOfm3O6LYxFUpAs1vAydc0hd5ciQ4Tb79Off9BZZ2ET7UKzXiXi9Vp2Izm91GBx88s/1crQSG5LgeQWltfCv2ak+VD88r3Y3kr4iK+kHLbCinKtp9mdRuoBpejZn01EHl3eSpJAsJIH56c1t1Pr6GOM6DX7FRh3DEQUfsRCsiVgjyo00B9i+NPruN5tKpN8OBbZB/fdEvEWpnbelj5WTf7IzGBmt7z48crD8zE+tae01Vj0Vw== ansible' \ No newline at end of file From 29f1d8dc9bef8e335c46d782df65035b8087b827 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 23 Jul 2021 09:24:50 -0500 Subject: [PATCH 078/885] added pullsecret and ansible key to ocp install-config file --- install-config.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/install-config.yml b/install-config.yml index 4b2caeae..ba837ca2 100644 --- a/install-config.yml +++ b/install-config.yml @@ -22,5 +22,5 @@ networking: platform: none: {} fips: false 
-pullSecret: '' -sshKey: '' \ No newline at end of file +pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElye
WZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' +sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDh3L0INDTn6OGPynIaHDuD245fAQsMY4vNjHP+edVxTOfIGwm6I77mMfT8b8RSZR6C7k+5Ye9QvNadtkxCb0iadzhZiGcwS4zlSTkOnU7HVaR8sQ24nk+Hmo57bUyEON8HlKg83hMlyorCCY2uP+pGJz8CJ+oqRPKBEDGm263+YHKIxokRLRa9cCAlNNvxqlpsyZcwid2GGYKBi/bbkJH+l/3Lc/QLfHzcHi64BzLHsiV9rsWPVqX8DNgs1+z4tOEiVMAXoI6F/rBFNQ1oofF1VyByIndp7XWyYDTbO7mrZayXCJzCGqo7W23Xn0Osx0zczYVigkEZDhBuOvIAseZET8vqzWjMf3DLQsf2Ai0RxJreOzf7MCUTs3YuvwKTsUjgVZHRaU2ZyPubbyPwgWMo0A/JjdZLRym7tQnMzszxJJcF66TS8CArX3+NbXOfm3O6LYxFUpAs1vAydc0hd5ciQ4Tb79Off9BZZ2ET7UKzXiXi9Vp2Izm91GBx88s/1crQSG5LgeQWltfCv2ak+VD88r3Y3kr4iK+kHLbCinKtp9mdRuoBpejZn01EHl3eSpJAsJIH56c1t1Pr6GOM6DX7FRh3DEQUfsRCsiVgjyo00B9i+NPruN5tKpN8OBbZB/fdEvEWpnbelj5WTf7IzGBmt7z48crD8zE+tae01Vj0Vw== ansible' \ No newline at end of file From 34d3c9bc6f1e9b7f76d3c8939d16781ce315f726 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 23 Jul 2021 15:57:06 -0400 Subject: [PATCH 079/885] created a playbook to copy over the load balancer haproxy config file to the bastion. Moved the config file to bastion templates folder. 
--- .../bastion_s390x/tasks/http_setup.yaml | 49 ++++++++++++++++--- .../bastion_s390x/tasks/load_balancer.yaml | 14 ++++++ .../bastion_s390x/templates/haproxy.cfg.j2 | 0 3 files changed, 56 insertions(+), 7 deletions(-) create mode 100644 roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml rename haproxy.cfg.j2 => roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 (100%) diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml index 62fb7e58..00bd3cd3 100644 --- a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml @@ -16,7 +16,7 @@ - name: Ensure the default Apache port is 8080 replace: path: /etc/httpd/conf/httpd.conf - regexp: '^Listen 80' + regexp: '^Listen 80 ' replace: 'Listen 8080' backup: yes @@ -27,11 +27,35 @@ replace: 'Listen 4443 https' backup: yes - - name: restart httpd to reflect changes to port + - name: restart httpd to reflect changes to port service: name: httpd state: restarted + - name: Allow all access to tcp port 8080 + community.general.ufw: + rule: allow + port: '8080' + proto: tcp + + - name: Allow all access to tcp port 80 + community.general.ufw: + rule: allow + port: '80' + proto: tcp + + - name: Allow all access to tcp port 443 + community.general.ufw: + rule: allow + port: '443' + proto: tcp + + - name: Allow all access to tcp port 4443 + community.general.ufw: + rule: allow + port: '4443' + proto: tcp + - name: create directory bin for mirrors file: path: /var/www/html/bin @@ -47,28 +71,39 @@ - name: get mirrors 1 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x - dest: /var/www/html/bin/rhcos-kernel + dest: /var/www/html/bin + remote_src: yes mode: '0755' - name: get mirrors 2 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img 
- dest: /var/www/html/bin/rhcos-initramfs.img + dest: /var/www/html/bin + remote_src: yes mode: '0755' - name: get mirrors 3 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/www/html/bin/rhcos-rootfs.img + dest: /var/www/html/bin + remote_src: yes mode: '0755' - - name: enable httpd + - name: check to make sure httpd is started service: name: httpd - enable: yes + state: started - name: check httpd status service: state: started name: httpd + - name: change mirror 1 file name + command: mv /var/www/html/bin/rhcos-live-kernel-s390x /var/www/html/bin/rhcos-kernel + + - name: change mirror 2 file name + command: mv /var/www/html/bin/rhcos-live-initramfs.s390x.img /var/www/html/bin/rhcos-initramfs.img + + - name: change mirror 3 file name + command: mv /var/www/html/bin/rhcos-live-rootfs.s390x.img /var/www/html/bin/rhcos-rootfs.img diff --git a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml new file mode 100644 index 00000000..30373821 --- /dev/null +++ b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml @@ -0,0 +1,14 @@ +--- + +- hosts: bastion_server + become: true + tasks: + +##- name: install haproxy +## dnf: +## - haproxy + + - name: move haproxy config file to bastion + copy: + src: roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 + dest: /etc/haproxy/haproxy.cfg diff --git a/haproxy.cfg.j2 b/roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 similarity index 100% rename from haproxy.cfg.j2 rename to roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 From f5568e3ef75097d0c753375b3a5d37cd75b5fc67 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 23 Jul 2021 15:57:06 -0400 Subject: [PATCH 080/885] created a playbook to copy over the load balancer haproxy config file to the bastion. Moved the config file to bastion templates folder. 
--- .../bastion_s390x/tasks/http_setup.yaml | 49 ++++++++++++++++--- .../bastion_s390x/tasks/load_balancer.yaml | 14 ++++++ .../bastion_s390x/templates/haproxy.cfg.j2 | 0 3 files changed, 56 insertions(+), 7 deletions(-) create mode 100644 roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml rename haproxy.cfg.j2 => roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 (100%) diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml index 62fb7e58..00bd3cd3 100644 --- a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml @@ -16,7 +16,7 @@ - name: Ensure the default Apache port is 8080 replace: path: /etc/httpd/conf/httpd.conf - regexp: '^Listen 80' + regexp: '^Listen 80 ' replace: 'Listen 8080' backup: yes @@ -27,11 +27,35 @@ replace: 'Listen 4443 https' backup: yes - - name: restart httpd to reflect changes to port + - name: restart httpd to reflect changes to port service: name: httpd state: restarted + - name: Allow all access to tcp port 8080 + community.general.ufw: + rule: allow + port: '8080' + proto: tcp + + - name: Allow all access to tcp port 80 + community.general.ufw: + rule: allow + port: '80' + proto: tcp + + - name: Allow all access to tcp port 443 + community.general.ufw: + rule: allow + port: '443' + proto: tcp + + - name: Allow all access to tcp port 4443 + community.general.ufw: + rule: allow + port: '4443' + proto: tcp + - name: create directory bin for mirrors file: path: /var/www/html/bin @@ -47,28 +71,39 @@ - name: get mirrors 1 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x - dest: /var/www/html/bin/rhcos-kernel + dest: /var/www/html/bin + remote_src: yes mode: '0755' - name: get mirrors 2 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img 
- dest: /var/www/html/bin/rhcos-initramfs.img + dest: /var/www/html/bin + remote_src: yes mode: '0755' - name: get mirrors 3 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/www/html/bin/rhcos-rootfs.img + dest: /var/www/html/bin + remote_src: yes mode: '0755' - - name: enable httpd + - name: check to make sure httpd is started service: name: httpd - enable: yes + state: started - name: check httpd status service: state: started name: httpd + - name: change mirror 1 file name + command: mv /var/www/html/bin/rhcos-live-kernel-s390x /var/www/html/bin/rhcos-kernel + + - name: change mirror 2 file name + command: mv /var/www/html/bin/rhcos-live-initramfs.s390x.img /var/www/html/bin/rhcos-initramfs.img + + - name: change mirror 3 file name + command: mv /var/www/html/bin/rhcos-live-rootfs.s390x.img /var/www/html/bin/rhcos-rootfs.img diff --git a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml new file mode 100644 index 00000000..30373821 --- /dev/null +++ b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml @@ -0,0 +1,14 @@ +--- + +- hosts: bastion_server + become: true + tasks: + +##- name: install haproxy +## dnf: +## - haproxy + + - name: move haproxy config file to bastion + copy: + src: roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 + dest: /etc/haproxy/haproxy.cfg diff --git a/haproxy.cfg.j2 b/roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 similarity index 100% rename from haproxy.cfg.j2 rename to roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 From 6b9ba9cd41423f603323599fa4e330f5108bcb0f Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 23 Jul 2021 17:22:43 -0400 Subject: [PATCH 081/885] starting to build complete bastion playbook. 
--- .../bastion_s390x/tasks/bastion.yaml | 144 ++++++++++++++---- 1 file changed, 117 insertions(+), 27 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml index 1fc0ccb3..963203d9 100644 --- a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml @@ -4,40 +4,130 @@ become: true tasks: + - name: start bastion install process + community.libvirt.virt: + name: bastion + memory: 4096 + vcpus: 2 + disk size: 30 + cdrom: /var/lib/libvirt/images/rhel83.iso + accelerate: yes + import: yes + network: network=macvtap-net + extra-args: ""ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion.distribution.ocpz.wsclab.endicott.ibm> + location: /rhcos-install + qemu-commandline: "-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,re> + noautoconsole: yes -#there has to be a way to do this through Ansible. Step 3 page 9 - - name: complete bastion install process +##- name: install haproxy +## dnf: +## - haproxy -#leaving this until I meet with Filipe - - name: download software + - name: move haproxy config file to bastion + copy: + src: roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 + dest: /etc/haproxy/haproxy.cfg -#leaving this until I meet with Filipe - - name: DNS requirements and configuration +##- name: update repository index +## dnf: +## update_cache: yes -#not sure what this instruction step is trying to say. 
Page 13 - - name: Load Balancer +##- name: install httpd +## dnf: +## name: httpd +## state: latest -# Need to edit this script to automate changing port to 8080 and ensure latest versions of OpenShift mirrors - - name: Create and configure the HTTP server - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/create_http.sh + - name: Ensure the default Apache port is 8080 + replace: + path: /etc/httpd/conf/httpd.conf + regexp: '^Listen 80 ' + replace: 'Listen 8080' + backup: yes - - name: Get installer and oc Client Tools - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/get_ocp_installer.sh + - name: Ensure the SSL default port is 4443 + replace: + path: /etc/httpd/conf.d/ssl.conf + regexp: '^Listen 443 https' + replace: 'Listen 4443 https' + backup: yes -##need to use host_vars for - - name: Generate the ignition files 1 - shell: ./openshift-install create manifests --dir= + - name: restart httpd to reflect changes to port + service: + name: httpd + state: restarted -##also needs variable - - name: Generate the ignition files 2 - shell: ./openshift-install create ignition-configs --dir= - -##also needs variable - - name: Generate the ignition files 3 - shell: cp /*.ign /var/www/html/ignition + - name: Allow all access to tcp port 8080 + community.general.ufw: + rule: allow + port: '8080' + proto: tcp + + - name: Allow all access to tcp port 80 + community.general.ufw: + rule: allow + port: '80' + proto: tcp + + - name: Allow all access to tcp port 443 + community.general.ufw: + rule: allow + port: '443' + proto: tcp + + - name: Allow all access to tcp port 4443 + community.general.ufw: + rule: allow + port: '4443' + proto: tcp + + - name: create directory bin for mirrors + file: + path: /var/www/html/bin + state: directory + mode: '0755' + + - name: create directory bootstrap for mirrors + file: + path: /var/www/html/bootstrap + state: directory + mode: '0755' + + - name: get mirrors 1 + get_url: + url: 
https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + + - name: get mirrors 2 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + + - name: get mirrors 3 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + + - name: check to make sure httpd is started + service: + name: httpd + state: started + + - name: check httpd status + service: + state: started + name: httpd + + - name: change mirror 1 file name + command: mv /var/www/html/bin/rhcos-live-kernel-s390x /var/www/html/bin/rhcos-kernel - - name: Generate the ignition files 4 - shell: chmod 775 /var/www/html/ignition/*.ign + - name: change mirror 2 file name + command: mv /var/www/html/bin/rhcos-live-initramfs.s390x.img /var/www/html/bin/rhcos-initramfs.img - - name: Prepare the KVM OCP guests - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/prep_kvm_guests.sh + - name: change mirror 3 file name + command: mv /var/www/html/bin/rhcos-live-rootfs.s390x.img /var/www/html/bin/rhcos-rootfs.img From 63b8b0b19df518d783b9cb23cc68a79272dc7e19 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 23 Jul 2021 17:22:43 -0400 Subject: [PATCH 082/885] starting to build complete bastion playbook. 
--- .../bastion_s390x/tasks/bastion.yaml | 144 ++++++++++++++---- 1 file changed, 117 insertions(+), 27 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml index 1fc0ccb3..963203d9 100644 --- a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml @@ -4,40 +4,130 @@ become: true tasks: + - name: start bastion install process + community.libvirt.virt: + name: bastion + memory: 4096 + vcpus: 2 + disk size: 30 + cdrom: /var/lib/libvirt/images/rhel83.iso + accelerate: yes + import: yes + network: network=macvtap-net + extra-args: ""ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion.distribution.ocpz.wsclab.endicott.ibm> + location: /rhcos-install + qemu-commandline: "-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,re> + noautoconsole: yes -#there has to be a way to do this through Ansible. Step 3 page 9 - - name: complete bastion install process +##- name: install haproxy +## dnf: +## - haproxy -#leaving this until I meet with Filipe - - name: download software + - name: move haproxy config file to bastion + copy: + src: roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 + dest: /etc/haproxy/haproxy.cfg -#leaving this until I meet with Filipe - - name: DNS requirements and configuration +##- name: update repository index +## dnf: +## update_cache: yes -#not sure what this instruction step is trying to say. 
Page 13 - - name: Load Balancer +##- name: install httpd +## dnf: +## name: httpd +## state: latest -# Need to edit this script to automate changing port to 8080 and ensure latest versions of OpenShift mirrors - - name: Create and configure the HTTP server - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/create_http.sh + - name: Ensure the default Apache port is 8080 + replace: + path: /etc/httpd/conf/httpd.conf + regexp: '^Listen 80 ' + replace: 'Listen 8080' + backup: yes - - name: Get installer and oc Client Tools - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/get_ocp_installer.sh + - name: Ensure the SSL default port is 4443 + replace: + path: /etc/httpd/conf.d/ssl.conf + regexp: '^Listen 443 https' + replace: 'Listen 4443 https' + backup: yes -##need to use host_vars for - - name: Generate the ignition files 1 - shell: ./openshift-install create manifests --dir= + - name: restart httpd to reflect changes to port + service: + name: httpd + state: restarted -##also needs variable - - name: Generate the ignition files 2 - shell: ./openshift-install create ignition-configs --dir= - -##also needs variable - - name: Generate the ignition files 3 - shell: cp /*.ign /var/www/html/ignition + - name: Allow all access to tcp port 8080 + community.general.ufw: + rule: allow + port: '8080' + proto: tcp + + - name: Allow all access to tcp port 80 + community.general.ufw: + rule: allow + port: '80' + proto: tcp + + - name: Allow all access to tcp port 443 + community.general.ufw: + rule: allow + port: '443' + proto: tcp + + - name: Allow all access to tcp port 4443 + community.general.ufw: + rule: allow + port: '4443' + proto: tcp + + - name: create directory bin for mirrors + file: + path: /var/www/html/bin + state: directory + mode: '0755' + + - name: create directory bootstrap for mirrors + file: + path: /var/www/html/bootstrap + state: directory + mode: '0755' + + - name: get mirrors 1 + get_url: + url: 
https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + + - name: get mirrors 2 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + + - name: get mirrors 3 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + + - name: check to make sure httpd is started + service: + name: httpd + state: started + + - name: check httpd status + service: + state: started + name: httpd + + - name: change mirror 1 file name + command: mv /var/www/html/bin/rhcos-live-kernel-s390x /var/www/html/bin/rhcos-kernel - - name: Generate the ignition files 4 - shell: chmod 775 /var/www/html/ignition/*.ign + - name: change mirror 2 file name + command: mv /var/www/html/bin/rhcos-live-initramfs.s390x.img /var/www/html/bin/rhcos-initramfs.img - - name: Prepare the KVM OCP guests - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/prep_kvm_guests.sh + - name: change mirror 3 file name + command: mv /var/www/html/bin/rhcos-live-rootfs.s390x.img /var/www/html/bin/rhcos-rootfs.img From 49ec85e1d20abc1fd2d69f5b0e64e0ff2f910f3b Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 27 Jul 2021 15:01:20 -0500 Subject: [PATCH 083/885] add enablement of haproxy to load_balancer.yaml --- roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml index 30373821..9d893aea 100644 --- a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml @@ -3,6 +3,7 @@ - hosts: bastion_server become: true tasks: +# required 
plugin: ansible-galaxy collection install community.general ##- name: install haproxy ## dnf: @@ -12,3 +13,8 @@ copy: src: roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 dest: /etc/haproxy/haproxy.cfg + + - name: Enable haproxy service + community.general.haproxy: + state: enabled + From 2d7ce84e0a69cdf4e6d9c6d192e573f7c9fa1b81 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 27 Jul 2021 15:01:20 -0500 Subject: [PATCH 084/885] add enablement of haproxy to load_balancer.yaml --- roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml index 30373821..9d893aea 100644 --- a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml @@ -3,6 +3,7 @@ - hosts: bastion_server become: true tasks: +# required plugin: ansible-galaxy collection install community.general ##- name: install haproxy ## dnf: @@ -12,3 +13,8 @@ copy: src: roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 dest: /etc/haproxy/haproxy.cfg + + - name: Enable haproxy service + community.general.haproxy: + state: enabled + From 3d14160eaaf3b5e6912da18e1f7d093575d774d2 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 27 Jul 2021 15:27:40 -0500 Subject: [PATCH 085/885] Create rhel83.iso fake iso file --- files/rhel83.iso | 1 + 1 file changed, 1 insertion(+) create mode 100644 files/rhel83.iso diff --git a/files/rhel83.iso b/files/rhel83.iso new file mode 100644 index 00000000..1bbf7176 --- /dev/null +++ b/files/rhel83.iso @@ -0,0 +1 @@ +Placeholder for ISO to be manually replaced for copy during install. Before running playbooks. Copy real ISO into this directory to replace this file. 
From 221439ab4cb471c6b8cf19eda0b1979709ef3190 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 27 Jul 2021 15:27:40 -0500 Subject: [PATCH 086/885] Create rhel83.iso fake iso file --- files/rhel83.iso | 1 + 1 file changed, 1 insertion(+) create mode 100644 files/rhel83.iso diff --git a/files/rhel83.iso b/files/rhel83.iso new file mode 100644 index 00000000..1bbf7176 --- /dev/null +++ b/files/rhel83.iso @@ -0,0 +1 @@ +Placeholder for ISO to be manually replaced for copy during install. Before running playbooks. Copy real ISO into this directory to replace this file. From 986059768774ec112f155ed86ec206cdd4b627fa Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 27 Jul 2021 15:28:13 -0500 Subject: [PATCH 087/885] Update .gitignore Add iso files to gitignore. --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index e43b0f98..db0e4afa 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ .DS_Store +.iso From 242d748319acce7a60802d4d1141401e2d27ba88 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 27 Jul 2021 15:28:13 -0500 Subject: [PATCH 088/885] Update .gitignore Add iso files to gitignore. --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index e43b0f98..db0e4afa 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ .DS_Store +.iso From 6b972b5b8d23bf98d363adaffcccc20e12363e79 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 27 Jul 2021 18:22:29 -0400 Subject: [PATCH 089/885] Various work from 07/27. Mostly revolving around consolidating the bastion.yaml main playbook to incorporate all of its parts. Also did some file management. Deleting, moving, etc. 
--- .../haproxy.cfg.j2 => files/haproxy.cfg | 0 files/shell_scripts/dl_rhel_iso.sh | 3 - files/shell_scripts/start_bastion_install.sh | 2 - .../bastion_s390x/tasks/bastion.yaml | 82 ++++++++++++++++++- .../bastion_s390x/tasks/get-ocp.yaml | 0 .../bastion_s390x/tasks/load_balancer.yaml | 13 +-- .../tasks/create_bootstrap.yaml | 12 +++ 7 files changed, 98 insertions(+), 14 deletions(-) rename roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 => files/haproxy.cfg (100%) delete mode 100644 files/shell_scripts/dl_rhel_iso.sh delete mode 100644 files/shell_scripts/start_bastion_install.sh rename get-ocp.yml => roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml (100%) create mode 100644 roles/bootstrap_server/tasks/create_bootstrap.yaml diff --git a/roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 b/files/haproxy.cfg similarity index 100% rename from roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 rename to files/haproxy.cfg diff --git a/files/shell_scripts/dl_rhel_iso.sh b/files/shell_scripts/dl_rhel_iso.sh deleted file mode 100644 index 1377f15b..00000000 --- a/files/shell_scripts/dl_rhel_iso.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!bin/bash -wget /URL/rhel-8.3-s390x-dvd.iso -rhel-8.3-s390x-dvd.iso rhel83.iso diff --git a/files/shell_scripts/start_bastion_install.sh b/files/shell_scripts/start_bastion_install.sh deleted file mode 100644 index d18ba2c1..00000000 --- a/files/shell_scripts/start_bastion_install.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!bin/bash -virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk size=20 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.255.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive 
if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml index 963203d9..4727ad16 100644 --- a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml @@ -3,7 +3,9 @@ - hosts: bastion_server become: true tasks: - + +## from start_bastion_install.yaml + - name: start bastion install process community.libvirt.virt: name: bastion @@ -19,14 +21,29 @@ qemu-commandline: "-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,re> noautoconsole: yes +## from install_haproxy.yaml + +##- name: install haproxy +## dnf: +## - haproxy + +## required plugin: ansible-galaxy collection install community.general + ##- name: install haproxy ## dnf: ## - haproxy - name: move haproxy config file to bastion copy: - src: roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 - dest: /etc/haproxy/haproxy.cfg + src: haproxy.cfg + dest: /etc/haproxy/haproxy.cfg.j2 + + - name: Start haproxy + systemd: + state: started + name: haproxy + +## from http_setup.yaml ##- name: update repository index ## dnf: @@ -131,3 +148,62 @@ - name: change mirror 3 file name command: mv /var/www/html/bin/rhcos-live-rootfs.s390x.img /var/www/html/bin/rhcos-rootfs.img + +##from get-ocp.yaml + + - name: create OCP download landing directory + file: + path: /ocpinst/ + state: directory +A + - name: Unzip OCP Client + ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + + - name: Unzip OCP Installer + ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + + - name: Copy kubectl file + 
ansible.builtin.copy: + src: /ocpinst/kubectl + dest: /usr/local/bin/kubectl + remote_src: yes + owner: root + group: root + mode: '0755' + + - name: Copy oc file + ansible.builtin.copy: + src: /ocpinst/oc + dest: /usr/local/bin/oc + remote_src: yes + owner: root + group: root + mode: '0755' + + - name: Copy openshift-install file + ansible.builtin.copy: + src: /ocpinst/openshift-install + dest: /usr/local/bin/openshift-install + remote_src: yes + owner: root + group: root + mode: '0755' + +## Rough draft task here. To be completed once we start using templates. Use ansible.builtin.template to fill install_config.yaml with correct variables. +## - name: create install-config.yaml +## file: +## path: "~/files/install-config.yaml" +## state: touch +## +## - name: Fill contents of install-config.yaml file +## ansible.builtin.template: +## src: install-config.yaml +## dest: "~/files/install-config.yaml" + +## also still needs the ignition files task and the prepare the KVM OCP guests task diff --git a/get-ocp.yml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml similarity index 100% rename from get-ocp.yml rename to roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml index 9d893aea..e238a17b 100644 --- a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml @@ -3,6 +3,7 @@ - hosts: bastion_server become: true tasks: + # required plugin: ansible-galaxy collection install community.general ##- name: install haproxy @@ -11,10 +12,10 @@ - name: move haproxy config file to bastion copy: - src: roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 - dest: /etc/haproxy/haproxy.cfg + src: haproxy.cfg + dest: /etc/haproxy/haproxy.cfg.j2 - - name: Enable haproxy service - community.general.haproxy: - state: enabled - + - name: Start haproxy + systemd: + state: 
started + name: haproxy diff --git a/roles/bootstrap_server/tasks/create_bootstrap.yaml b/roles/bootstrap_server/tasks/create_bootstrap.yaml new file mode 100644 index 00000000..42898bb5 --- /dev/null +++ b/roles/bootstrap_server/tasks/create_bootstrap.yaml @@ -0,0 +1,12 @@ +--- + +- hosts: bootstrap_server + become: true + tasks: + +## to wait for bootstrap creation before continuing, use one of these: +## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_module.html +## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_connection_module.html +## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/pause_module.html + + From 23429c298baa58dd2debb956fc537a7d8c60fa35 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 27 Jul 2021 18:22:29 -0400 Subject: [PATCH 090/885] Various work from 07/27. Mostly revolving around consolidating the bastion.yaml main playbook to incorporate all of its parts. Also did some file management. Deleting, moving, etc. 
--- .../haproxy.cfg.j2 => files/haproxy.cfg | 0 files/shell_scripts/dl_rhel_iso.sh | 3 - files/shell_scripts/start_bastion_install.sh | 2 - .../bastion_s390x/tasks/bastion.yaml | 82 ++++++++++++++++++- .../bastion_s390x/tasks/get-ocp.yaml | 0 .../bastion_s390x/tasks/load_balancer.yaml | 13 +-- .../tasks/create_bootstrap.yaml | 12 +++ 7 files changed, 98 insertions(+), 14 deletions(-) rename roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 => files/haproxy.cfg (100%) delete mode 100644 files/shell_scripts/dl_rhel_iso.sh delete mode 100644 files/shell_scripts/start_bastion_install.sh rename get-ocp.yml => roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml (100%) create mode 100644 roles/bootstrap_server/tasks/create_bootstrap.yaml diff --git a/roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 b/files/haproxy.cfg similarity index 100% rename from roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 rename to files/haproxy.cfg diff --git a/files/shell_scripts/dl_rhel_iso.sh b/files/shell_scripts/dl_rhel_iso.sh deleted file mode 100644 index 1377f15b..00000000 --- a/files/shell_scripts/dl_rhel_iso.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!bin/bash -wget /URL/rhel-8.3-s390x-dvd.iso -rhel-8.3-s390x-dvd.iso rhel83.iso diff --git a/files/shell_scripts/start_bastion_install.sh b/files/shell_scripts/start_bastion_install.sh deleted file mode 100644 index d18ba2c1..00000000 --- a/files/shell_scripts/start_bastion_install.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!bin/bash -virt-install --connect qemu:///system --name bastion --memory 4096 --vcpus 2 --disk size=20 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.255.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive 
if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml index 963203d9..4727ad16 100644 --- a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml @@ -3,7 +3,9 @@ - hosts: bastion_server become: true tasks: - + +## from start_bastion_install.yaml + - name: start bastion install process community.libvirt.virt: name: bastion @@ -19,14 +21,29 @@ qemu-commandline: "-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,re> noautoconsole: yes +## from install_haproxy.yaml + +##- name: install haproxy +## dnf: +## - haproxy + +## required plugin: ansible-galaxy collection install community.general + ##- name: install haproxy ## dnf: ## - haproxy - name: move haproxy config file to bastion copy: - src: roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 - dest: /etc/haproxy/haproxy.cfg + src: haproxy.cfg + dest: /etc/haproxy/haproxy.cfg.j2 + + - name: Start haproxy + systemd: + state: started + name: haproxy + +## from http_setup.yaml ##- name: update repository index ## dnf: @@ -131,3 +148,62 @@ - name: change mirror 3 file name command: mv /var/www/html/bin/rhcos-live-rootfs.s390x.img /var/www/html/bin/rhcos-rootfs.img + +##from get-ocp.yaml + + - name: create OCP download landing directory + file: + path: /ocpinst/ + state: directory +A + - name: Unzip OCP Client + ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + + - name: Unzip OCP Installer + ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + + - name: Copy kubectl file + 
ansible.builtin.copy: + src: /ocpinst/kubectl + dest: /usr/local/bin/kubectl + remote_src: yes + owner: root + group: root + mode: '0755' + + - name: Copy oc file + ansible.builtin.copy: + src: /ocpinst/oc + dest: /usr/local/bin/oc + remote_src: yes + owner: root + group: root + mode: '0755' + + - name: Copy openshift-install file + ansible.builtin.copy: + src: /ocpinst/openshift-install + dest: /usr/local/bin/openshift-install + remote_src: yes + owner: root + group: root + mode: '0755' + +## Rough draft task here. To be completed once we start using templates. Use ansible.builtin.template to fill install_config.yaml with correct variables. +## - name: create install-config.yaml +## file: +## path: "~/files/install-config.yaml" +## state: touch +## +## - name: Fill contents of install-config.yaml file +## ansible.builtin.template: +## src: install-config.yaml +## dest: "~/files/install-config.yaml" + +## also still needs the ignition files task and the prepare the KVM OCP guests task diff --git a/get-ocp.yml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml similarity index 100% rename from get-ocp.yml rename to roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml index 9d893aea..e238a17b 100644 --- a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml @@ -3,6 +3,7 @@ - hosts: bastion_server become: true tasks: + # required plugin: ansible-galaxy collection install community.general ##- name: install haproxy @@ -11,10 +12,10 @@ - name: move haproxy config file to bastion copy: - src: roles/bastion_server/bastion_s390x/templates/haproxy.cfg.j2 - dest: /etc/haproxy/haproxy.cfg + src: haproxy.cfg + dest: /etc/haproxy/haproxy.cfg.j2 - - name: Enable haproxy service - community.general.haproxy: - state: enabled - + - name: Start haproxy + systemd: + state: 
started + name: haproxy diff --git a/roles/bootstrap_server/tasks/create_bootstrap.yaml b/roles/bootstrap_server/tasks/create_bootstrap.yaml new file mode 100644 index 00000000..42898bb5 --- /dev/null +++ b/roles/bootstrap_server/tasks/create_bootstrap.yaml @@ -0,0 +1,12 @@ +--- + +- hosts: bootstrap_server + become: true + tasks: + +## to wait for bootstrap creation before continuing, use one of these: +## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_module.html +## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_connection_module.html +## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/pause_module.html + + From ebfdebc37a78f40903a4d1e7c144d8bad6bde56e Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 28 Jul 2021 13:43:32 -0400 Subject: [PATCH 091/885] created page 19's playbook prep_kvm_guests.yaml. --- roles/bastion_server/bastion_s390x/tasks/bastion.yaml | 5 +++-- .../{dwnload-image-files.yaml => prep_kvm_guests.yaml} | 10 ++++++---- 2 files changed, 9 insertions(+), 6 deletions(-) rename roles/kvm_host/kvm_host_s390x/tasks/{dwnload-image-files.yaml => prep_kvm_guests.yaml} (64%) diff --git a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml index 4727ad16..c1e0133b 100644 --- a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml @@ -155,7 +155,7 @@ file: path: /ocpinst/ state: directory -A + - name: Unzip OCP Client ansible.builtin.unarchive: src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz @@ -196,6 +196,7 @@ A mode: '0755' ## Rough draft task here. To be completed once we start using templates. Use ansible.builtin.template to fill install_config.yaml with correct variables. 
+ ## - name: create install-config.yaml ## file: ## path: "~/files/install-config.yaml" @@ -206,4 +207,4 @@ A ## src: install-config.yaml ## dest: "~/files/install-config.yaml" -## also still needs the ignition files task and the prepare the KVM OCP guests task +## also still needs the ignition files task and the prepare the KVM OCP guests tasks diff --git a/roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml b/roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml similarity index 64% rename from roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml rename to roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml index 28538f24..df7c3303 100644 --- a/roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml +++ b/roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml @@ -3,12 +3,14 @@ - hosts: kvm_host become: true tasks: + + - name: create landing directory + file: + path: /var/lib/libvirt/images + state: directory - - name: Unzip OCP Client + - name: Unzip OCP dependencies ansible.builtin.unarchive: src: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images remote_src: yes - - - From c61ffb34196ca370765c7a37d4491cda07ceaaba Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 28 Jul 2021 13:43:32 -0400 Subject: [PATCH 092/885] created page 19's playbook prep_kvm_guests.yaml. 
--- roles/bastion_server/bastion_s390x/tasks/bastion.yaml | 5 +++-- .../{dwnload-image-files.yaml => prep_kvm_guests.yaml} | 10 ++++++---- 2 files changed, 9 insertions(+), 6 deletions(-) rename roles/kvm_host/kvm_host_s390x/tasks/{dwnload-image-files.yaml => prep_kvm_guests.yaml} (64%) diff --git a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml index 4727ad16..c1e0133b 100644 --- a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml @@ -155,7 +155,7 @@ file: path: /ocpinst/ state: directory -A + - name: Unzip OCP Client ansible.builtin.unarchive: src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz @@ -196,6 +196,7 @@ A mode: '0755' ## Rough draft task here. To be completed once we start using templates. Use ansible.builtin.template to fill install_config.yaml with correct variables. + ## - name: create install-config.yaml ## file: ## path: "~/files/install-config.yaml" @@ -206,4 +207,4 @@ A ## src: install-config.yaml ## dest: "~/files/install-config.yaml" -## also still needs the ignition files task and the prepare the KVM OCP guests task +## also still needs the ignition files task and the prepare the KVM OCP guests tasks diff --git a/roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml b/roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml similarity index 64% rename from roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml rename to roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml index 28538f24..df7c3303 100644 --- a/roles/kvm_host/kvm_host_s390x/tasks/dwnload-image-files.yaml +++ b/roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml @@ -3,12 +3,14 @@ - hosts: kvm_host become: true tasks: + + - name: create landing directory + file: + path: /var/lib/libvirt/images + state: directory - - name: Unzip OCP Client + - name: Unzip OCP dependencies 
ansible.builtin.unarchive: src: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images remote_src: yes - - - From 86a3046a485cf3717f18a5dd89496d103a60a109 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 12:32:57 -0500 Subject: [PATCH 093/885] added create manifiest command to test ocp install playbook --- get-ocp.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/get-ocp.yml b/get-ocp.yml index d43f0074..1d5c243a 100644 --- a/get-ocp.yml +++ b/get-ocp.yml @@ -45,4 +45,11 @@ owner: root group: root mode: '0755' - + + - name: Create Manifests + ansible.builtin.command: + cmd: /ocpinst/openshift-install create manifests --dir=/ocpinst/ + args: + chdir: /ocpinst/ + + From d2a0b48c47d5708dbe63a97ed6ec5f942541db23 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 12:32:57 -0500 Subject: [PATCH 094/885] added create manifiest command to test ocp install playbook --- get-ocp.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/get-ocp.yml b/get-ocp.yml index d43f0074..1d5c243a 100644 --- a/get-ocp.yml +++ b/get-ocp.yml @@ -45,4 +45,11 @@ owner: root group: root mode: '0755' - + + - name: Create Manifests + ansible.builtin.command: + cmd: /ocpinst/openshift-install create manifests --dir=/ocpinst/ + args: + chdir: /ocpinst/ + + From b0ea85fc410e19aa7f781419737b3726fdbdb15c Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 15:22:16 -0500 Subject: [PATCH 095/885] Update get-ocp.yaml --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 1d5c243a..bcc8b1c3 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -1,4 +1,4 @@ -- hosts: 
kvm_host +- hosts: bastion_server become: true tasks: From 8c8e53950fb15448010a7be35d2ba6546fefac6b Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 15:22:16 -0500 Subject: [PATCH 096/885] Update get-ocp.yaml --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 1d5c243a..bcc8b1c3 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -1,4 +1,4 @@ -- hosts: kvm_host +- hosts: bastion_server become: true tasks: From 2fafab7d5732e063effee1243a1738c75979417f Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 15:40:11 -0500 Subject: [PATCH 097/885] update build script to correct bootstrap name --- build_script.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build_script.sh b/build_script.sh index 863090b0..9b5780f9 100644 --- a/build_script.sh +++ b/build_script.sh @@ -8,9 +8,9 @@ #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G -qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign 
ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole #virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole @@ -20,4 +20,4 @@ qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/ #virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 
coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole From a2386eab255f42f0a30aed296ea7a0592b14cb96 Mon Sep 17 
00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 15:40:11 -0500 Subject: [PATCH 098/885] update build script to correct bootstrap name --- build_script.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build_script.sh b/build_script.sh index 863090b0..9b5780f9 100644 --- a/build_script.sh +++ b/build_script.sh @@ -8,9 +8,9 @@ #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G -qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' 
--connect qemu:///system --name bootstrap-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole #virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole @@ -20,4 +20,4 @@ qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/ #virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img 
coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole From 3d0bf0bb42a92a78b1bc1bd6a4717a2bb2c9e105 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 15:46:40 -0500 Subject: [PATCH 099/885] modify create manifest play to run ocp install --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index bcc8b1c3..11e6bfc4 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -47,9 +47,6 @@ mode: '0755' - name: Create Manifests - ansible.builtin.command: - cmd: /ocpinst/openshift-install create manifests --dir=/ocpinst/ - args: - chdir: /ocpinst/ + ansible.builtin.command: /ocpinst/openshift-install create manifests 
--dir=/ocpinst/ From 6d9dec5b83397e1945b8389d41b365f9421f11eb Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 15:46:40 -0500 Subject: [PATCH 100/885] modify create manifest play to run ocp install --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index bcc8b1c3..11e6bfc4 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -47,9 +47,6 @@ mode: '0755' - name: Create Manifests - ansible.builtin.command: - cmd: /ocpinst/openshift-install create manifests --dir=/ocpinst/ - args: - chdir: /ocpinst/ + ansible.builtin.command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ From 986d0c0373aafde79f01180d1fa40902f5a47f5b Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 15:57:50 -0500 Subject: [PATCH 101/885] escalate privilege for manifest creation --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 11e6bfc4..c68be667 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -48,5 +48,7 @@ - name: Create Manifests ansible.builtin.command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ + become: yes + chdir: /ocpinst/ From f68c44133a98f46b393f102eadedcc6bf275006b Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 15:57:50 -0500 Subject: [PATCH 102/885] escalate privilege for manifest creation --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml 
b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 11e6bfc4..c68be667 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -48,5 +48,7 @@ - name: Create Manifests ansible.builtin.command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ + become: yes + chdir: /ocpinst/ From afb2a678b7cf44e16ef6fd9f18dbdada642c5655 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 29 Jul 2021 16:58:21 -0400 Subject: [PATCH 103/885] updated create_bootstrap.sh with new lines from build_scripts.sh and started working on create_bootrap.yaml --- files/shell_scripts/create_bootstrap.sh | 12 ++---------- roles/bootstrap_server/tasks/create_bootstrap.yaml | 8 ++++++++ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/files/shell_scripts/create_bootstrap.sh b/files/shell_scripts/create_bootstrap.sh index 597d0c37..a9bf6529 100644 --- a/files/shell_scripts/create_bootstrap.sh +++ b/files/shell_scripts/create_bootstrap.sh @@ -1,15 +1,7 @@ #!bin/bash ##create -qemu-img create -f qcow2 -F qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G +qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G ##boot -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 -coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://:8080/bin/rhcos-rootfs.img -coreos.inst.ignition_url=http://:8080/ignition/bootstrap.ign ip=::::::none -nameserver=’ --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/ -images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive -if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio- -blk,serial=ignition,drive=ignition" - - +virt-install --boot 
kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/roles/bootstrap_server/tasks/create_bootstrap.yaml b/roles/bootstrap_server/tasks/create_bootstrap.yaml index 42898bb5..278900c1 100644 --- a/roles/bootstrap_server/tasks/create_bootstrap.yaml +++ b/roles/bootstrap_server/tasks/create_bootstrap.yaml @@ -4,6 +4,14 @@ become: true tasks: + - name: create bootstrap + community.libvirt.virt: + command: create + dest: /var/lib/libvirt/images/bootstrap.qcow2 + size: 120000 + + + ## to wait for bootstrap creation before continuing, use one of these: ## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_module.html ## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_connection_module.html From 4227bd686a5495c67f7eacce09cb080876707819 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 29 Jul 2021 16:58:21 -0400 Subject: [PATCH 104/885] updated create_bootstrap.sh with new lines from build_scripts.sh and started working on create_bootrap.yaml --- files/shell_scripts/create_bootstrap.sh | 12 ++---------- roles/bootstrap_server/tasks/create_bootstrap.yaml | 8 ++++++++ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/files/shell_scripts/create_bootstrap.sh b/files/shell_scripts/create_bootstrap.sh index 597d0c37..a9bf6529 100644 --- a/files/shell_scripts/create_bootstrap.sh +++ 
b/files/shell_scripts/create_bootstrap.sh @@ -1,15 +1,7 @@ #!bin/bash ##create -qemu-img create -f qcow2 -F qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G +qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G ##boot -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 -coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://:8080/bin/rhcos-rootfs.img -coreos.inst.ignition_url=http://:8080/ignition/bootstrap.ign ip=::::::none -nameserver=’ --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/ -images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive -if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio- -blk,serial=ignition,drive=ignition" - - +virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/roles/bootstrap_server/tasks/create_bootstrap.yaml b/roles/bootstrap_server/tasks/create_bootstrap.yaml index 42898bb5..278900c1 100644 --- a/roles/bootstrap_server/tasks/create_bootstrap.yaml +++ b/roles/bootstrap_server/tasks/create_bootstrap.yaml @@ -4,6 +4,14 @@ become: true tasks: + - name: create bootstrap + community.libvirt.virt: + command: 
create + dest: /var/lib/libvirt/images/bootstrap.qcow2 + size: 120000 + + + ## to wait for bootstrap creation before continuing, use one of these: ## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_module.html ## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_connection_module.html From 86378d6b58d6b1e146c84949eb0bec88ebe18cb4 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 29 Jul 2021 17:00:29 -0400 Subject: [PATCH 105/885] updated create_bootstrap.sh with new lines from build_scripts.sh and started working on create_bootrap.yaml --- build_script.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/build_script.sh b/build_script.sh index 863090b0..9b5780f9 100644 --- a/build_script.sh +++ b/build_script.sh @@ -8,9 +8,9 @@ #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G -qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive 
if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole #virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole @@ -20,4 +20,4 @@ qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/ #virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 
16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole From 943a8ee52f953355682f5b648262dc3eecf07013 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 29 Jul 2021 17:00:29 -0400 Subject: [PATCH 106/885] updated create_bootstrap.sh with new lines from build_scripts.sh and started working on create_bootrap.yaml --- build_script.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/build_script.sh b/build_script.sh index 863090b0..9b5780f9 100644 --- a/build_script.sh +++ b/build_script.sh @@ -8,9 +8,9 @@ #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G -qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G +#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive 
if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole #virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole @@ -20,4 +20,4 @@ qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/ #virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 
--vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole From b62c4436ad770ca3cd538b8fc63cdbf0cdc269ef Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 16:26:09 -0500 Subject: [PATCH 107/885] fixed install-config and copied to install dir --- install-config.yml => files/install-config.yaml | 0 roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 5 +++++ 2 files changed, 5 insertions(+) rename install-config.yml => files/install-config.yaml (100%) diff --git a/install-config.yml b/files/install-config.yaml similarity index 100% rename from install-config.yml rename to files/install-config.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index c68be667..4f899284 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -46,6 +46,11 @@ group: root mode: '0755' + - name: Copy install-config.yml to ocp install directory + copy: + src: install-config.yaml + dest: /ocpinst/install-config.yaml + - name: Create 
Manifests ansible.builtin.command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes From f727ffe54219baf639b8c6a98ec591db63034621 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 16:26:09 -0500 Subject: [PATCH 108/885] fixed install-config and copied to install dir --- install-config.yml => files/install-config.yaml | 0 roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 5 +++++ 2 files changed, 5 insertions(+) rename install-config.yml => files/install-config.yaml (100%) diff --git a/install-config.yml b/files/install-config.yaml similarity index 100% rename from install-config.yml rename to files/install-config.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index c68be667..4f899284 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -46,6 +46,11 @@ group: root mode: '0755' + - name: Copy install-config.yml to ocp install directory + copy: + src: install-config.yaml + dest: /ocpinst/install-config.yaml + - name: Create Manifests ansible.builtin.command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes From afb7be1674d491656cf30fe2a293fdc9c921ed9d Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 29 Jul 2021 17:29:31 -0400 Subject: [PATCH 109/885] updated bastion.yaml and did some work on create_bootstrap.yaml. Have not figured out wait_for module yet. 
--- roles/bastion_server/bastion_s390x/tasks/bastion.yaml | 1 + roles/bootstrap_server/tasks/create_bootstrap.yaml | 11 ++++------- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml index c1e0133b..338e84a3 100644 --- a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml @@ -206,5 +206,6 @@ ## ansible.builtin.template: ## src: install-config.yaml ## dest: "~/files/install-config.yaml" +## remote_src: yes ## also still needs the ignition files task and the prepare the KVM OCP guests tasks diff --git a/roles/bootstrap_server/tasks/create_bootstrap.yaml b/roles/bootstrap_server/tasks/create_bootstrap.yaml index 278900c1..4406839d 100644 --- a/roles/bootstrap_server/tasks/create_bootstrap.yaml +++ b/roles/bootstrap_server/tasks/create_bootstrap.yaml @@ -4,13 +4,10 @@ become: true tasks: - - name: create bootstrap - community.libvirt.virt: - command: create - dest: /var/lib/libvirt/images/bootstrap.qcow2 - size: 120000 - - + - name: run bootstrap build sheel script + command: files/shell_scripts/create_bootstrap.sh + + - name: wait for bootstrap installation to complete ## to wait for bootstrap creation before continuing, use one of these: ## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_module.html From 124be052c2634617e5790b949d9b81007b587cf0 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 29 Jul 2021 17:29:31 -0400 Subject: [PATCH 110/885] updated bastion.yaml and did some work on create_bootstrap.yaml. Have not figured out wait_for module yet. 
--- roles/bastion_server/bastion_s390x/tasks/bastion.yaml | 1 + roles/bootstrap_server/tasks/create_bootstrap.yaml | 11 ++++------- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml index c1e0133b..338e84a3 100644 --- a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/bastion.yaml @@ -206,5 +206,6 @@ ## ansible.builtin.template: ## src: install-config.yaml ## dest: "~/files/install-config.yaml" +## remote_src: yes ## also still needs the ignition files task and the prepare the KVM OCP guests tasks diff --git a/roles/bootstrap_server/tasks/create_bootstrap.yaml b/roles/bootstrap_server/tasks/create_bootstrap.yaml index 278900c1..4406839d 100644 --- a/roles/bootstrap_server/tasks/create_bootstrap.yaml +++ b/roles/bootstrap_server/tasks/create_bootstrap.yaml @@ -4,13 +4,10 @@ become: true tasks: - - name: create bootstrap - community.libvirt.virt: - command: create - dest: /var/lib/libvirt/images/bootstrap.qcow2 - size: 120000 - - + - name: run bootstrap build sheel script + command: files/shell_scripts/create_bootstrap.sh + + - name: wait for bootstrap installation to complete ## to wait for bootstrap creation before continuing, use one of these: ## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_module.html From cf95239ad5d8e0b765d7109b027f5cce5217cbaf Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 16:39:32 -0500 Subject: [PATCH 111/885] Update get-ocp.yaml --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 4f899284..503e3be4 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -51,6 +51,7 @@ src: 
install-config.yaml dest: /ocpinst/install-config.yaml +# testing portion - name: Create Manifests ansible.builtin.command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes From 8db193ee507109ce48c27297a4121e12fe964181 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 16:39:32 -0500 Subject: [PATCH 112/885] Update get-ocp.yaml --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 4f899284..503e3be4 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -51,6 +51,7 @@ src: install-config.yaml dest: /ocpinst/install-config.yaml +# testing portion - name: Create Manifests ansible.builtin.command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes From cb16334319c81615d7b9f71c12fef04bad9230ba Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 16:43:14 -0500 Subject: [PATCH 113/885] Update get-ocp.yaml removed chdir argument from command line 55 --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 503e3be4..c28b4bdf 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -55,6 +55,6 @@ - name: Create Manifests ansible.builtin.command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes - chdir: /ocpinst/ + From b8ff9decdf38cc54754d82d8388aa5196a075ce6 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 16:43:14 -0500 Subject: [PATCH 114/885] Update get-ocp.yaml removed chdir argument from command line 55 --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 503e3be4..c28b4bdf 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -55,6 +55,6 @@ - name: Create Manifests ansible.builtin.command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes - chdir: /ocpinst/ + From a4cec4203282c213730d8a4a164e1cefacccdd72 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 17:10:21 -0500 Subject: [PATCH 115/885] trying to sync up build script bootstrap name --- build_script.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_script.sh b/build_script.sh index 9b5780f9..69666b66 100644 --- a/build_script.sh +++ b/build_script.sh @@ -10,7 +10,7 @@ #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda 
coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole #virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole From 8825d79d563afe915eda6b060807d4b1ee3db967 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 17:10:21 -0500 Subject: [PATCH 116/885] trying to sync up build script bootstrap name --- build_script.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_script.sh b/build_script.sh index 9b5780f9..69666b66 100644 --- a/build_script.sh +++ b/build_script.sh @@ -10,7 +10,7 @@ #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 
coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole #virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole From 7fe7585613732ea8538354c64a4d76f8dd046579 
Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 20:23:59 -0500 Subject: [PATCH 117/885] Create install-config.yaml copy install-config.yaml file to role/files --- .../bastion_s390x/files/install-config.yaml | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 roles/bastion_server/bastion_s390x/files/install-config.yaml diff --git a/roles/bastion_server/bastion_s390x/files/install-config.yaml b/roles/bastion_server/bastion_s390x/files/install-config.yaml new file mode 100644 index 00000000..ba837ca2 --- /dev/null +++ b/roles/bastion_server/bastion_s390x/files/install-config.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +baseDomain: ocpz.wsclab.endicott.ibm.com +compute: +- hyperthreading: Enabled + name: worker + replicas: 0 + architecture : s390x +controlPlane: + hyperthreading: Enabled + name: master + replicas: 3 + architecture : s390x +metadata: + name: distribution +networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + networkType: OpenShiftSDN + serviceNetwork: + - 172.30.0.0/16 +platform: + none: {} +fips: false +pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' +sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDh3L0INDTn6OGPynIaHDuD245fAQsMY4vNjHP+edVxTOfIGwm6I77mMfT8b8RSZR6C7k+5Ye9QvNadtkxCb0iadzhZiGcwS4zlSTkOnU7HVaR8sQ24nk+Hmo57bUyEON8HlKg83hMlyorCCY2uP+pGJz8CJ+oqRPKBEDGm263+YHKIxokRLRa9cCAlNNvxqlpsyZcwid2GGYKBi/bbkJH+l/3Lc/QLfHzcHi64BzLHsiV9rsWPVqX8DNgs1+z4tOEiVMAXoI6F/rBFNQ1oofF1VyByIndp7XWyYDTbO7mrZayXCJzCGqo7W23Xn0Osx0zczYVigkEZDhBuOvIAseZET8vqzWjMf3DLQsf2Ai0RxJreOzf7MCUTs3YuvwKTsUjgVZHRaU2ZyPubbyPwgWMo0A/JjdZLRym7tQnMzszxJJcF66TS8CArX3+NbXOfm3O6LYxFUpAs1vAydc0hd5ciQ4Tb79Off9BZZ2ET7UKzXiXi9Vp2Izm91GBx88s/1crQSG5LgeQWltfCv2ak+VD88r3Y3kr4iK+kHLbCinKtp9mdRuoBpejZn01EHl3eSpJAsJIH56c1t1Pr6GOM6DX7FRh3DEQUfsRCsiVgjyo00B9i+NPruN5tKpN8OBbZB/fdEvEWpnbelj5WTf7IzGBmt7z48crD8zE+tae01Vj0Vw== ansible' \ No newline at end of file From 4b0cac813345530c209a71b6fa66d032a96459c7 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 29 Jul 2021 20:23:59 -0500 Subject: [PATCH 118/885] Create install-config.yaml copy install-config.yaml file to role/files --- .../bastion_s390x/files/install-config.yaml | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 roles/bastion_server/bastion_s390x/files/install-config.yaml diff --git a/roles/bastion_server/bastion_s390x/files/install-config.yaml 
b/roles/bastion_server/bastion_s390x/files/install-config.yaml new file mode 100644 index 00000000..ba837ca2 --- /dev/null +++ b/roles/bastion_server/bastion_s390x/files/install-config.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +baseDomain: ocpz.wsclab.endicott.ibm.com +compute: +- hyperthreading: Enabled + name: worker + replicas: 0 + architecture : s390x +controlPlane: + hyperthreading: Enabled + name: master + replicas: 3 + architecture : s390x +metadata: + name: distribution +networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + networkType: OpenShiftSDN + serviceNetwork: + - 172.30.0.0/16 +platform: + none: {} +fips: false +pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVR
jhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' +sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDh3L0INDTn6OGPynIaHDuD245fAQsMY4vNjHP+edVxTOfIGwm6I77mMfT8b8RSZR6C7k+5Ye9QvNadtkxCb0iadzhZiGcwS4zlSTkOnU7HVaR8sQ24nk+Hmo57bUyEON8HlKg83hMlyorCCY2uP+pGJz8CJ+oqRPKBEDGm263+YHKIxokRLRa9cCAlNNvxqlpsyZcwid2GGYKBi/bbkJH+l/3Lc/QLfHzcHi64BzLHsiV9rsWPVqX8DNgs1+z4tOEiVMAXoI6F/rBFNQ1oofF1VyByIndp7XWyYDTbO7mrZayXCJzCGqo7W23Xn0Osx0zczYVigkEZDhBuOvIAseZET8vqzWjMf3DLQsf2Ai0RxJreOzf7MCUTs3YuvwKTsUjgVZHRaU2ZyPubbyPwgWMo0A/JjdZLRym7tQnMzszxJJcF66TS8CArX3+NbXOfm3O6LYxFUpAs1vAydc0hd5ciQ4Tb79Off9BZZ2ET7UKzXiXi9Vp2Izm91GBx88s/1crQSG5LgeQWltfCv2ak+VD88r3Y3kr4iK+kHLbCinKtp9mdRuoBpejZn01EHl3eSpJAsJIH56c1t1Pr6GOM6DX7FRh3DEQUfsRCsiVgjyo00B9i+NPruN5tKpN8OBbZB/fdEvEWpnbelj5WTf7IzGBmt7z48crD8zE+tae01Vj0Vw== ansible' \ No newline at end of file From 362c24ffe4a6013c892e61b43ca67600d8075bf4 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 10:15:30 -0500 Subject: [PATCH 119/885] added replace of masterScheduable parameter --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index c28b4bdf..b9a4e425 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -56,5 +56,10 @@ ansible.builtin.command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes - + - name: Set mastersSchedulable parameter to False + replace: + path: /ocpinst/manifests/cluster-scheduler-02-config.yml + regexp: '^masterSchedulable: true' + replace: 'masterSchedulable: false' + backup: yes From 7c5ea18e78369990c1a28b6fefbb2d5f9f41149f Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 10:15:30 -0500 Subject: [PATCH 120/885] added replace of masterScheduable parameter --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git 
a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index c28b4bdf..b9a4e425 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -56,5 +56,10 @@ ansible.builtin.command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes - + - name: Set mastersSchedulable parameter to False + replace: + path: /ocpinst/manifests/cluster-scheduler-02-config.yml + regexp: '^masterSchedulable: true' + replace: 'masterSchedulable: false' + backup: yes From 1ce1bb015ff1242f093197adc82562fb25939743 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 10:25:15 -0500 Subject: [PATCH 121/885] move files directory within tasks directory --- .../bastion_s390x/{ => tasks}/files/install-config.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename roles/bastion_server/bastion_s390x/{ => tasks}/files/install-config.yaml (100%) diff --git a/roles/bastion_server/bastion_s390x/files/install-config.yaml b/roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/files/install-config.yaml rename to roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml From fdd05101432577b795d2bb0b957fc633be226386 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 10:25:15 -0500 Subject: [PATCH 122/885] move files directory within tasks directory --- .../bastion_s390x/{ => tasks}/files/install-config.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename roles/bastion_server/bastion_s390x/{ => tasks}/files/install-config.yaml (100%) diff --git a/roles/bastion_server/bastion_s390x/files/install-config.yaml b/roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/files/install-config.yaml rename to 
roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml From b2ccef4897403d557c516bf3d57571f36f03b02e Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 10:28:45 -0500 Subject: [PATCH 123/885] corrected variable name in get-ocp.yml --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index b9a4e425..d7fbf894 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -59,7 +59,7 @@ - name: Set mastersSchedulable parameter to False replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml - regexp: '^masterSchedulable: true' - replace: 'masterSchedulable: false' + regexp: '^mastersSchedulable: true' + replace: 'mastersSchedulable: false' backup: yes From 6d5368500b509a43b6d0251fd3792d1a4f4e833b Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 10:28:45 -0500 Subject: [PATCH 124/885] corrected variable name in get-ocp.yml --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index b9a4e425..d7fbf894 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -59,7 +59,7 @@ - name: Set mastersSchedulable parameter to False replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml - regexp: '^masterSchedulable: true' - replace: 'masterSchedulable: false' + regexp: '^mastersSchedulable: true' + replace: 'mastersSchedulable: false' backup: yes From 0ffafabafdfb641c28fd454752e73005af6e5965 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 10:55:41 -0500 Subject: [PATCH 125/885] modify replace command 
in playbook --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index d7fbf894..cb31ba09 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -59,7 +59,7 @@ - name: Set mastersSchedulable parameter to False replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml - regexp: '^mastersSchedulable: true' + regexp: '$mastersSchedulable: true' replace: 'mastersSchedulable: false' backup: yes From 2175229d83c3093c0d1ea97bf48b6dbeb6205356 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 10:55:41 -0500 Subject: [PATCH 126/885] modify replace command in playbook --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index d7fbf894..cb31ba09 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -59,7 +59,7 @@ - name: Set mastersSchedulable parameter to False replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml - regexp: '^mastersSchedulable: true' + regexp: '$mastersSchedulable: true' replace: 'mastersSchedulable: false' backup: yes From e8a5f791afd33271b60ffdb20c67f62f74a68172 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 11:05:02 -0500 Subject: [PATCH 127/885] replace modification --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index cb31ba09..38095759 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ 
b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -59,7 +59,7 @@ - name: Set mastersSchedulable parameter to False replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml - regexp: '$mastersSchedulable: true' - replace: 'mastersSchedulable: false' + regexp: '^true' + replace: 'false' backup: yes From f3c6fd2cabf831e700107f999669c1d2070e5de1 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 11:05:02 -0500 Subject: [PATCH 128/885] replace modification --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index cb31ba09..38095759 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -59,7 +59,7 @@ - name: Set mastersSchedulable parameter to False replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml - regexp: '$mastersSchedulable: true' - replace: 'mastersSchedulable: false' + regexp: '^true' + replace: 'false' backup: yes From 0fd4da5dc49526e0cdf1a75cbeeabe09598cba80 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 11:15:14 -0500 Subject: [PATCH 129/885] replace modification --- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 38095759..7fdae698 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -61,5 +61,5 @@ path: /ocpinst/manifests/cluster-scheduler-02-config.yml regexp: '^true' replace: 'false' - backup: yes + From 90fd3ed86852f2df88208db7648cbec4edc38678 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 11:15:14 -0500 Subject: [PATCH 130/885] replace modification --- 
roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 38095759..7fdae698 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -61,5 +61,5 @@ path: /ocpinst/manifests/cluster-scheduler-02-config.yml regexp: '^true' replace: 'false' - backup: yes + From 03af7551cc518c815dd7884a855103fa37349d89 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 11:24:59 -0500 Subject: [PATCH 131/885] replace modification --- .../bastion_s390x/tasks/files/fix-sched.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 roles/bastion_server/bastion_s390x/tasks/files/fix-sched.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/files/fix-sched.yaml b/roles/bastion_server/bastion_s390x/tasks/files/fix-sched.yaml new file mode 100644 index 00000000..f190cf4a --- /dev/null +++ b/roles/bastion_server/bastion_s390x/tasks/files/fix-sched.yaml @@ -0,0 +1,11 @@ +- hosts: bastion_server + become: true + tasks: + + - name: Set mastersSchedulable parameter to False + replace: + path: /ocpinst/manifests/cluster-scheduler-02-config.yml + after: 'mastersSchedulable:' + regexp: '^true' + replace: '# false' + \ No newline at end of file From ee303dc60c83884b01d4ce6fda2879041f957846 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 11:24:59 -0500 Subject: [PATCH 132/885] replace modification --- .../bastion_s390x/tasks/files/fix-sched.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 roles/bastion_server/bastion_s390x/tasks/files/fix-sched.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/files/fix-sched.yaml b/roles/bastion_server/bastion_s390x/tasks/files/fix-sched.yaml new file mode 100644 index 00000000..f190cf4a --- /dev/null +++ 
b/roles/bastion_server/bastion_s390x/tasks/files/fix-sched.yaml @@ -0,0 +1,11 @@ +- hosts: bastion_server + become: true + tasks: + + - name: Set mastersSchedulable parameter to False + replace: + path: /ocpinst/manifests/cluster-scheduler-02-config.yml + after: 'mastersSchedulable:' + regexp: '^true' + replace: '# false' + \ No newline at end of file From ea42f4408c13f1adb937e3c4de87ef70e6bb3350 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 11:26:13 -0500 Subject: [PATCH 133/885] replace modification --- .../bastion_server/bastion_s390x/{tasks/files => }/fix-sched.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename roles/bastion_server/bastion_s390x/{tasks/files => }/fix-sched.yaml (100%) diff --git a/roles/bastion_server/bastion_s390x/tasks/files/fix-sched.yaml b/roles/bastion_server/bastion_s390x/fix-sched.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/files/fix-sched.yaml rename to roles/bastion_server/bastion_s390x/fix-sched.yaml From 1b8efc4cb9b2a42fc1a4973b669c64a6daff9b2c Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 11:26:13 -0500 Subject: [PATCH 134/885] replace modification --- .../bastion_server/bastion_s390x/{tasks/files => }/fix-sched.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename roles/bastion_server/bastion_s390x/{tasks/files => }/fix-sched.yaml (100%) diff --git a/roles/bastion_server/bastion_s390x/tasks/files/fix-sched.yaml b/roles/bastion_server/bastion_s390x/fix-sched.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/files/fix-sched.yaml rename to roles/bastion_server/bastion_s390x/fix-sched.yaml From 44089a9e5cf72e0ca48d35c335481819226e5fc3 Mon Sep 17 00:00:00 2001 From: Phillip Wilson Date: Fri, 30 Jul 2021 11:43:55 -0500 Subject: [PATCH 135/885] fixed modify task --- roles/bastion_server/bastion_s390x/fix-sched.yaml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git 
a/roles/bastion_server/bastion_s390x/fix-sched.yaml b/roles/bastion_server/bastion_s390x/fix-sched.yaml index f190cf4a..9372f308 100644 --- a/roles/bastion_server/bastion_s390x/fix-sched.yaml +++ b/roles/bastion_server/bastion_s390x/fix-sched.yaml @@ -5,7 +5,6 @@ - name: Set mastersSchedulable parameter to False replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml - after: 'mastersSchedulable:' - regexp: '^true' - replace: '# false' - \ No newline at end of file + regexp: ': true' + replace: ': false' + From a0cc4490bf38d2df0c0b5529712b2c5e23d23972 Mon Sep 17 00:00:00 2001 From: Phillip Wilson Date: Fri, 30 Jul 2021 11:43:55 -0500 Subject: [PATCH 136/885] fixed modify task --- roles/bastion_server/bastion_s390x/fix-sched.yaml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/fix-sched.yaml b/roles/bastion_server/bastion_s390x/fix-sched.yaml index f190cf4a..9372f308 100644 --- a/roles/bastion_server/bastion_s390x/fix-sched.yaml +++ b/roles/bastion_server/bastion_s390x/fix-sched.yaml @@ -5,7 +5,6 @@ - name: Set mastersSchedulable parameter to False replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml - after: 'mastersSchedulable:' - regexp: '^true' - replace: '# false' - \ No newline at end of file + regexp: ': true' + replace: ': false' + From 5cc0b1ee9bbb2b59fb4e49ac17ed088ac85b24a4 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 12:01:56 -0500 Subject: [PATCH 137/885] fixing ignition file creation --- roles/bastion_server/bastion_s390x/fix-sched.yaml | 10 ++++++++++ .../bastion_s390x/tasks/get-ocp.yaml | 15 +++++++++++++-- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/fix-sched.yaml b/roles/bastion_server/bastion_s390x/fix-sched.yaml index 9372f308..94424849 100644 --- a/roles/bastion_server/bastion_s390x/fix-sched.yaml +++ b/roles/bastion_server/bastion_s390x/fix-sched.yaml @@ -8,3 +8,13 @@ regexp: ': true' 
replace: ': false' + - name: Create Ignition files + ansible.builtin.command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ + become: yes + + - name: Copy Ignition files to web server + copy: + src: /ocpinst/*.ign + dest: /var/www/html/ignition + remote_src: yes + diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 7fdae698..088c73a8 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -59,7 +59,18 @@ - name: Set mastersSchedulable parameter to False replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml - regexp: '^true' - replace: 'false' + regexp: ': true' + replace: ': false' + + - name: Create Ignition files + ansible.builtin.command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ + become: yes + + - name: Copy Ignition files to web server + copy: + src: /ocpinst/*.ign + dest: /var/www/html/ignition + remote_src: yes + From 20a639d4a5693dfe43ca79c0a5f5fbb1583904d5 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 12:01:56 -0500 Subject: [PATCH 138/885] fixing ignition file creation --- roles/bastion_server/bastion_s390x/fix-sched.yaml | 10 ++++++++++ .../bastion_s390x/tasks/get-ocp.yaml | 15 +++++++++++++-- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/fix-sched.yaml b/roles/bastion_server/bastion_s390x/fix-sched.yaml index 9372f308..94424849 100644 --- a/roles/bastion_server/bastion_s390x/fix-sched.yaml +++ b/roles/bastion_server/bastion_s390x/fix-sched.yaml @@ -8,3 +8,13 @@ regexp: ': true' replace: ': false' + - name: Create Ignition files + ansible.builtin.command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ + become: yes + + - name: Copy Ignition files to web server + copy: + src: /ocpinst/*.ign + dest: /var/www/html/ignition + remote_src: yes + diff --git 
a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 7fdae698..088c73a8 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -59,7 +59,18 @@ - name: Set mastersSchedulable parameter to False replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml - regexp: '^true' - replace: 'false' + regexp: ': true' + replace: ': false' + + - name: Create Ignition files + ansible.builtin.command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ + become: yes + + - name: Copy Ignition files to web server + copy: + src: /ocpinst/*.ign + dest: /var/www/html/ignition + remote_src: yes + From 5c6e6f4baec236ba8563583dcf9ea4b70cb6bdf4 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 12:05:05 -0500 Subject: [PATCH 139/885] created ignition landing directory --- roles/bastion_server/bastion_s390x/fix-sched.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/roles/bastion_server/bastion_s390x/fix-sched.yaml b/roles/bastion_server/bastion_s390x/fix-sched.yaml index 94424849..a21a7db1 100644 --- a/roles/bastion_server/bastion_s390x/fix-sched.yaml +++ b/roles/bastion_server/bastion_s390x/fix-sched.yaml @@ -12,6 +12,11 @@ ansible.builtin.command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ become: yes + - name: create Ignition directory on webserver + file: + path: /var/www/html/ignition + state: directory + - name: Copy Ignition files to web server copy: src: /ocpinst/*.ign From e36d2797cfe3a66fb4587807cd2e9373b99e1c57 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 12:05:05 -0500 Subject: [PATCH 140/885] created ignition landing directory --- roles/bastion_server/bastion_s390x/fix-sched.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/roles/bastion_server/bastion_s390x/fix-sched.yaml b/roles/bastion_server/bastion_s390x/fix-sched.yaml index 
94424849..a21a7db1 100644 --- a/roles/bastion_server/bastion_s390x/fix-sched.yaml +++ b/roles/bastion_server/bastion_s390x/fix-sched.yaml @@ -12,6 +12,11 @@ ansible.builtin.command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ become: yes + - name: create Ignition directory on webserver + file: + path: /var/www/html/ignition + state: directory + - name: Copy Ignition files to web server copy: src: /ocpinst/*.ign From 771fdaf7a10606038e9459122141424599978bdd Mon Sep 17 00:00:00 2001 From: Phillip Wilson Date: Fri, 30 Jul 2021 12:16:43 -0500 Subject: [PATCH 141/885] fixed file transfer of ignition files --- .../bastion_s390x/fix-sched.yaml | 28 +++++++++++++------ 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/fix-sched.yaml b/roles/bastion_server/bastion_s390x/fix-sched.yaml index a21a7db1..17cfef11 100644 --- a/roles/bastion_server/bastion_s390x/fix-sched.yaml +++ b/roles/bastion_server/bastion_s390x/fix-sched.yaml @@ -2,11 +2,11 @@ become: true tasks: - - name: Set mastersSchedulable parameter to False - replace: - path: /ocpinst/manifests/cluster-scheduler-02-config.yml - regexp: ': true' - replace: ': false' +# - name: Set mastersSchedulable parameter to False +# replace: +# path: /ocpinst/manifests/cluster-scheduler-02-config.yml +# regexp: ': true' +# replace: ': false' - name: Create Ignition files ansible.builtin.command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ @@ -17,9 +17,21 @@ path: /var/www/html/ignition state: directory - - name: Copy Ignition files to web server + - name: Copy bootstrap Ignition file to web server copy: - src: /ocpinst/*.ign + src: /ocpinst/bootstrap.ign dest: /var/www/html/ignition remote_src: yes - + + - name: Copy control plane Ignition file to web server + copy: + src: /ocpinst/master.ign + dest: /var/www/html/ignition + remote_src: yes + + - name: Copy worker Ignition file to web server + copy: + src: /ocpinst/worker.ign + dest: 
/var/www/html/ignition + remote_src: yes + From eaa4e1a6c8ed870c9fcb8dd09ab367b0ed66aaeb Mon Sep 17 00:00:00 2001 From: Phillip Wilson Date: Fri, 30 Jul 2021 12:16:43 -0500 Subject: [PATCH 142/885] fixed file transfer of ignition files --- .../bastion_s390x/fix-sched.yaml | 28 +++++++++++++------ 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/fix-sched.yaml b/roles/bastion_server/bastion_s390x/fix-sched.yaml index a21a7db1..17cfef11 100644 --- a/roles/bastion_server/bastion_s390x/fix-sched.yaml +++ b/roles/bastion_server/bastion_s390x/fix-sched.yaml @@ -2,11 +2,11 @@ become: true tasks: - - name: Set mastersSchedulable parameter to False - replace: - path: /ocpinst/manifests/cluster-scheduler-02-config.yml - regexp: ': true' - replace: ': false' +# - name: Set mastersSchedulable parameter to False +# replace: +# path: /ocpinst/manifests/cluster-scheduler-02-config.yml +# regexp: ': true' +# replace: ': false' - name: Create Ignition files ansible.builtin.command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ @@ -17,9 +17,21 @@ path: /var/www/html/ignition state: directory - - name: Copy Ignition files to web server + - name: Copy bootstrap Ignition file to web server copy: - src: /ocpinst/*.ign + src: /ocpinst/bootstrap.ign dest: /var/www/html/ignition remote_src: yes - + + - name: Copy control plane Ignition file to web server + copy: + src: /ocpinst/master.ign + dest: /var/www/html/ignition + remote_src: yes + + - name: Copy worker Ignition file to web server + copy: + src: /ocpinst/worker.ign + dest: /var/www/html/ignition + remote_src: yes + From 9a504e349ca59f4d51e3618366f0281994a6de26 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 12:20:16 -0500 Subject: [PATCH 143/885] added fixed plays to get-ocp --- .../bastion_s390x/tasks/get-ocp.yaml | 24 +++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git 
a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 088c73a8..06ccc1f3 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -51,7 +51,6 @@ src: install-config.yaml dest: /ocpinst/install-config.yaml -# testing portion - name: Create Manifests ansible.builtin.command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes @@ -66,11 +65,28 @@ ansible.builtin.command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ become: yes - - name: Copy Ignition files to web server + - name: create Ignition directory on webserver + file: + path: /var/www/html/ignition + state: directory + + - name: Copy bootstrap Ignition file to web server + copy: + src: /ocpinst/bootstrap.ign + dest: /var/www/html/ignition + remote_src: yes + + - name: Copy control plane Ignition file to web server + copy: + src: /ocpinst/master.ign + dest: /var/www/html/ignition + remote_src: yes + + - name: Copy worker Ignition file to web server copy: - src: /ocpinst/*.ign + src: /ocpinst/worker.ign dest: /var/www/html/ignition remote_src: yes - + From b88e63eeee77a16ab44a30c2f91ee56a1780e711 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 12:20:16 -0500 Subject: [PATCH 144/885] added fixed plays to get-ocp --- .../bastion_s390x/tasks/get-ocp.yaml | 24 +++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 088c73a8..06ccc1f3 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -51,7 +51,6 @@ src: install-config.yaml dest: /ocpinst/install-config.yaml -# testing portion - name: Create Manifests ansible.builtin.command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes @@ -66,11 
+65,28 @@ ansible.builtin.command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ become: yes - - name: Copy Ignition files to web server + - name: create Ignition directory on webserver + file: + path: /var/www/html/ignition + state: directory + + - name: Copy bootstrap Ignition file to web server + copy: + src: /ocpinst/bootstrap.ign + dest: /var/www/html/ignition + remote_src: yes + + - name: Copy control plane Ignition file to web server + copy: + src: /ocpinst/master.ign + dest: /var/www/html/ignition + remote_src: yes + + - name: Copy worker Ignition file to web server copy: - src: /ocpinst/*.ign + src: /ocpinst/worker.ign dest: /var/www/html/ignition remote_src: yes - + From 0db23a64ddcb65d263a14cb8f6a47499277a8ad8 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 30 Jul 2021 16:59:53 -0400 Subject: [PATCH 145/885] fixed unarchiving prep_kvm_guests playbook. Create_boostrap playbook complete. Created shell scripts for control and compute nodes --- bootstrap.yml | 45 ------------------- ...create_bootstrap.sh => create_bootstrap.sh | 0 create_bootstrap.yaml | 20 +++++++++ files/shell_scripts/create_compute.sh | 6 +++ files/shell_scripts/create_control.sh | 9 ++++ .../tasks/create_bootstrap.yaml | 17 ------- .../kvm_host_s390x/tasks/prep_kvm_guests.yaml | 16 +++---- 7 files changed, 42 insertions(+), 71 deletions(-) delete mode 100644 bootstrap.yml rename files/shell_scripts/create_bootstrap.sh => create_bootstrap.sh (100%) create mode 100644 create_bootstrap.yaml create mode 100644 files/shell_scripts/create_compute.sh create mode 100644 files/shell_scripts/create_control.sh delete mode 100644 roles/bootstrap_server/tasks/create_bootstrap.yaml diff --git a/bootstrap.yml b/bootstrap.yml deleted file mode 100644 index 64b68c77..00000000 --- a/bootstrap.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- - -- hosts: all - become: true - pre_tasks: - - - name: install updates (CentOS) - tags: always - dnf: - update_only: yes - update_cache: yes - 
when: ansible_distribution == "CentOS" - - - name: install updates (Ubuntu) - tags: always - apt: - upgrade: dist - update_cache: yes - when: ansible_distribution == "Ubuntu" - -- hosts: all - become: true - tasks: - - - name: create simone user - tags: always - user: - name: simone - groups: root - - - name: add ssh key for simone - tags: always - authorized_key: - user: simone - key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKHEBRxXjvVaYY8mg0S05qqUWJQaDLbzO4w5uwN8ogJ2 ansible" - - - name: add sudeoers file for simone - tags: always - copy: - src: sudoer_simone - dest: /etc/sudoers.d/simone - owner: root - group: root - mode: 0440 - diff --git a/files/shell_scripts/create_bootstrap.sh b/create_bootstrap.sh similarity index 100% rename from files/shell_scripts/create_bootstrap.sh rename to create_bootstrap.sh diff --git a/create_bootstrap.yaml b/create_bootstrap.yaml new file mode 100644 index 00000000..dd909726 --- /dev/null +++ b/create_bootstrap.yaml @@ -0,0 +1,20 @@ +--- + +- hosts: kvm_host + become: true + tasks: + + - name: virtualize bootstrap + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G + + - name: boot bootstrap + command: virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.root> + +## - name: wait for bootstrap installation to complete + +## to wait for bootstrap creation before continuing, use one of these: +## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_module.html +## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_connection_module.html +## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/pause_module.html + + diff --git a/files/shell_scripts/create_compute.sh b/files/shell_scripts/create_compute.sh new file mode 100644 index 00000000..e39cb1bf --- /dev/null +++ b/files/shell_scripts/create_compute.sh @@ 
-0,0 +1,6 @@ +#!/bin/bash + +qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G +qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G +virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> +virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> diff --git a/files/shell_scripts/create_control.sh b/files/shell_scripts/create_control.sh new file mode 100644 index 00000000..12a8536b --- /dev/null +++ b/files/shell_scripts/create_control.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G +qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G +qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G + +virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> +virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> +virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> diff --git a/roles/bootstrap_server/tasks/create_bootstrap.yaml b/roles/bootstrap_server/tasks/create_bootstrap.yaml deleted file mode 100644 index 4406839d..00000000 --- a/roles/bootstrap_server/tasks/create_bootstrap.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - -- hosts: bootstrap_server - become: true - tasks: - - - 
name: run bootstrap build sheel script - command: files/shell_scripts/create_bootstrap.sh - - - name: wait for bootstrap installation to complete - -## to wait for bootstrap creation before continuing, use one of these: -## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_module.html -## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_connection_module.html -## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/pause_module.html - - diff --git a/roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml b/roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml index df7c3303..a419cedf 100644 --- a/roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml +++ b/roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml @@ -3,14 +3,12 @@ - hosts: kvm_host become: true tasks: - - - name: create landing directory - file: - path: /var/lib/libvirt/images - state: directory + + - name: get dependencies from openshift.com + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz + dest: /var/lib/libvirt/images/ + - name: Unzip OCP dependencies - ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz - dest: /var/lib/libvirt/images - remote_src: yes + command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz From 9728988c70f56cd2b3ec691745373509985d5245 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 30 Jul 2021 16:59:53 -0400 Subject: [PATCH 146/885] fixed unarchiving prep_kvm_guests playbook. Create_boostrap playbook complete. 
Created shell scripts for control and compute nodes --- bootstrap.yml | 45 ------------------- ...create_bootstrap.sh => create_bootstrap.sh | 0 create_bootstrap.yaml | 20 +++++++++ files/shell_scripts/create_compute.sh | 6 +++ files/shell_scripts/create_control.sh | 9 ++++ .../tasks/create_bootstrap.yaml | 17 ------- .../kvm_host_s390x/tasks/prep_kvm_guests.yaml | 16 +++---- 7 files changed, 42 insertions(+), 71 deletions(-) delete mode 100644 bootstrap.yml rename files/shell_scripts/create_bootstrap.sh => create_bootstrap.sh (100%) create mode 100644 create_bootstrap.yaml create mode 100644 files/shell_scripts/create_compute.sh create mode 100644 files/shell_scripts/create_control.sh delete mode 100644 roles/bootstrap_server/tasks/create_bootstrap.yaml diff --git a/bootstrap.yml b/bootstrap.yml deleted file mode 100644 index 64b68c77..00000000 --- a/bootstrap.yml +++ /dev/null @@ -1,45 +0,0 @@ ---- - -- hosts: all - become: true - pre_tasks: - - - name: install updates (CentOS) - tags: always - dnf: - update_only: yes - update_cache: yes - when: ansible_distribution == "CentOS" - - - name: install updates (Ubuntu) - tags: always - apt: - upgrade: dist - update_cache: yes - when: ansible_distribution == "Ubuntu" - -- hosts: all - become: true - tasks: - - - name: create simone user - tags: always - user: - name: simone - groups: root - - - name: add ssh key for simone - tags: always - authorized_key: - user: simone - key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKHEBRxXjvVaYY8mg0S05qqUWJQaDLbzO4w5uwN8ogJ2 ansible" - - - name: add sudeoers file for simone - tags: always - copy: - src: sudoer_simone - dest: /etc/sudoers.d/simone - owner: root - group: root - mode: 0440 - diff --git a/files/shell_scripts/create_bootstrap.sh b/create_bootstrap.sh similarity index 100% rename from files/shell_scripts/create_bootstrap.sh rename to create_bootstrap.sh diff --git a/create_bootstrap.yaml b/create_bootstrap.yaml new file mode 100644 index 00000000..dd909726 --- /dev/null +++ 
b/create_bootstrap.yaml @@ -0,0 +1,20 @@ +--- + +- hosts: kvm_host + become: true + tasks: + + - name: virtualize bootstrap + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G + + - name: boot bootstrap + command: virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.root> + +## - name: wait for bootstrap installation to complete + +## to wait for bootstrap creation before continuing, use one of these: +## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_module.html +## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_connection_module.html +## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/pause_module.html + + diff --git a/files/shell_scripts/create_compute.sh b/files/shell_scripts/create_compute.sh new file mode 100644 index 00000000..e39cb1bf --- /dev/null +++ b/files/shell_scripts/create_compute.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G +qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G +virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> +virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> diff --git a/files/shell_scripts/create_control.sh b/files/shell_scripts/create_control.sh new file mode 100644 index 00000000..12a8536b --- /dev/null +++ b/files/shell_scripts/create_control.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 
100G +qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G +qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G + +virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> +virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> +virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> diff --git a/roles/bootstrap_server/tasks/create_bootstrap.yaml b/roles/bootstrap_server/tasks/create_bootstrap.yaml deleted file mode 100644 index 4406839d..00000000 --- a/roles/bootstrap_server/tasks/create_bootstrap.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - -- hosts: bootstrap_server - become: true - tasks: - - - name: run bootstrap build sheel script - command: files/shell_scripts/create_bootstrap.sh - - - name: wait for bootstrap installation to complete - -## to wait for bootstrap creation before continuing, use one of these: -## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_module.html -## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_connection_module.html -## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/pause_module.html - - diff --git a/roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml b/roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml index df7c3303..a419cedf 100644 --- a/roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml +++ b/roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml @@ -3,14 +3,12 @@ - hosts: kvm_host become: true tasks: - - - name: create landing directory - file: - path: /var/lib/libvirt/images - state: 
directory + + - name: get dependencies from openshift.com + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz + dest: /var/lib/libvirt/images/ + - name: Unzip OCP dependencies - ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz - dest: /var/lib/libvirt/images - remote_src: yes + command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz From 0e5da94f025529a65f2bfbdcc70f1d571fdfad08 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 17:06:20 -0500 Subject: [PATCH 147/885] added additioal bootstrap playbook for test --- pw-bootstrap.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 pw-bootstrap.yaml diff --git a/pw-bootstrap.yaml b/pw-bootstrap.yaml new file mode 100644 index 00000000..38992d67 --- /dev/null +++ b/pw-bootstrap.yaml @@ -0,0 +1,12 @@ +--- + +- hosts: kvm_host + become: true + tasks: + + - name: virtualize bootstrap + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G + + - name: boot bootstrap + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + \ No newline at end of file From 
51c162e880831e328a76953cbe092d1b0de1da04 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 17:06:20 -0500 Subject: [PATCH 148/885] added additioal bootstrap playbook for test --- pw-bootstrap.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 pw-bootstrap.yaml diff --git a/pw-bootstrap.yaml b/pw-bootstrap.yaml new file mode 100644 index 00000000..38992d67 --- /dev/null +++ b/pw-bootstrap.yaml @@ -0,0 +1,12 @@ +--- + +- hosts: kvm_host + become: true + tasks: + + - name: virtualize bootstrap + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G + + - name: boot bootstrap + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + \ No newline at end of file From 385cdb6c7813024e3914c379a48dcb82ab8ff5f9 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 17:11:55 -0500 Subject: [PATCH 149/885] changed command for booting bootstrap --- pw-bootstrap.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pw-bootstrap.yaml b/pw-bootstrap.yaml index 38992d67..e464ce56 100644 --- a/pw-bootstrap.yaml +++ b/pw-bootstrap.yaml @@ -8,5 +8,5 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - name: boot 
bootstrap - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - \ No newline at end of file +# command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.root \ No newline at end of file From 827e68bebc475393dd6ef4cf9a00b0263fc7476f Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 17:11:55 -0500 Subject: [PATCH 150/885] changed command for booting bootstrap --- pw-bootstrap.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pw-bootstrap.yaml b/pw-bootstrap.yaml index 38992d67..e464ce56 
100644 --- a/pw-bootstrap.yaml +++ b/pw-bootstrap.yaml @@ -8,5 +8,5 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - name: boot bootstrap - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - \ No newline at end of file +# command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.root \ No newline at end of file From 562bd7696d7ab6761cc75dc35b5142917cb71130 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 18:11:25 -0500 
Subject: [PATCH 151/885] fix virt-install line --- pw-bootstrap.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pw-bootstrap.yaml b/pw-bootstrap.yaml index e464ce56..701f6177 100644 --- a/pw-bootstrap.yaml +++ b/pw-bootstrap.yaml @@ -8,5 +8,5 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - name: boot bootstrap -# command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - command: virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.root \ No newline at end of file + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device 
virtio-blk,serial=ignition,drive=ignition" --noautoconsole +# command: virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.root \ No newline at end of file From 3fe63a411c6e7d7c74f07466f0f7e0f7ee533d01 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 18:11:25 -0500 Subject: [PATCH 152/885] fix virt-install line --- pw-bootstrap.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pw-bootstrap.yaml b/pw-bootstrap.yaml index e464ce56..701f6177 100644 --- a/pw-bootstrap.yaml +++ b/pw-bootstrap.yaml @@ -8,5 +8,5 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - name: boot bootstrap -# command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - command: virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.root \ No newline at end of file + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign 
ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +# command: virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.root \ No newline at end of file From 0bc6ca3e591c7a8e2edeeac8e91ce2c58362a617 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 18:24:55 -0500 Subject: [PATCH 153/885] changed sshkey to root on bastion --- files/install-config.yaml | 2 +- .../bastion_s390x/tasks/files/install-config.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/files/install-config.yaml b/files/install-config.yaml index ba837ca2..c9134ad7 100644 --- a/files/install-config.yaml +++ b/files/install-config.yaml @@ -23,4 +23,4 @@ platform: none: {} fips: false pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDh3L0INDTn6OGPynIaHDuD245fAQsMY4vNjHP+edVxTOfIGwm6I77mMfT8b8RSZR6C7k+5Ye9QvNadtkxCb0iadzhZiGcwS4zlSTkOnU7HVaR8sQ24nk+Hmo57bUyEON8HlKg83hMlyorCCY2uP+pGJz8CJ+oqRPKBEDGm263+YHKIxokRLRa9cCAlNNvxqlpsyZcwid2GGYKBi/bbkJH+l/3Lc/QLfHzcHi64BzLHsiV9rsWPVqX8DNgs1+z4tOEiVMAXoI6F/rBFNQ1oofF1VyByIndp7XWyYDTbO7mrZayXCJzCGqo7W23Xn0Osx0zczYVigkEZDhBuOvIAseZET8vqzWjMf3DLQsf2Ai0RxJreOzf7MCUTs3YuvwKTsUjgVZHRaU2ZyPubbyPwgWMo0A/JjdZLRym7tQnMzszxJJcF66TS8CArX3+NbXOfm3O6LYxFUpAs1vAydc0hd5ciQ4Tb79Off9BZZ2ET7UKzXiXi9Vp2Izm91GBx88s/1crQSG5LgeQWltfCv2ak+VD88r3Y3kr4iK+kHLbCinKtp9mdRuoBpejZn01EHl3eSpJAsJIH56c1t1Pr6GOM6DX7FRh3DEQUfsRCsiVgjyo00B9i+NPruN5tKpN8OBbZB/fdEvEWpnbelj5WTf7IzGBmt7z48crD8zE+tae01Vj0Vw== ansible' \ No newline at end of file +sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDppZ1cEGLhC3T9rvex+ZTVq4V1IKgSUYclRkZPNFPyrMjUw+scSVw5O4wlgnhZF2ZB6XdTPKUOolvNv1TH6YdtMOulAsJ6mvs2KnRW2Sd1jdw9dJ3ijkMOAz2CBHvt9q4r0u+87WaAAMlGvHAEqCJvBawPTtYkNSXTDp7dagb2wt8+/j3HI+em7LSY4obNtYmXHsQLiEYSPQMcalHPmbgpTLerST19x214DiDCfHHkk2ek+BEdtgQEJNIc6ufyrwk6KUVv7MCgCyQB2o5r3G/KX31Va3hyRRAca1MDI3Ee+C5XpYfPZvJRhL/FKa2FRATffl7Kn3zZLH2ZtOV4Cs2zebjX0dOGUipyjnf/MacsTIeKK7Bm91IPgqgTSlcZbiIcYXoGSIO0u2pytzPJTUywmPvy2AP/NnPZn+NR39Zf/DYys8vWVPRWOKbJBSHDO0yikzH1xUt62+XS5Kl6kmPsQz3VEghA/lvVJ6KpW5PSgFXMkn8e+CcHV+bIO2zc5oc= root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file diff --git a/roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml b/roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml index ba837ca2..c9134ad7 100644 --- a/roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml @@ -23,4 +23,4 @@ platform: none: {} fips: false pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDh3L0INDTn6OGPynIaHDuD245fAQsMY4vNjHP+edVxTOfIGwm6I77mMfT8b8RSZR6C7k+5Ye9QvNadtkxCb0iadzhZiGcwS4zlSTkOnU7HVaR8sQ24nk+Hmo57bUyEON8HlKg83hMlyorCCY2uP+pGJz8CJ+oqRPKBEDGm263+YHKIxokRLRa9cCAlNNvxqlpsyZcwid2GGYKBi/bbkJH+l/3Lc/QLfHzcHi64BzLHsiV9rsWPVqX8DNgs1+z4tOEiVMAXoI6F/rBFNQ1oofF1VyByIndp7XWyYDTbO7mrZayXCJzCGqo7W23Xn0Osx0zczYVigkEZDhBuOvIAseZET8vqzWjMf3DLQsf2Ai0RxJreOzf7MCUTs3YuvwKTsUjgVZHRaU2ZyPubbyPwgWMo0A/JjdZLRym7tQnMzszxJJcF66TS8CArX3+NbXOfm3O6LYxFUpAs1vAydc0hd5ciQ4Tb79Off9BZZ2ET7UKzXiXi9Vp2Izm91GBx88s/1crQSG5LgeQWltfCv2ak+VD88r3Y3kr4iK+kHLbCinKtp9mdRuoBpejZn01EHl3eSpJAsJIH56c1t1Pr6GOM6DX7FRh3DEQUfsRCsiVgjyo00B9i+NPruN5tKpN8OBbZB/fdEvEWpnbelj5WTf7IzGBmt7z48crD8zE+tae01Vj0Vw== ansible' \ No newline at end of file +sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDppZ1cEGLhC3T9rvex+ZTVq4V1IKgSUYclRkZPNFPyrMjUw+scSVw5O4wlgnhZF2ZB6XdTPKUOolvNv1TH6YdtMOulAsJ6mvs2KnRW2Sd1jdw9dJ3ijkMOAz2CBHvt9q4r0u+87WaAAMlGvHAEqCJvBawPTtYkNSXTDp7dagb2wt8+/j3HI+em7LSY4obNtYmXHsQLiEYSPQMcalHPmbgpTLerST19x214DiDCfHHkk2ek+BEdtgQEJNIc6ufyrwk6KUVv7MCgCyQB2o5r3G/KX31Va3hyRRAca1MDI3Ee+C5XpYfPZvJRhL/FKa2FRATffl7Kn3zZLH2ZtOV4Cs2zebjX0dOGUipyjnf/MacsTIeKK7Bm91IPgqgTSlcZbiIcYXoGSIO0u2pytzPJTUywmPvy2AP/NnPZn+NR39Zf/DYys8vWVPRWOKbJBSHDO0yikzH1xUt62+XS5Kl6kmPsQz3VEghA/lvVJ6KpW5PSgFXMkn8e+CcHV+bIO2zc5oc= root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file From 6827883d5245484bc1090b6fe435fddb37cdca56 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 18:24:55 -0500 Subject: [PATCH 154/885] changed sshkey to root on bastion --- files/install-config.yaml | 2 +- .../bastion_s390x/tasks/files/install-config.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/files/install-config.yaml b/files/install-config.yaml index ba837ca2..c9134ad7 100644 --- a/files/install-config.yaml +++ b/files/install-config.yaml @@ -23,4 +23,4 @@ platform: none: {} fips: false pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDh3L0INDTn6OGPynIaHDuD245fAQsMY4vNjHP+edVxTOfIGwm6I77mMfT8b8RSZR6C7k+5Ye9QvNadtkxCb0iadzhZiGcwS4zlSTkOnU7HVaR8sQ24nk+Hmo57bUyEON8HlKg83hMlyorCCY2uP+pGJz8CJ+oqRPKBEDGm263+YHKIxokRLRa9cCAlNNvxqlpsyZcwid2GGYKBi/bbkJH+l/3Lc/QLfHzcHi64BzLHsiV9rsWPVqX8DNgs1+z4tOEiVMAXoI6F/rBFNQ1oofF1VyByIndp7XWyYDTbO7mrZayXCJzCGqo7W23Xn0Osx0zczYVigkEZDhBuOvIAseZET8vqzWjMf3DLQsf2Ai0RxJreOzf7MCUTs3YuvwKTsUjgVZHRaU2ZyPubbyPwgWMo0A/JjdZLRym7tQnMzszxJJcF66TS8CArX3+NbXOfm3O6LYxFUpAs1vAydc0hd5ciQ4Tb79Off9BZZ2ET7UKzXiXi9Vp2Izm91GBx88s/1crQSG5LgeQWltfCv2ak+VD88r3Y3kr4iK+kHLbCinKtp9mdRuoBpejZn01EHl3eSpJAsJIH56c1t1Pr6GOM6DX7FRh3DEQUfsRCsiVgjyo00B9i+NPruN5tKpN8OBbZB/fdEvEWpnbelj5WTf7IzGBmt7z48crD8zE+tae01Vj0Vw== ansible' \ No newline at end of file +sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDppZ1cEGLhC3T9rvex+ZTVq4V1IKgSUYclRkZPNFPyrMjUw+scSVw5O4wlgnhZF2ZB6XdTPKUOolvNv1TH6YdtMOulAsJ6mvs2KnRW2Sd1jdw9dJ3ijkMOAz2CBHvt9q4r0u+87WaAAMlGvHAEqCJvBawPTtYkNSXTDp7dagb2wt8+/j3HI+em7LSY4obNtYmXHsQLiEYSPQMcalHPmbgpTLerST19x214DiDCfHHkk2ek+BEdtgQEJNIc6ufyrwk6KUVv7MCgCyQB2o5r3G/KX31Va3hyRRAca1MDI3Ee+C5XpYfPZvJRhL/FKa2FRATffl7Kn3zZLH2ZtOV4Cs2zebjX0dOGUipyjnf/MacsTIeKK7Bm91IPgqgTSlcZbiIcYXoGSIO0u2pytzPJTUywmPvy2AP/NnPZn+NR39Zf/DYys8vWVPRWOKbJBSHDO0yikzH1xUt62+XS5Kl6kmPsQz3VEghA/lvVJ6KpW5PSgFXMkn8e+CcHV+bIO2zc5oc= root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file diff --git a/roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml b/roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml index ba837ca2..c9134ad7 100644 --- a/roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml @@ -23,4 +23,4 @@ platform: none: {} fips: false pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDh3L0INDTn6OGPynIaHDuD245fAQsMY4vNjHP+edVxTOfIGwm6I77mMfT8b8RSZR6C7k+5Ye9QvNadtkxCb0iadzhZiGcwS4zlSTkOnU7HVaR8sQ24nk+Hmo57bUyEON8HlKg83hMlyorCCY2uP+pGJz8CJ+oqRPKBEDGm263+YHKIxokRLRa9cCAlNNvxqlpsyZcwid2GGYKBi/bbkJH+l/3Lc/QLfHzcHi64BzLHsiV9rsWPVqX8DNgs1+z4tOEiVMAXoI6F/rBFNQ1oofF1VyByIndp7XWyYDTbO7mrZayXCJzCGqo7W23Xn0Osx0zczYVigkEZDhBuOvIAseZET8vqzWjMf3DLQsf2Ai0RxJreOzf7MCUTs3YuvwKTsUjgVZHRaU2ZyPubbyPwgWMo0A/JjdZLRym7tQnMzszxJJcF66TS8CArX3+NbXOfm3O6LYxFUpAs1vAydc0hd5ciQ4Tb79Off9BZZ2ET7UKzXiXi9Vp2Izm91GBx88s/1crQSG5LgeQWltfCv2ak+VD88r3Y3kr4iK+kHLbCinKtp9mdRuoBpejZn01EHl3eSpJAsJIH56c1t1Pr6GOM6DX7FRh3DEQUfsRCsiVgjyo00B9i+NPruN5tKpN8OBbZB/fdEvEWpnbelj5WTf7IzGBmt7z48crD8zE+tae01Vj0Vw== ansible' \ No newline at end of file +sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDppZ1cEGLhC3T9rvex+ZTVq4V1IKgSUYclRkZPNFPyrMjUw+scSVw5O4wlgnhZF2ZB6XdTPKUOolvNv1TH6YdtMOulAsJ6mvs2KnRW2Sd1jdw9dJ3ijkMOAz2CBHvt9q4r0u+87WaAAMlGvHAEqCJvBawPTtYkNSXTDp7dagb2wt8+/j3HI+em7LSY4obNtYmXHsQLiEYSPQMcalHPmbgpTLerST19x214DiDCfHHkk2ek+BEdtgQEJNIc6ufyrwk6KUVv7MCgCyQB2o5r3G/KX31Va3hyRRAca1MDI3Ee+C5XpYfPZvJRhL/FKa2FRATffl7Kn3zZLH2ZtOV4Cs2zebjX0dOGUipyjnf/MacsTIeKK7Bm91IPgqgTSlcZbiIcYXoGSIO0u2pytzPJTUywmPvy2AP/NnPZn+NR39Zf/DYys8vWVPRWOKbJBSHDO0yikzH1xUt62+XS5Kl6kmPsQz3VEghA/lvVJ6KpW5PSgFXMkn8e+CcHV+bIO2zc5oc= root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file From 84c9d6efdeb669078219e7c7919c46d83230f7f5 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 18:33:33 -0500 Subject: [PATCH 155/885] fixed ssh key in several files --- .../bastion_server/bastion_s390x/tasks/fill_install_config.yaml | 2 +- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml b/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml index 1b4156b6..567fd275 100644 --- a/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml @@ -41,4 +41,4 @@ none: {} fips: false pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' - sshKey: '' + sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDppZ1cEGLhC3T9rvex+ZTVq4V1IKgSUYclRkZPNFPyrMjUw+scSVw5O4wlgnhZF2ZB6XdTPKUOolvNv1TH6YdtMOulAsJ6mvs2KnRW2Sd1jdw9dJ3ijkMOAz2CBHvt9q4r0u+87WaAAMlGvHAEqCJvBawPTtYkNSXTDp7dagb2wt8+/j3HI+em7LSY4obNtYmXHsQLiEYSPQMcalHPmbgpTLerST19x214DiDCfHHkk2ek+BEdtgQEJNIc6ufyrwk6KUVv7MCgCyQB2o5r3G/KX31Va3hyRRAca1MDI3Ee+C5XpYfPZvJRhL/FKa2FRATffl7Kn3zZLH2ZtOV4Cs2zebjX0dOGUipyjnf/MacsTIeKK7Bm91IPgqgTSlcZbiIcYXoGSIO0u2pytzPJTUywmPvy2AP/NnPZn+NR39Zf/DYys8vWVPRWOKbJBSHDO0yikzH1xUt62+XS5Kl6kmPsQz3VEghA/lvVJ6KpW5PSgFXMkn8e+CcHV+bIO2zc5oc= root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 06ccc1f3..f2198677 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -46,7 +46,7 @@ group: root mode: '0755' - - name: Copy install-config.yml to ocp install directory + - name: Copy install-config.yaml to ocp install directory copy: src: install-config.yaml dest: /ocpinst/install-config.yaml From 0c3225c27e5f84c3f595c73aea93af7a9bf96737 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 18:33:33 -0500 
Subject: [PATCH 156/885] fixed ssh key in several files --- .../bastion_server/bastion_s390x/tasks/fill_install_config.yaml | 2 +- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml b/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml index 1b4156b6..567fd275 100644 --- a/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml @@ -41,4 +41,4 @@ none: {} fips: false pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX
2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' - sshKey: '' + sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDppZ1cEGLhC3T9rvex+ZTVq4V1IKgSUYclRkZPNFPyrMjUw+scSVw5O4wlgnhZF2ZB6XdTPKUOolvNv1TH6YdtMOulAsJ6mvs2KnRW2Sd1jdw9dJ3ijkMOAz2CBHvt9q4r0u+87WaAAMlGvHAEqCJvBawPTtYkNSXTDp7dagb2wt8+/j3HI+em7LSY4obNtYmXHsQLiEYSPQMcalHPmbgpTLerST19x214DiDCfHHkk2ek+BEdtgQEJNIc6ufyrwk6KUVv7MCgCyQB2o5r3G/KX31Va3hyRRAca1MDI3Ee+C5XpYfPZvJRhL/FKa2FRATffl7Kn3zZLH2ZtOV4Cs2zebjX0dOGUipyjnf/MacsTIeKK7Bm91IPgqgTSlcZbiIcYXoGSIO0u2pytzPJTUywmPvy2AP/NnPZn+NR39Zf/DYys8vWVPRWOKbJBSHDO0yikzH1xUt62+XS5Kl6kmPsQz3VEghA/lvVJ6KpW5PSgFXMkn8e+CcHV+bIO2zc5oc= root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' diff --git 
a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index 06ccc1f3..f2198677 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -46,7 +46,7 @@ group: root mode: '0755' - - name: Copy install-config.yml to ocp install directory + - name: Copy install-config.yaml to ocp install directory copy: src: install-config.yaml dest: /ocpinst/install-config.yaml From 866272f312e5b119cff8c3001f459ffe870a8aba Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 19:19:33 -0500 Subject: [PATCH 157/885] add chdir to command --- pw-bootstrap.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/pw-bootstrap.yaml b/pw-bootstrap.yaml index 701f6177..1b783d61 100644 --- a/pw-bootstrap.yaml +++ b/pw-bootstrap.yaml @@ -8,5 +8,6 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - name: boot bootstrap + chdir: /var/lib/libvirt/images command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole # command: virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.root \ No newline at end of file From c757dd9d82c8620b1712acf063f4e2d3a481bf9a 
Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 30 Jul 2021 19:19:33 -0500 Subject: [PATCH 158/885] add chdir to command --- pw-bootstrap.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/pw-bootstrap.yaml b/pw-bootstrap.yaml index 701f6177..1b783d61 100644 --- a/pw-bootstrap.yaml +++ b/pw-bootstrap.yaml @@ -8,5 +8,6 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - name: boot bootstrap + chdir: /var/lib/libvirt/images command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole # command: virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.root \ No newline at end of file From 58bc9f94f5532b27101d51c78ef1b1517644ecab Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Sat, 31 Jul 2021 11:34:09 -0500 Subject: [PATCH 159/885] add ocp pull secret file (remove later) --- files/pull-secret.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 files/pull-secret.txt diff --git a/files/pull-secret.txt b/files/pull-secret.txt new file mode 100644 index 00000000..ee0cd16d --- /dev/null +++ b/files/pull-secret.txt @@ -0,0 +1 @@ 
+{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}} From 90c36ed195a85ef7102b145582249047f4dc2542 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Sat, 31 Jul 2021 11:34:09 -0500 Subject: [PATCH 160/885] add ocp pull secret file (remove later) --- files/pull-secret.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 files/pull-secret.txt diff --git a/files/pull-secret.txt b/files/pull-secret.txt new file mode 100644 index 00000000..ee0cd16d --- /dev/null +++ b/files/pull-secret.txt @@ -0,0 +1 @@ 
+{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}} From 2047baa0aa8790f321e4cf84ab5f020cfd61bc26 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Sun, 1 Aug 2021 12:53:59 -0500 Subject: [PATCH 161/885] fixed build script to correct image names --- build_script.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/build_script.sh b/build_script.sh index 69666b66..799e6f28 100644 --- a/build_script.sh +++ b/build_script.sh @@ -10,14 +10,14 @@ #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive 
if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk 
/var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect 
qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign 
ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole From a0e87f7a1f800b14069c3063fd8ccd7c7121daaf Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Sun, 1 Aug 2021 12:53:59 -0500 Subject: [PATCH 162/885] fixed build script to correct image names --- 
build_script.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/build_script.sh b/build_script.sh index 69666b66..799e6f28 100644 --- a/build_script.sh +++ b/build_script.sh @@ -10,14 +10,14 @@ #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda 
coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot 
kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device 
virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive 
if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole From 068f7886c867f1eb7b6d25128edbd57e1ddc3d7a Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Sun, 1 Aug 2021 15:54:50 -0500 Subject: [PATCH 163/885] added new haproxy.cfg for ocp v4.8 --- files/haproxy8.cfg | 58 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 files/haproxy8.cfg diff --git a/files/haproxy8.cfg b/files/haproxy8.cfg new file mode 100644 index 00000000..0b80783d --- /dev/null +++ b/files/haproxy8.cfg @@ -0,0 +1,58 @@ +global + log 127.0.0.1 local2 + pidfile /var/run/haproxy.pid + maxconn 4000 + daemon +defaults + mode http + log global + option dontlognull + option http-server-close + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout http-keep-alive 10s + timeout check 10s + maxconn 3000 +frontend stats + bind *:1936 + mode http + log global + maxconn 10 + stats enable + stats hide-version + stats refresh 30s + stats show-node + stats show-desc Stats for distribution cluster + stats auth admin:distribution + stats uri /stats +listen api-server-6443 + bind *:6443 + mode tcp 
+ server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s backup + server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s + server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s + server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s +listen machine-config-server-22623 + bind *:22623 + mode tcp + server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s backup + server master0 master0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server master1 master1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server master2 master2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s +listen ingress-router-443 + bind *:443 + mode tcp + balance source + server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s + server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s +listen ingress-router-80 + bind *:80 + mode tcp + balance source + server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s + server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s \ No newline at end of file From e111a5a7b30b08563e229b95c8a3f17d403f5502 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Sun, 1 Aug 2021 15:54:50 -0500 Subject: [PATCH 164/885] added new haproxy.cfg for ocp v4.8 --- files/haproxy8.cfg | 58 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 files/haproxy8.cfg diff --git a/files/haproxy8.cfg b/files/haproxy8.cfg new file mode 100644 index 00000000..0b80783d --- /dev/null +++ b/files/haproxy8.cfg @@ -0,0 +1,58 @@ +global + log 127.0.0.1 local2 + pidfile /var/run/haproxy.pid + maxconn 4000 + daemon +defaults + mode http + log global + option dontlognull + option http-server-close + option redispatch + 
retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout http-keep-alive 10s + timeout check 10s + maxconn 3000 +frontend stats + bind *:1936 + mode http + log global + maxconn 10 + stats enable + stats hide-version + stats refresh 30s + stats show-node + stats show-desc Stats for distribution cluster + stats auth admin:distribution + stats uri /stats +listen api-server-6443 + bind *:6443 + mode tcp + server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s backup + server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s + server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s + server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s +listen machine-config-server-22623 + bind *:22623 + mode tcp + server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s backup + server master0 master0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server master1 master1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server master2 master2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s +listen ingress-router-443 + bind *:443 + mode tcp + balance source + server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s + server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s +listen ingress-router-80 + bind *:80 + mode tcp + balance source + server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s + server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s \ No newline at end of file From 330d459f37097bb66bbf3da35d5077a7ab1f0e18 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Mon, 2 Aug 2021 08:51:26 -0500 Subject: [PATCH 165/885] Adding DNS record files for bastion DNS zone files added 
--- files/dns-text-rev.txt | 21 +++++++++++++++++++++ files/dns-text.txt | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 files/dns-text-rev.txt create mode 100644 files/dns-text.txt diff --git a/files/dns-text-rev.txt b/files/dns-text-rev.txt new file mode 100644 index 00000000..6000e368 --- /dev/null +++ b/files/dns-text-rev.txt @@ -0,0 +1,21 @@ +$TTL 86400 +@ IN SOA bastion-1.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com. ( + 2020011800 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL +) +;Name Server Information +@ IN NS bastion-1.ocp.home.local. +bastion IN A 9.60.87.139 + +;Reverse lookup for Name Server +139 IN PTR bastion.distribution.ocpz.wsclab.endicott.ibm.com. + +;PTR Record IP address to Hostname +138 IN PTR control-0.distribution.ocpz.wsclab.endicott.ibm.com. +137 IN PTR control-1.distribution.ocpz.wsclab.endicott.ibm.com. +136 IN PTR control-2.distribution.ocpz.wsclab.endicott.ibm.com. +135 IN PTR compute-0.distribution.ocpz.wsclab.endicott.ibm.com. +134 IN PTR compute-1.distribution.ocpz.wsclab.endicott.ibm.com. diff --git a/files/dns-text.txt b/files/dns-text.txt new file mode 100644 index 00000000..384e7dfa --- /dev/null +++ b/files/dns-text.txt @@ -0,0 +1,41 @@ +$TTL 86400 +@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com.( + 2020021821 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL +) + +;Name Server Information +@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. + +;IP Address for Name Server +bastion IN A 9.60.87.139 + +;entry for bootstrap host. 
+bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 + +;entry of your load balancer +haproxy IN A 9.60.87.139 + +;entries for the master nodes +control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 +control-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.137 +control-2.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.136 + +;entry for the bastion host +bastion IN A 9.60.87.139 + +;entries for the worker nodes +compute-0.ocp.home.local IN A 9.60.87.135 +compute-1.ocp.home.local IN A 9.60.87.134 + +;The api identifies the IP of your load balancer. +api.ocp.home.local IN CNAME haproxy.ocp.home.local. +api-int.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. + +;The wildcard also identifies the load balancer. +*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. + +;EOF \ No newline at end of file From 840d88ae91c30a6167c37608676b61c2619a4cd7 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Mon, 2 Aug 2021 08:51:26 -0500 Subject: [PATCH 166/885] Adding DNS record files for bastion DNS zone files added --- files/dns-text-rev.txt | 21 +++++++++++++++++++++ files/dns-text.txt | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+) create mode 100644 files/dns-text-rev.txt create mode 100644 files/dns-text.txt diff --git a/files/dns-text-rev.txt b/files/dns-text-rev.txt new file mode 100644 index 00000000..6000e368 --- /dev/null +++ b/files/dns-text-rev.txt @@ -0,0 +1,21 @@ +$TTL 86400 +@ IN SOA bastion-1.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com. ( + 2020011800 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL +) +;Name Server Information +@ IN NS bastion-1.ocp.home.local. +bastion IN A 9.60.87.139 + +;Reverse lookup for Name Server +139 IN PTR bastion.distribution.ocpz.wsclab.endicott.ibm.com. 
+ +;PTR Record IP address to Hostname +138 IN PTR control-0.distribution.ocpz.wsclab.endicott.ibm.com. +137 IN PTR control-1.distribution.ocpz.wsclab.endicott.ibm.com. +136 IN PTR control-2.distribution.ocpz.wsclab.endicott.ibm.com. +135 IN PTR compute-0.distribution.ocpz.wsclab.endicott.ibm.com. +134 IN PTR compute-1.distribution.ocpz.wsclab.endicott.ibm.com. diff --git a/files/dns-text.txt b/files/dns-text.txt new file mode 100644 index 00000000..384e7dfa --- /dev/null +++ b/files/dns-text.txt @@ -0,0 +1,41 @@ +$TTL 86400 +@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com.( + 2020021821 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL +) + +;Name Server Information +@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. + +;IP Address for Name Server +bastion IN A 9.60.87.139 + +;entry for bootstrap host. +bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 + +;entry of your load balancer +haproxy IN A 9.60.87.139 + +;entries for the master nodes +control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 +control-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.137 +control-2.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.136 + +;entry for the bastion host +bastion IN A 9.60.87.139 + +;entries for the worker nodes +compute-0.ocp.home.local IN A 9.60.87.135 +compute-1.ocp.home.local IN A 9.60.87.134 + +;The api identifies the IP of your load balancer. +api.ocp.home.local IN CNAME haproxy.ocp.home.local. +api-int.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. + +;The wildcard also identifies the load balancer. +*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. 
+ +;EOF \ No newline at end of file From c1b54236106dfb37a930c243674db8863666161a Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Mon, 2 Aug 2021 09:22:45 -0500 Subject: [PATCH 167/885] Create named.conf Added named.conf file for bastion dns server --- files/named.conf | 78 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 files/named.conf diff --git a/files/named.conf b/files/named.conf new file mode 100644 index 00000000..107460d0 --- /dev/null +++ b/files/named.conf @@ -0,0 +1,78 @@ +// +// named.conf +// +// Provided by Red Hat bind package to configure the ISC BIND named(8) DNS +// server as a caching only nameserver (as a localhost DNS resolver only). +// +// See /usr/share/doc/bind*/sample/ for example named configuration files. +// + +options { +// listen-on port 53 { 127.0.0.1; }; + listen-on port 53 { any; }; + listen-on-v6 port 53 { ::1; }; + directory "/var/named"; + dump-file "/var/named/data/cache_dump.db"; + statistics-file "/var/named/data/named_stats.txt"; + memstatistics-file "/var/named/data/named_mem_stats.txt"; + secroots-file "/var/named/data/named.secroots"; + recursing-file "/var/named/data/named.recursing"; + allow-query { any; }; + + /* + - If you are building an AUTHORITATIVE DNS server, do NOT enable recursion. + - If you are building a RECURSIVE (caching) DNS server, you need to enable + recursion. + - If your recursive DNS server has a public IP address, you MUST enable access + control to limit queries to your legitimate users. Failing to do so will + cause your server to become part of large scale DNS amplification + attacks. 
Implementing BCP38 within your network would greatly + reduce such attack surface + */ + recursion yes; + + dnssec-enable no; + dnssec-validation no; + + managed-keys-directory "/var/named/dynamic"; + + pid-file "/run/named/named.pid"; + session-keyfile "/run/named/session.key"; + + /* https://fedoraproject.org/wiki/Changes/CryptoPolicy */ + include "/etc/crypto-policies/back-ends/bind.config"; +}; + +logging { + channel default_debug { + file "data/named.run"; + severity dynamic; + }; +}; + +zone "." IN { + type forward; + forwarders { 9.60.70.82; }; +// type hint; +// file "named.ca"; +}; + +include "/etc/named.rfc1912.zones"; +include "/etc/named.root.key"; + +//forward zone +zone "distribution.ocpz.wsclab.endicott.ibm.com" IN { + type master; + file "distribution.db"; + allow-update { any; }; + allow-query { any; }; +}; + +//backward zone +zone "87.60.9.in-addr.arpa" IN { + type master; + file "distribution.rev"; + allow-update { any; }; + allow-query { any; }; +}; + From bf0b5f0b14c36c5d7d272cae09bd0033c1f9edde Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Mon, 2 Aug 2021 09:22:45 -0500 Subject: [PATCH 168/885] Create named.conf Added named.conf file for bastion dns server --- files/named.conf | 78 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 files/named.conf diff --git a/files/named.conf b/files/named.conf new file mode 100644 index 00000000..107460d0 --- /dev/null +++ b/files/named.conf @@ -0,0 +1,78 @@ +// +// named.conf +// +// Provided by Red Hat bind package to configure the ISC BIND named(8) DNS +// server as a caching only nameserver (as a localhost DNS resolver only). +// +// See /usr/share/doc/bind*/sample/ for example named configuration files. 
+// + +options { +// listen-on port 53 { 127.0.0.1; }; + listen-on port 53 { any; }; + listen-on-v6 port 53 { ::1; }; + directory "/var/named"; + dump-file "/var/named/data/cache_dump.db"; + statistics-file "/var/named/data/named_stats.txt"; + memstatistics-file "/var/named/data/named_mem_stats.txt"; + secroots-file "/var/named/data/named.secroots"; + recursing-file "/var/named/data/named.recursing"; + allow-query { any; }; + + /* + - If you are building an AUTHORITATIVE DNS server, do NOT enable recursion. + - If you are building a RECURSIVE (caching) DNS server, you need to enable + recursion. + - If your recursive DNS server has a public IP address, you MUST enable access + control to limit queries to your legitimate users. Failing to do so will + cause your server to become part of large scale DNS amplification + attacks. Implementing BCP38 within your network would greatly + reduce such attack surface + */ + recursion yes; + + dnssec-enable no; + dnssec-validation no; + + managed-keys-directory "/var/named/dynamic"; + + pid-file "/run/named/named.pid"; + session-keyfile "/run/named/session.key"; + + /* https://fedoraproject.org/wiki/Changes/CryptoPolicy */ + include "/etc/crypto-policies/back-ends/bind.config"; +}; + +logging { + channel default_debug { + file "data/named.run"; + severity dynamic; + }; +}; + +zone "." 
IN { + type forward; + forwarders { 9.60.70.82; }; +// type hint; +// file "named.ca"; +}; + +include "/etc/named.rfc1912.zones"; +include "/etc/named.root.key"; + +//forward zone +zone "distribution.ocpz.wsclab.endicott.ibm.com" IN { + type master; + file "distribution.db"; + allow-update { any; }; + allow-query { any; }; +}; + +//backward zone +zone "87.60.9.in-addr.arpa" IN { + type master; + file "distribution.rev"; + allow-update { any; }; + allow-query { any; }; +}; + From 1820fee2e00f6c68f2d56990652efa7a1957fbd3 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Mon, 2 Aug 2021 09:44:58 -0500 Subject: [PATCH 169/885] fixed build script for new dns and reverse dns config file --- build_script.sh | 12 ++++++------ files/dns-text-rev.txt | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/build_script.sh b/build_script.sh index 799e6f28..0505a2df 100644 --- a/build_script.sh +++ b/build_script.sh @@ -10,14 +10,14 @@ #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot 
kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive 
if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk 
/var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign 
ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/files/dns-text-rev.txt b/files/dns-text-rev.txt index 6000e368..7202af7c 100644 --- a/files/dns-text-rev.txt +++ b/files/dns-text-rev.txt @@ -7,7 +7,7 @@ $TTL 86400 86400 
;Minimum TTL ) ;Name Server Information -@ IN NS bastion-1.ocp.home.local. +@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. bastion IN A 9.60.87.139 ;Reverse lookup for Name Server From 2cfba71c65e896b11bcc819b28dad1eb5eeacf23 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Mon, 2 Aug 2021 09:44:58 -0500 Subject: [PATCH 170/885] fixed build script for new dns and reverse dns config file --- build_script.sh | 12 ++++++------ files/dns-text-rev.txt | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/build_script.sh b/build_script.sh index 799e6f28..0505a2df 100644 --- a/build_script.sh +++ b/build_script.sh @@ -10,14 +10,14 @@ #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system 
--name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img 
coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 
coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole -#virt-install 
--boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/files/dns-text-rev.txt b/files/dns-text-rev.txt index 6000e368..7202af7c 100644 --- a/files/dns-text-rev.txt +++ b/files/dns-text-rev.txt @@ -7,7 +7,7 @@ $TTL 86400 86400 ;Minimum TTL ) ;Name Server Information -@ IN NS bastion-1.ocp.home.local. +@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. 
bastion IN A 9.60.87.139 ;Reverse lookup for Name Server From ded8269afb931696ce128468a491113009ce3598 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 2 Aug 2021 11:56:36 -0400 Subject: [PATCH 171/885] created start_bootrap_install.yaml and started working on bootstrap_wait and bootstrap_verify playbooks --- .../bootstrap_server/tasks/bootstrap_verify.yaml | 16 ++++++++++++++++ roles/bootstrap_server/tasks/bootstrap_wait.yaml | 11 +++++++++++ start_bootstrap_install.yaml | 10 ++++++++++ 3 files changed, 37 insertions(+) create mode 100644 roles/bootstrap_server/tasks/bootstrap_verify.yaml create mode 100644 roles/bootstrap_server/tasks/bootstrap_wait.yaml create mode 100644 start_bootstrap_install.yaml diff --git a/roles/bootstrap_server/tasks/bootstrap_verify.yaml b/roles/bootstrap_server/tasks/bootstrap_verify.yaml new file mode 100644 index 00000000..40a325fb --- /dev/null +++ b/roles/bootstrap_server/tasks/bootstrap_verify.yaml @@ -0,0 +1,16 @@ +--- + +- hosts: bootstrap_server + become: true + tasks: + + - name: verify bootstrap creation + ansible.builtin.systemd: + state: started + name: bootstrap +##not ready + - name: verify bootstrap install process + ansible.builtin.systemd: + state: + name: bootkube.service + diff --git a/roles/bootstrap_server/tasks/bootstrap_wait.yaml b/roles/bootstrap_server/tasks/bootstrap_wait.yaml new file mode 100644 index 00000000..e23af586 --- /dev/null +++ b/roles/bootstrap_server/tasks/bootstrap_wait.yaml @@ -0,0 +1,11 @@ +--- + +- hosts: kvm_host + become: true + tasks: + + - name: wait for bootstrap to be created + wait_for_connection: + delay: 10 + sleep: 10 + timeout: 300 diff --git a/start_bootstrap_install.yaml b/start_bootstrap_install.yaml new file mode 100644 index 00000000..3b21874a --- /dev/null +++ b/start_bootstrap_install.yaml @@ -0,0 +1,10 @@ +--- + +- hosts: kvm_host + become: true + tasks: + + - name: start bootstrap install process + command: virt-install --boot 
kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images From f626ca568814b895fcab84d0780ecf815aae976c Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 2 Aug 2021 11:56:36 -0400 Subject: [PATCH 172/885] created start_bootrap_install.yaml and started working on bootstrap_wait and bootstrap_verify playbooks --- .../bootstrap_server/tasks/bootstrap_verify.yaml | 16 ++++++++++++++++ roles/bootstrap_server/tasks/bootstrap_wait.yaml | 11 +++++++++++ start_bootstrap_install.yaml | 10 ++++++++++ 3 files changed, 37 insertions(+) create mode 100644 roles/bootstrap_server/tasks/bootstrap_verify.yaml create mode 100644 roles/bootstrap_server/tasks/bootstrap_wait.yaml create mode 100644 start_bootstrap_install.yaml diff --git a/roles/bootstrap_server/tasks/bootstrap_verify.yaml b/roles/bootstrap_server/tasks/bootstrap_verify.yaml new file mode 100644 index 00000000..40a325fb --- /dev/null +++ b/roles/bootstrap_server/tasks/bootstrap_verify.yaml @@ -0,0 +1,16 @@ +--- + +- hosts: bootstrap_server + become: true + tasks: + + - name: verify bootstrap creation + ansible.builtin.systemd: + state: started + name: bootstrap +##not ready + - name: verify bootstrap install process + ansible.builtin.systemd: + state: + name: bootkube.service + diff --git a/roles/bootstrap_server/tasks/bootstrap_wait.yaml 
b/roles/bootstrap_server/tasks/bootstrap_wait.yaml new file mode 100644 index 00000000..e23af586 --- /dev/null +++ b/roles/bootstrap_server/tasks/bootstrap_wait.yaml @@ -0,0 +1,11 @@ +--- + +- hosts: kvm_host + become: true + tasks: + + - name: wait for bootstrap to be created + wait_for_connection: + delay: 10 + sleep: 10 + timeout: 300 diff --git a/start_bootstrap_install.yaml b/start_bootstrap_install.yaml new file mode 100644 index 00000000..3b21874a --- /dev/null +++ b/start_bootstrap_install.yaml @@ -0,0 +1,10 @@ +--- + +- hosts: kvm_host + become: true + tasks: + + - name: start bootstrap install process + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images From ba25218c9707728a15c95d6d813a48980ba4de43 Mon Sep 17 00:00:00 2001 From: zoltan Date: Mon, 2 Aug 2021 13:07:05 -0400 Subject: [PATCH 173/885] adding DNS db files that work for our zone --- files/distribution.db | 47 ++++++++++++++++++++++++++++++++++++++++++ files/distribution.rev | 28 +++++++++++++++++++++++++ 2 files changed, 75 insertions(+) create mode 100644 files/distribution.db create mode 100644 files/distribution.rev diff --git a/files/distribution.db b/files/distribution.db new file mode 100644 index 00000000..45615303 --- /dev/null +++ b/files/distribution.db @@ -0,0 +1,47 @@ +$TTL 86400 +@ IN SOA 
bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com.( + 2020021821 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL +) + +;Name Server Information +@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. + +;IP Address for Name Server +bastion IN A 9.60.87.139 + +;entry for bootstrap host. +bootstrap IN A 9.60.87.133 +bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.133 + +;entry of your load balancer +haproxy IN A 9.60.87.139 + +;entries for the master nodes +control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 +control-0 IN A 9.60.87.138 +control-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.137 +control-1 IN A 9.60.87.137 +control-2.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.136 +control-2 IN A 9.60.87.136 + +;entry for the bastion host +bastion IN A 9.60.87.139 + +;entries for the worker nodes +compute-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.135 +compute-0 IN A 9.60.87.135 +compute-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.134 +compute-1 IN A 9.60.87.134 + +;The api identifies the IP of your load balancer. +api.ocp.home.local IN CNAME haproxy.ocp.home.local. +api-int.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. + +;The wildcard also identifies the load balancer. +*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. + +;EOF diff --git a/files/distribution.rev b/files/distribution.rev new file mode 100644 index 00000000..d6653ed8 --- /dev/null +++ b/files/distribution.rev @@ -0,0 +1,28 @@ +$TTL 86400 +@ IN SOA bastion-1.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com. ( + 2020011800 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL +) +;Name Server Information +@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. 
+bastion IN A 9.60.87.139 + +;Reverse lookup for Name Server +139 IN PTR bastion.distribution.ocpz.wsclab.endicott.ibm.com. + +;PTR Record IP address to Hostname +138 IN PTR control-0.distribution.ocpz.wsclab.endicott.ibm.com. +137 IN PTR control-1.distribution.ocpz.wsclab.endicott.ibm.com. +136 IN PTR control-2.distribution.ocpz.wsclab.endicott.ibm.com. +135 IN PTR compute-0.distribution.ocpz.wsclab.endicott.ibm.com. +134 IN PTR compute-1.distribution.ocpz.wsclab.endicott.ibm.com. +133 IN PTR bootstrap.distribution.ocpz.wsclab.endicott.ibm.com. +133 IN PTR bootstrap +138 IN PTR control-0 +137 IN PTR control-1 +136 IN PTR control-2 +135 IN PTR compute-0 +134 IN PTR compute-1 From 8d0d62d94812b903abf3803b6a0bd819f4da8ec4 Mon Sep 17 00:00:00 2001 From: zoltan Date: Mon, 2 Aug 2021 13:07:05 -0400 Subject: [PATCH 174/885] adding DNS db files that work for our zone --- files/distribution.db | 47 ++++++++++++++++++++++++++++++++++++++++++ files/distribution.rev | 28 +++++++++++++++++++++++++ 2 files changed, 75 insertions(+) create mode 100644 files/distribution.db create mode 100644 files/distribution.rev diff --git a/files/distribution.db b/files/distribution.db new file mode 100644 index 00000000..45615303 --- /dev/null +++ b/files/distribution.db @@ -0,0 +1,47 @@ +$TTL 86400 +@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com.( + 2020021821 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL +) + +;Name Server Information +@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. + +;IP Address for Name Server +bastion IN A 9.60.87.139 + +;entry for bootstrap host. 
+bootstrap IN A 9.60.87.133 +bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.133 + +;entry of your load balancer +haproxy IN A 9.60.87.139 + +;entries for the master nodes +control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 +control-0 IN A 9.60.87.138 +control-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.137 +control-1 IN A 9.60.87.137 +control-2.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.136 +control-2 IN A 9.60.87.136 + +;entry for the bastion host +bastion IN A 9.60.87.139 + +;entries for the worker nodes +compute-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.135 +compute-0 IN A 9.60.87.135 +compute-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.134 +compute-1 IN A 9.60.87.134 + +;The api identifies the IP of your load balancer. +api.ocp.home.local IN CNAME haproxy.ocp.home.local. +api-int.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. + +;The wildcard also identifies the load balancer. +*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. + +;EOF diff --git a/files/distribution.rev b/files/distribution.rev new file mode 100644 index 00000000..d6653ed8 --- /dev/null +++ b/files/distribution.rev @@ -0,0 +1,28 @@ +$TTL 86400 +@ IN SOA bastion-1.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com. ( + 2020011800 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL +) +;Name Server Information +@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. +bastion IN A 9.60.87.139 + +;Reverse lookup for Name Server +139 IN PTR bastion.distribution.ocpz.wsclab.endicott.ibm.com. + +;PTR Record IP address to Hostname +138 IN PTR control-0.distribution.ocpz.wsclab.endicott.ibm.com. +137 IN PTR control-1.distribution.ocpz.wsclab.endicott.ibm.com. +136 IN PTR control-2.distribution.ocpz.wsclab.endicott.ibm.com. 
+135 IN PTR compute-0.distribution.ocpz.wsclab.endicott.ibm.com. +134 IN PTR compute-1.distribution.ocpz.wsclab.endicott.ibm.com. +133 IN PTR bootstrap.distribution.ocpz.wsclab.endicott.ibm.com. +133 IN PTR bootstrap +138 IN PTR control-0 +137 IN PTR control-1 +136 IN PTR control-2 +135 IN PTR compute-0 +134 IN PTR compute-1 From 674feaa7eab5ca29c3c619391f490d7b57c8fbad Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 2 Aug 2021 13:22:10 -0400 Subject: [PATCH 175/885] added create_nodes.yaml to virtualize and install OS on control and compute nodes --- .../kvm_host_s390x/tasks/create_nodes.yaml | 45 +++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml diff --git a/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml b/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml new file mode 100644 index 00000000..2ed925a5 --- /dev/null +++ b/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml @@ -0,0 +1,45 @@ +--- + +- hosts: kvm_host + become: true + tasks: + + - name: virtualize control-0 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G + + - name: virtualize control-1 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G + + - name: virtualize control-2 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G + + - name: virtualize compute-0 node + qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G + + - name: virtualize compute-1 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G + + - name: install CoreOS on control-0 node + command: virt-install --boot 
kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: install CoreOS on control-1 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: install CoreOS on control-2 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk 
/var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: install CoreOS on compute-0 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: install CoreOS on compute-1 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images From 57d063d6c542c9db3b292499a1b0a658aa03efd1 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 
2 Aug 2021 13:22:10 -0400 Subject: [PATCH 176/885] added create_nodes.yaml to virtualize and install OS on control and compute nodes --- .../kvm_host_s390x/tasks/create_nodes.yaml | 45 +++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100644 roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml diff --git a/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml b/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml new file mode 100644 index 00000000..2ed925a5 --- /dev/null +++ b/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml @@ -0,0 +1,45 @@ +--- + +- hosts: kvm_host + become: true + tasks: + + - name: virtualize control-0 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G + + - name: virtualize control-1 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G + + - name: virtualize control-2 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G + + - name: virtualize compute-0 node + qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G + + - name: virtualize compute-1 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G + + - name: install CoreOS on control-0 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate 
--import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: install CoreOS on control-1 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: install CoreOS on control-2 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: install CoreOS on compute-0 node + command: virt-install --boot 
kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: install CoreOS on compute-1 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images From 5972ad047b0e33c9b5a140869806b1f345d9fb52 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Mon, 2 Aug 2021 14:42:28 -0500 Subject: [PATCH 177/885] added 2vcpus and 4GB of ram to bastion vm --- .../bastion_s390x/tasks/start_bastion_install.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml b/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml index 4e81c48b..4a1dfece 
100644 --- a/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml @@ -7,8 +7,8 @@ - name: start bastion install process community.libvirt.virt: name: bastion - memory: 4096 - vcpus: 2 + memory: 8172 + vcpus: 4 disk size: 30 cdrom: /var/lib/libvirt/images/rhel83.iso accelerate: yes From f565a9ba71359673b4a8e8770e91e3a7661fcff2 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Mon, 2 Aug 2021 14:42:28 -0500 Subject: [PATCH 178/885] added 2vcpus and 4GB of ram to bastion vm --- .../bastion_s390x/tasks/start_bastion_install.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml b/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml index 4e81c48b..4a1dfece 100644 --- a/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml @@ -7,8 +7,8 @@ - name: start bastion install process community.libvirt.virt: name: bastion - memory: 4096 - vcpus: 2 + memory: 8172 + vcpus: 4 disk size: 30 cdrom: /var/lib/libvirt/images/rhel83.iso accelerate: yes From 5dfc6282ddb9035d0721ace42c5a830d77c2f219 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 2 Aug 2021 15:45:31 -0400 Subject: [PATCH 179/885] Made edits to create_nodes.yaml, create_bootrap.yaml and get-ocp.yaml --- create_bootstrap.yaml | 11 +++-------- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 4 ++-- roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml | 6 +++++- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/create_bootstrap.yaml b/create_bootstrap.yaml index dd909726..751af0a5 100644 --- a/create_bootstrap.yaml +++ b/create_bootstrap.yaml @@ -8,13 +8,8 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - name: boot bootstrap - command: 
virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.root> - -## - name: wait for bootstrap installation to complete - -## to wait for bootstrap creation before continuing, use one of these: -## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_module.html -## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_connection_module.html -## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/pause_module.html + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index f2198677..809d23c4 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -52,7 +52,7 @@ dest: /ocpinst/install-config.yaml - name: Create Manifests - ansible.builtin.command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ + command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes - name: Set mastersSchedulable parameter to False @@ -62,7 +62,7 @@ replace: ': false' - name: Create Ignition files - ansible.builtin.command: 
/ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ + command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ become: yes - name: create Ignition directory on webserver diff --git a/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml b/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml index 2ed925a5..637748df 100644 --- a/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml +++ b/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml @@ -14,7 +14,7 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G - name: virtualize compute-0 node - qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G - name: virtualize compute-1 node command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G @@ -34,6 +34,10 @@ args: chdir: /var/lib/libvirt/images + - name: pause 5 minutes + pause: + minutes: 5 + - name: install CoreOS on compute-0 node command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: From 3769eb4b4b9e5ce39c1b3c89e603860457ca4309 Mon Sep 17 
00:00:00 2001 From: Jacob Emery Date: Mon, 2 Aug 2021 15:45:31 -0400 Subject: [PATCH 180/885] Made edits to create_nodes.yaml, create_bootrap.yaml and get-ocp.yaml --- create_bootstrap.yaml | 11 +++-------- roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml | 4 ++-- roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml | 6 +++++- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/create_bootstrap.yaml b/create_bootstrap.yaml index dd909726..751af0a5 100644 --- a/create_bootstrap.yaml +++ b/create_bootstrap.yaml @@ -8,13 +8,8 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - name: boot bootstrap - command: virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.root> - -## - name: wait for bootstrap installation to complete - -## to wait for bootstrap creation before continuing, use one of these: -## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_module.html -## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/wait_for_connection_module.html -## https://docs.ansible.com/ansible/latest/collections/ansible/builtin/pause_module.html + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: 
+ chdir: /var/lib/libvirt/images diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml index f2198677..809d23c4 100644 --- a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml @@ -52,7 +52,7 @@ dest: /ocpinst/install-config.yaml - name: Create Manifests - ansible.builtin.command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ + command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes - name: Set mastersSchedulable parameter to False @@ -62,7 +62,7 @@ replace: ': false' - name: Create Ignition files - ansible.builtin.command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ + command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ become: yes - name: create Ignition directory on webserver diff --git a/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml b/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml index 2ed925a5..637748df 100644 --- a/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml +++ b/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml @@ -14,7 +14,7 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G - name: virtualize compute-0 node - qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G - name: virtualize compute-1 node command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G @@ -34,6 +34,10 @@ args: chdir: /var/lib/libvirt/images + - name: pause 5 minutes + pause: + minutes: 5 + - name: install CoreOS on compute-0 node command: virt-install --boot 
kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: From d3efde99fcc4d80c536f343125b03dff0dbe324f Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 2 Aug 2021 15:52:19 -0400 Subject: [PATCH 181/885] added waits between node installs --- .../kvm_host_s390x/tasks/create_nodes.yaml | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml b/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml index 637748df..83eef633 100644 --- a/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml +++ b/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml @@ -24,25 +24,37 @@ args: chdir: /var/lib/libvirt/images + - name: pause 8 minutes + pause: + minutes: 8 + - name: install CoreOS on control-1 node command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive 
if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images + - name: pause 8 minutes + pause: + minutes: 8 + - name: install CoreOS on control-2 node command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images - - name: pause 5 minutes + - name: pause 8 minutes pause: - minutes: 5 + minutes: 8 - name: install CoreOS on compute-0 node command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images + - name: pause 8 minutes + pause: + minutes: 8 + - name: install CoreOS on compute-1 node command: 
virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: From 3881acd9dfd27728a7eac68e005fa1f303e5c904 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 2 Aug 2021 15:52:19 -0400 Subject: [PATCH 182/885] added waits between node installs --- .../kvm_host_s390x/tasks/create_nodes.yaml | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml b/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml index 637748df..83eef633 100644 --- a/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml +++ b/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml @@ -24,25 +24,37 @@ args: chdir: /var/lib/libvirt/images + - name: pause 8 minutes + pause: + minutes: 8 + - name: install CoreOS on control-1 node command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive 
if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images + - name: pause 8 minutes + pause: + minutes: 8 + - name: install CoreOS on control-2 node command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images - - name: pause 5 minutes + - name: pause 8 minutes pause: - minutes: 5 + minutes: 8 - name: install CoreOS on compute-0 node command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images + - name: pause 8 minutes + pause: + minutes: 8 + - name: install CoreOS on compute-1 node command: 
virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: From 9c78b05eb40828b1d647db59410e08b4bb20a312 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 3 Aug 2021 10:46:14 -0400 Subject: [PATCH 183/885] added updated files from bastion --- files/distribution.db | 10 +++-- files/haproxy.cfg | 91 ++++++++++++++++++++++++++----------------- files/haproxy8.cfg | 8 ++-- 3 files changed, 67 insertions(+), 42 deletions(-) diff --git a/files/distribution.db b/files/distribution.db index 45615303..dc707e61 100644 --- a/files/distribution.db +++ b/files/distribution.db @@ -19,6 +19,7 @@ bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.133 ;entry of your load balancer haproxy IN A 9.60.87.139 +haproxy.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 ;entries for the master nodes control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 @@ -38,10 +39,13 @@ compute-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.134 compute-1 IN A 9.60.87.134 ;The api identifies the IP of your load balancer. -api.ocp.home.local IN CNAME haproxy.ocp.home.local. -api-int.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. 
+api.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 +api IN A 9.60.87.139 +api-int.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 +api-int IN A 9.60.87.139 ;The wildcard also identifies the load balancer. -*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. +*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 +*.apps IN A 9.60.87.139 ;EOF diff --git a/files/haproxy.cfg b/files/haproxy.cfg index fcec60f0..f7b1f7f0 100644 --- a/files/haproxy.cfg +++ b/files/haproxy.cfg @@ -1,37 +1,58 @@ global - daemon - maxconn 256 - + log 127.0.0.1 local2 + pidfile /var/run/haproxy.pid + maxconn 4000 + daemon defaults - mode http - timeout connect 10s - timeout client 1m - timeout server 1m - -listen ingress-http - bind *:80 - mode tcp - server compute-0 9.60.87.135:80 check - server compute-1 9.60.87.134:80 check - -listen ingress-https - bind *:443 - mode tcp - server compute-0 9.60.87.135:443 check - server compute-1 9.60.87.134:443 check - -listen api - bind *:6443 - mode tcp - server bootstrap 9.60.87.133:6443 check - server control-0 9.60.87.138:6443 check - server control-1 9.60.87.137:6443 check - server control-2 9.60.87.136:6443 check - -listen api-int - bind *:22623 - mode tcp - server bootstrap 9.60.87.133:22623 check - server control-0 9.60.87.138:22623 check - server control-1 9.60.87.137:22623 check - server control-2 9.60.87.136:22623 check + mode http + log global + option dontlognull + option http-server-close + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout http-keep-alive 10s + timeout check 10s + maxconn 3000 +frontend stats + bind *:1936 + mode http + log global + maxconn 10 + stats enable + stats hide-version + stats refresh 30s + stats show-node + stats show-desc Stats for distribution cluster + stats auth admin:distribution + stats uri /stats +listen 
api-server-6443 + bind *:6443 + mode tcp + server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s backup + server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s + server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s + server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s +listen machine-config-server-22623 + bind *:22623 + mode tcp + server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s backup + server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s +listen ingress-router-443 + bind *:443 + mode tcp + balance source + server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s + server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s +listen ingress-router-80 + bind *:80 + mode tcp + balance source + server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s + server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s diff --git a/files/haproxy8.cfg b/files/haproxy8.cfg index 0b80783d..f7b1f7f0 100644 --- a/files/haproxy8.cfg +++ b/files/haproxy8.cfg @@ -41,9 +41,9 @@ listen machine-config-server-22623 bind *:22623 mode tcp server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s backup - server master0 master0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server master1 master1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server master2 master2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check 
inter 1s + server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s listen ingress-router-443 bind *:443 mode tcp @@ -55,4 +55,4 @@ listen ingress-router-80 mode tcp balance source server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s - server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s \ No newline at end of file + server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s From db39086f20dd949c03d40bfd0b2a13f9d1db6624 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 3 Aug 2021 10:46:14 -0400 Subject: [PATCH 184/885] added updated files from bastion --- files/distribution.db | 10 +++-- files/haproxy.cfg | 91 ++++++++++++++++++++++++++----------------- files/haproxy8.cfg | 8 ++-- 3 files changed, 67 insertions(+), 42 deletions(-) diff --git a/files/distribution.db b/files/distribution.db index 45615303..dc707e61 100644 --- a/files/distribution.db +++ b/files/distribution.db @@ -19,6 +19,7 @@ bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.133 ;entry of your load balancer haproxy IN A 9.60.87.139 +haproxy.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 ;entries for the master nodes control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 @@ -38,10 +39,13 @@ compute-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.134 compute-1 IN A 9.60.87.134 ;The api identifies the IP of your load balancer. -api.ocp.home.local IN CNAME haproxy.ocp.home.local. -api-int.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. +api.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 +api IN A 9.60.87.139 +api-int.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 +api-int IN A 9.60.87.139 ;The wildcard also identifies the load balancer. 
-*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. +*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 +*.apps IN A 9.60.87.139 ;EOF diff --git a/files/haproxy.cfg b/files/haproxy.cfg index fcec60f0..f7b1f7f0 100644 --- a/files/haproxy.cfg +++ b/files/haproxy.cfg @@ -1,37 +1,58 @@ global - daemon - maxconn 256 - + log 127.0.0.1 local2 + pidfile /var/run/haproxy.pid + maxconn 4000 + daemon defaults - mode http - timeout connect 10s - timeout client 1m - timeout server 1m - -listen ingress-http - bind *:80 - mode tcp - server compute-0 9.60.87.135:80 check - server compute-1 9.60.87.134:80 check - -listen ingress-https - bind *:443 - mode tcp - server compute-0 9.60.87.135:443 check - server compute-1 9.60.87.134:443 check - -listen api - bind *:6443 - mode tcp - server bootstrap 9.60.87.133:6443 check - server control-0 9.60.87.138:6443 check - server control-1 9.60.87.137:6443 check - server control-2 9.60.87.136:6443 check - -listen api-int - bind *:22623 - mode tcp - server bootstrap 9.60.87.133:22623 check - server control-0 9.60.87.138:22623 check - server control-1 9.60.87.137:22623 check - server control-2 9.60.87.136:22623 check + mode http + log global + option dontlognull + option http-server-close + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout http-keep-alive 10s + timeout check 10s + maxconn 3000 +frontend stats + bind *:1936 + mode http + log global + maxconn 10 + stats enable + stats hide-version + stats refresh 30s + stats show-node + stats show-desc Stats for distribution cluster + stats auth admin:distribution + stats uri /stats +listen api-server-6443 + bind *:6443 + mode tcp + server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s backup + server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s + server 
control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s + server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s +listen machine-config-server-22623 + bind *:22623 + mode tcp + server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s backup + server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s +listen ingress-router-443 + bind *:443 + mode tcp + balance source + server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s + server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s +listen ingress-router-80 + bind *:80 + mode tcp + balance source + server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s + server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s diff --git a/files/haproxy8.cfg b/files/haproxy8.cfg index 0b80783d..f7b1f7f0 100644 --- a/files/haproxy8.cfg +++ b/files/haproxy8.cfg @@ -41,9 +41,9 @@ listen machine-config-server-22623 bind *:22623 mode tcp server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s backup - server master0 master0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server master1 master1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server master2 master2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s listen ingress-router-443 bind *:443 mode tcp @@ 
-55,4 +55,4 @@ listen ingress-router-80 mode tcp balance source server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s - server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s \ No newline at end of file + server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s From 43aa1e5faf16489c0d6b44f728b316cf02ad700e Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 3 Aug 2021 13:17:58 -0400 Subject: [PATCH 185/885] moved create_bastion.yaml file to kvm_host roles folder and modified it slightly. Still not perfect. --- .../kvm_host/kvm_host_s390x/tasks/create_bastion.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 roles/kvm_host/kvm_host_s390x/tasks/create_bastion.yaml diff --git a/roles/kvm_host/kvm_host_s390x/tasks/create_bastion.yaml b/roles/kvm_host/kvm_host_s390x/tasks/create_bastion.yaml new file mode 100644 index 00000000..0468cc71 --- /dev/null +++ b/roles/kvm_host/kvm_host_s390x/tasks/create_bastion.yaml @@ -0,0 +1,11 @@ +--- + +- hosts: kvm_host + become: true + tasks: + + - name: virtualize bastion server + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G + + - name: start bastion install + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole From 935e994d241ac35745cae1f2a525a8f9584c14ce Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 3 Aug 2021 13:17:58 
-0400 Subject: [PATCH 186/885] moved create_bastion.yaml file to kvm_host roles folder and modified it slightly. Still not perfect. --- .../kvm_host/kvm_host_s390x/tasks/create_bastion.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 roles/kvm_host/kvm_host_s390x/tasks/create_bastion.yaml diff --git a/roles/kvm_host/kvm_host_s390x/tasks/create_bastion.yaml b/roles/kvm_host/kvm_host_s390x/tasks/create_bastion.yaml new file mode 100644 index 00000000..0468cc71 --- /dev/null +++ b/roles/kvm_host/kvm_host_s390x/tasks/create_bastion.yaml @@ -0,0 +1,11 @@ +--- + +- hosts: kvm_host + become: true + tasks: + + - name: virtualize bastion server + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G + + - name: start bastion install + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole From e922d0797c7151166d86d52e27570764f79e7cf5 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 3 Aug 2021 13:26:15 -0400 Subject: [PATCH 187/885] edited load_balancer.yaml so that haproxy starts automatically --- roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml index e238a17b..35849dc0 100644 --- a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml +++ 
b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml @@ -17,5 +17,5 @@ - name: Start haproxy systemd: - state: started + state: restarted name: haproxy From a4eac24bd4722bad53d9e231d0eb4f11a31c7d8e Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 3 Aug 2021 13:26:15 -0400 Subject: [PATCH 188/885] edited load_balancer.yaml so that haproxy starts automatically --- roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml index e238a17b..35849dc0 100644 --- a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml +++ b/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml @@ -17,5 +17,5 @@ - name: Start haproxy systemd: - state: started + state: restarted name: haproxy From f62f9ff28470c80f8e7dd897e893b811fec00048 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 3 Aug 2021 13:53:15 -0400 Subject: [PATCH 189/885] Created a playbook to connect the cluster and approve certificate signing requests, step 3 page 25 of Felipe's document. 
--- .../bastion_s390x/tasks/connect_cluster.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 roles/bastion_server/bastion_s390x/tasks/connect_cluster.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/connect_cluster.yaml b/roles/bastion_server/bastion_s390x/tasks/connect_cluster.yaml new file mode 100644 index 00000000..82c0ca2f --- /dev/null +++ b/roles/bastion_server/bastion_s390x/tasks/connect_cluster.yaml @@ -0,0 +1,12 @@ +--- + +- hosts: bastion_server + become: true + tasks: + + - name: create csr-name variable + command: oc get csr + register: csr-name + + - name: approve all certificate signing requests + command: oc adm certificate approve {{ csr-name }} From 67f30c8b47ef4a25a463c5320671dd8ceed89c15 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 3 Aug 2021 13:53:15 -0400 Subject: [PATCH 190/885] Created a playbook to connect the cluster and approve certificate signing requests, step 3 page 25 of Felipe's document. --- .../bastion_s390x/tasks/connect_cluster.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 roles/bastion_server/bastion_s390x/tasks/connect_cluster.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/connect_cluster.yaml b/roles/bastion_server/bastion_s390x/tasks/connect_cluster.yaml new file mode 100644 index 00000000..82c0ca2f --- /dev/null +++ b/roles/bastion_server/bastion_s390x/tasks/connect_cluster.yaml @@ -0,0 +1,12 @@ +--- + +- hosts: bastion_server + become: true + tasks: + + - name: create csr-name variable + command: oc get csr + register: csr-name + + - name: approve all certificate signing requests + command: oc adm certificate approve {{ csr-name }} From 66c156b9fd4a97bc448a44e0266788783f22eab0 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 3 Aug 2021 15:01:02 -0400 Subject: [PATCH 191/885] updated bootstrap_verify.yaml with new method. Have not had the chance to check it yet. 
--- .../tasks/bootstrap_verify.yaml | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/roles/bootstrap_server/tasks/bootstrap_verify.yaml b/roles/bootstrap_server/tasks/bootstrap_verify.yaml index 40a325fb..d47e3d71 100644 --- a/roles/bootstrap_server/tasks/bootstrap_verify.yaml +++ b/roles/bootstrap_server/tasks/bootstrap_verify.yaml @@ -4,13 +4,12 @@ become: true tasks: - - name: verify bootstrap creation - ansible.builtin.systemd: - state: started - name: bootstrap -##not ready - - name: verify bootstrap install process - ansible.builtin.systemd: - state: - name: bootkube.service - + - name: connect bootstrap + command: virsh console bootstrap + + - name: Verify bootstrap install process until complete + command: journalctl -u bootkube.service + register: result + until: result.stdout.find("bootkube.service complete") != -1 + retries: 10 + delay: 20 From 35b4cbbdf3378f9935cfff350e3ff37553896af0 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 3 Aug 2021 15:01:02 -0400 Subject: [PATCH 192/885] updated bootstrap_verify.yaml with new method. Have not had the chance to check it yet. 
--- .../tasks/bootstrap_verify.yaml | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/roles/bootstrap_server/tasks/bootstrap_verify.yaml b/roles/bootstrap_server/tasks/bootstrap_verify.yaml index 40a325fb..d47e3d71 100644 --- a/roles/bootstrap_server/tasks/bootstrap_verify.yaml +++ b/roles/bootstrap_server/tasks/bootstrap_verify.yaml @@ -4,13 +4,12 @@ become: true tasks: - - name: verify bootstrap creation - ansible.builtin.systemd: - state: started - name: bootstrap -##not ready - - name: verify bootstrap install process - ansible.builtin.systemd: - state: - name: bootkube.service - + - name: connect bootstrap + command: virsh console bootstrap + + - name: Verify bootstrap install process until complete + command: journalctl -u bootkube.service + register: result + until: result.stdout.find("bootkube.service complete") != -1 + retries: 10 + delay: 20 From 7249ffc1d779451ed491e10ae952627a441677a8 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 3 Aug 2021 16:00:32 -0400 Subject: [PATCH 193/885] deleted redundant files in main directory to reduce clutter --- .build_script.sh.swp | Bin 0 -> 1024 bytes create_bootstrap.sh | 7 ----- pw-bootstrap.yaml | 13 --------- .../tasks/start_bastion_install.yaml | 20 ------------- start_bootstrap_install.yaml | 10 ------- test02-joe.yml | 27 ------------------ 6 files changed, 77 deletions(-) create mode 100644 .build_script.sh.swp delete mode 100644 create_bootstrap.sh delete mode 100644 pw-bootstrap.yaml delete mode 100644 roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml delete mode 100644 start_bootstrap_install.yaml delete mode 100644 test02-joe.yml diff --git a/.build_script.sh.swp b/.build_script.sh.swp new file mode 100644 index 0000000000000000000000000000000000000000..b70a3266611f7280a0a0108b7d0d213019e36a0f GIT binary patch literal 1024 zcmYc?$V<%2S1{4DU_b$DPiQe@B_`)5p@?GV6qn}aW|kOYmqZdxD$UGEi7!qr$}A|+ TE6zZY8D)-!z-R~zL Date: Tue, 3 Aug 2021 16:00:32 -0400 
Subject: [PATCH 194/885] deleted redundant files in main directory to reduce clutter --- .build_script.sh.swp | Bin 0 -> 1024 bytes create_bootstrap.sh | 7 ----- pw-bootstrap.yaml | 13 --------- .../tasks/start_bastion_install.yaml | 20 ------------- start_bootstrap_install.yaml | 10 ------- test02-joe.yml | 27 ------------------ 6 files changed, 77 deletions(-) create mode 100644 .build_script.sh.swp delete mode 100644 create_bootstrap.sh delete mode 100644 pw-bootstrap.yaml delete mode 100644 roles/bastion_server/bastion_s390x/tasks/start_bastion_install.yaml delete mode 100644 start_bootstrap_install.yaml delete mode 100644 test02-joe.yml diff --git a/.build_script.sh.swp b/.build_script.sh.swp new file mode 100644 index 0000000000000000000000000000000000000000..b70a3266611f7280a0a0108b7d0d213019e36a0f GIT binary patch literal 1024 zcmYc?$V<%2S1{4DU_b$DPiQe@B_`)5p@?GV6qn}aW|kOYmqZdxD$UGEi7!qr$}A|+ TE6zZY8D)-!z-R~zL Date: Tue, 3 Aug 2021 16:03:46 -0400 Subject: [PATCH 195/885] converted yml files to yaml in main directory --- .build_script.sh.swp | Bin 1024 -> 0 bytes copy-image.yml => copy-image.yaml | 0 list_vms.yml => list_vms.yaml | 0 setup-mgmt-user.yml => setup-mgmt-user.yaml | 0 4 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 .build_script.sh.swp rename copy-image.yml => copy-image.yaml (100%) rename list_vms.yml => list_vms.yaml (100%) rename setup-mgmt-user.yml => setup-mgmt-user.yaml (100%) diff --git a/.build_script.sh.swp b/.build_script.sh.swp deleted file mode 100644 index b70a3266611f7280a0a0108b7d0d213019e36a0f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1024 zcmYc?$V<%2S1{4DU_b$DPiQe@B_`)5p@?GV6qn}aW|kOYmqZdxD$UGEi7!qr$}A|+ TE6zZY8D)-!z-R~zL Date: Tue, 3 Aug 2021 16:03:46 -0400 Subject: [PATCH 196/885] converted yml files to yaml in main directory --- .build_script.sh.swp | Bin 1024 -> 0 bytes copy-image.yml => copy-image.yaml | 0 list_vms.yml => list_vms.yaml | 0 
setup-mgmt-user.yml => setup-mgmt-user.yaml | 0 4 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 .build_script.sh.swp rename copy-image.yml => copy-image.yaml (100%) rename list_vms.yml => list_vms.yaml (100%) rename setup-mgmt-user.yml => setup-mgmt-user.yaml (100%) diff --git a/.build_script.sh.swp b/.build_script.sh.swp deleted file mode 100644 index b70a3266611f7280a0a0108b7d0d213019e36a0f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1024 zcmYc?$V<%2S1{4DU_b$DPiQe@B_`)5p@?GV6qn}aW|kOYmqZdxD$UGEi7!qr$}A|+ TE6zZY8D)-!z-R~zL Date: Tue, 3 Aug 2021 16:21:13 -0400 Subject: [PATCH 197/885] Cleaned out a lot of redundant files --- build_script.sh | 2 +- .../bastion_x86/tasks/bastion.yaml | 49 ------------------- .../tasks/fill_install_config.yaml | 44 ----------------- .../bastion_x86/tasks/http_setup.yaml.save | 23 --------- .../tasks/start_bastion_install.yaml | 20 -------- .../{bastion_s390x => }/tasks/bastion.yaml | 0 .../tasks/connect_cluster.yaml | 0 .../tasks/fill_install_config.yaml | 0 .../{bastion_s390x => tasks}/fix-sched.yaml | 0 .../{get-images.yml => tasks/get-images.yaml} | 0 .../{bastion_s390x => }/tasks/get-ocp.yaml | 0 .../{bastion_s390x => }/tasks/http_setup.yaml | 0 .../{bastion_s390x => }/tasks/http_test.yaml | 0 .../tasks/load_balancer.yaml | 0 .../files => templates}/install-config.yaml | 0 roles/kvm_host/kvm_host_x86/tasks/main.yml | 41 ---------------- .../kvm_host_x86/templates/macvtap.xml.j2 | 6 --- .../tasks/create_bastion.yaml | 2 +- .../kvm_host/tasks/create_bootstrap.yaml | 0 .../tasks/create_nodes.yaml | 0 .../tasks/define_macvtap.yaml | 0 .../tasks/prep_kvm_guests.yaml | 0 .../templates/macvtap.xml.j2 | 0 23 files changed, 2 insertions(+), 185 deletions(-) delete mode 100644 roles/bastion_server/bastion_x86/tasks/bastion.yaml delete mode 100644 roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml delete mode 100644 
roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save delete mode 100644 roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml rename roles/bastion_server/{bastion_s390x => }/tasks/bastion.yaml (100%) rename roles/bastion_server/{bastion_s390x => }/tasks/connect_cluster.yaml (100%) rename roles/bastion_server/{bastion_s390x => }/tasks/fill_install_config.yaml (100%) rename roles/bastion_server/{bastion_s390x => tasks}/fix-sched.yaml (100%) rename roles/bastion_server/{get-images.yml => tasks/get-images.yaml} (100%) rename roles/bastion_server/{bastion_s390x => }/tasks/get-ocp.yaml (100%) rename roles/bastion_server/{bastion_s390x => }/tasks/http_setup.yaml (100%) rename roles/bastion_server/{bastion_s390x => }/tasks/http_test.yaml (100%) rename roles/bastion_server/{bastion_s390x => }/tasks/load_balancer.yaml (100%) rename roles/bastion_server/{bastion_s390x/tasks/files => templates}/install-config.yaml (100%) delete mode 100644 roles/kvm_host/kvm_host_x86/tasks/main.yml delete mode 100644 roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 rename roles/kvm_host/{kvm_host_s390x => }/tasks/create_bastion.yaml (91%) rename create_bootstrap.yaml => roles/kvm_host/tasks/create_bootstrap.yaml (100%) rename roles/kvm_host/{kvm_host_s390x => }/tasks/create_nodes.yaml (100%) rename roles/kvm_host/{kvm_host_s390x => }/tasks/define_macvtap.yaml (100%) rename roles/kvm_host/{kvm_host_s390x => }/tasks/prep_kvm_guests.yaml (100%) rename roles/kvm_host/{kvm_host_s390x => }/templates/macvtap.xml.j2 (100%) diff --git a/build_script.sh b/build_script.sh index 0505a2df..52a90971 100644 --- a/build_script.sh +++ b/build_script.sh @@ -1,7 +1,7 @@ #!/bin/bash # Created by Phillip - +##for when we start templating: https://searchservervirtualization.techtarget.com/tip/Expedite-Ansible-KVM-provisioning-with-automation #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G #qemu-img create -f qcow2 -b 
/var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G diff --git a/roles/bastion_server/bastion_x86/tasks/bastion.yaml b/roles/bastion_server/bastion_x86/tasks/bastion.yaml deleted file mode 100644 index ac86bf27..00000000 --- a/roles/bastion_server/bastion_x86/tasks/bastion.yaml +++ /dev/null @@ -1,49 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - - -#there has to be a way to do this through Ansible. Step 3 page 9 - - name: complete bastion install process - -#leaving this until I meet with Filipe - - name: download software - -#leaving this until I meet with Filipe - - name: DNS requirements and configuration - -#not sure what this instruction step is trying to say. Page 13 - - name: Load Balancer - -# Need to edit this script to automate changing port to 8080 and ensure latest versions of OpenShift mirrors - - name: Create and configure the HTTP server - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/create_http.sh - - - name: Get installer and oc Client Tools - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/get_ocp_installer.sh - -##create install-config.yaml file - - name: create install-config.yaml - file: - path: "~/files/install-config.yaml" - state: touch - -##need to use host_vars for - - name: Generate the ignition files 1 - shell: ./openshift-install create manifests --dir= - -##also needs variable - - name: Generate the ignition files 2 - shell: ./openshift-install create ignition-configs --dir= - -##also needs variable - - name: Generate the ignition files 3 - shell: cp /*.ign /var/www/html/ignition - - - name: Generate the ignition files 4 - shell: chmod 775 /var/www/html/ignition/*.ign - - - name: Prepare the KVM OCP guests - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/prep_kvm_guests.sh diff --git a/roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml b/roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml 
deleted file mode 100644 index 24709068..00000000 --- a/roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml +++ /dev/null @@ -1,44 +0,0 @@ -##Needs variables from host_vars for baseDomain, cluster_name, pullsecret, and ssh-public-key. -##I think it also needs cidr (pod's IP range) and service network IP range. -## Ensure PATH references are correct ---- - -- hosts: bastion_server - become: true - tasks: - - - name: create install-config.yaml - file: - path: "~/files/install-config.yaml" - state: touch - - - name: Fill contents of install-config.yaml file - copy: - dest: "~/files/macvtap.xml" - content: | - apiVersion: v1 - baseDomain: - compute: - - architecture: x86 - hyperthreading: Enabled - name: worker - replicas: 0 - controlPlane: - architecture: x86 - hyperthreading: Enabled - name: master - replicas: 3 - metadata: - name: - networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - networkType: OpenShiftSDN - serviceNetwork: - - 172.30.0.0/16 - platform: - none: {} - fips: false - pullSecret: '' - sshKey: '' diff --git a/roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save b/roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save deleted file mode 100644 index f5887aa8..00000000 --- a/roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save +++ /dev/null @@ -1,23 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - - - name: update repository index - dnf: - update_cache: yes - - - name: install httpd - dnf: - name: httpd - state: latest - -## Not sure if this will work, especially after running once. 
Check this page out if encountering problems: -##https://dmsimard.com/2016/03/15/changing-the-ssh-port-with-ansible/ - - - name: Change ssh port to 8080 - set_fact: - ansible_port: 8080 - - diff --git a/roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml b/roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml deleted file mode 100644 index 5667dae4..00000000 --- a/roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - - - name: start bastion install process - community.libvirt.virt: - name: bastion - memory: 4096 - vcpus: 2 - disk size: 20 - cdrom: /var/lib/libvirt/images/rhel83.iso - accelerate: yes - import: yes - network: network=macvtap-net - extra-args: "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" - location: /rhcos-install - qemu-commandline: "-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" - noautoconsole: yes diff --git a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml b/roles/bastion_server/tasks/bastion.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/bastion.yaml rename to roles/bastion_server/tasks/bastion.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/connect_cluster.yaml b/roles/bastion_server/tasks/connect_cluster.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/connect_cluster.yaml rename to roles/bastion_server/tasks/connect_cluster.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml b/roles/bastion_server/tasks/fill_install_config.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml rename to roles/bastion_server/tasks/fill_install_config.yaml diff --git 
a/roles/bastion_server/bastion_s390x/fix-sched.yaml b/roles/bastion_server/tasks/fix-sched.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/fix-sched.yaml rename to roles/bastion_server/tasks/fix-sched.yaml diff --git a/roles/bastion_server/get-images.yml b/roles/bastion_server/tasks/get-images.yaml similarity index 100% rename from roles/bastion_server/get-images.yml rename to roles/bastion_server/tasks/get-images.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/tasks/get-ocp.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml rename to roles/bastion_server/tasks/get-ocp.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/tasks/http_setup.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/http_setup.yaml rename to roles/bastion_server/tasks/http_setup.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/http_test.yaml b/roles/bastion_server/tasks/http_test.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/http_test.yaml rename to roles/bastion_server/tasks/http_test.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml b/roles/bastion_server/tasks/load_balancer.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml rename to roles/bastion_server/tasks/load_balancer.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml b/roles/bastion_server/templates/install-config.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml rename to roles/bastion_server/templates/install-config.yaml diff --git a/roles/kvm_host/kvm_host_x86/tasks/main.yml b/roles/kvm_host/kvm_host_x86/tasks/main.yml deleted file mode 100644 index ed7b03a3..00000000 --- a/roles/kvm_host/kvm_host_x86/tasks/main.yml +++ /dev/null @@ -1,41 
+0,0 @@ ---- - -- hosts: kvm_hosts - become: true - tasks: - - - name: Ensure pre-requisite packages are installed - yum: - names: - - libvirt - - libvirt-devel - - libvirt-daemon-kvm - - qemu-kvm - - virt-manager - - libvirt-daemon-config-network - - libvirt-client - - qemu-img - - - name: update repository index - yum: - update_cache: yes - -## Playbook works, need to change absolute path to relative or variable. Need to move to roles file for kvm_host - - - name: Set up macvtap bridge - community.libvirt.virt_net: - command: define - name: macvtap-net - autostart: true - xml: "{{ lookup ('template', '/home/jacob/.git/Ansible-OpenShift-Provisioning/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2') }}" - - - name: Start macvtap-net - community.libvirt.virt_net: - autostart: yes - command: start - name: macvtap-net - - - name: Set autostart for macvtap-net - community.libvirt.virt_net: - autostart: yes - name: macvtap-net diff --git a/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 b/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 deleted file mode 100644 index 388477ea..00000000 --- a/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 +++ /dev/null @@ -1,6 +0,0 @@ - - macvtap-net - - - - diff --git a/roles/kvm_host/kvm_host_s390x/tasks/create_bastion.yaml b/roles/kvm_host/tasks/create_bastion.yaml similarity index 91% rename from roles/kvm_host/kvm_host_s390x/tasks/create_bastion.yaml rename to roles/kvm_host/tasks/create_bastion.yaml index 0468cc71..73272375 100644 --- a/roles/kvm_host/kvm_host_s390x/tasks/create_bastion.yaml +++ b/roles/kvm_host/tasks/create_bastion.yaml @@ -5,7 +5,7 @@ tasks: - name: virtualize bastion server - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - name: start bastion install command: virt-install --connect 
qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/create_bootstrap.yaml b/roles/kvm_host/tasks/create_bootstrap.yaml similarity index 100% rename from create_bootstrap.yaml rename to roles/kvm_host/tasks/create_bootstrap.yaml diff --git a/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml b/roles/kvm_host/tasks/create_nodes.yaml similarity index 100% rename from roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml rename to roles/kvm_host/tasks/create_nodes.yaml diff --git a/roles/kvm_host/kvm_host_s390x/tasks/define_macvtap.yaml b/roles/kvm_host/tasks/define_macvtap.yaml similarity index 100% rename from roles/kvm_host/kvm_host_s390x/tasks/define_macvtap.yaml rename to roles/kvm_host/tasks/define_macvtap.yaml diff --git a/roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml b/roles/kvm_host/tasks/prep_kvm_guests.yaml similarity index 100% rename from roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml rename to roles/kvm_host/tasks/prep_kvm_guests.yaml diff --git a/roles/kvm_host/kvm_host_s390x/templates/macvtap.xml.j2 b/roles/kvm_host/templates/macvtap.xml.j2 similarity index 100% rename from roles/kvm_host/kvm_host_s390x/templates/macvtap.xml.j2 rename to roles/kvm_host/templates/macvtap.xml.j2 From e68a274ea30b956d2903cb69a3f331d5eff34b46 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 3 Aug 2021 16:21:13 -0400 Subject: [PATCH 198/885] Cleaned out a lot of redundant files --- build_script.sh | 2 +- .../bastion_x86/tasks/bastion.yaml | 49 
------------------- .../tasks/fill_install_config.yaml | 44 ----------------- .../bastion_x86/tasks/http_setup.yaml.save | 23 --------- .../tasks/start_bastion_install.yaml | 20 -------- .../{bastion_s390x => }/tasks/bastion.yaml | 0 .../tasks/connect_cluster.yaml | 0 .../tasks/fill_install_config.yaml | 0 .../{bastion_s390x => tasks}/fix-sched.yaml | 0 .../{get-images.yml => tasks/get-images.yaml} | 0 .../{bastion_s390x => }/tasks/get-ocp.yaml | 0 .../{bastion_s390x => }/tasks/http_setup.yaml | 0 .../{bastion_s390x => }/tasks/http_test.yaml | 0 .../tasks/load_balancer.yaml | 0 .../files => templates}/install-config.yaml | 0 roles/kvm_host/kvm_host_x86/tasks/main.yml | 41 ---------------- .../kvm_host_x86/templates/macvtap.xml.j2 | 6 --- .../tasks/create_bastion.yaml | 2 +- .../kvm_host/tasks/create_bootstrap.yaml | 0 .../tasks/create_nodes.yaml | 0 .../tasks/define_macvtap.yaml | 0 .../tasks/prep_kvm_guests.yaml | 0 .../templates/macvtap.xml.j2 | 0 23 files changed, 2 insertions(+), 185 deletions(-) delete mode 100644 roles/bastion_server/bastion_x86/tasks/bastion.yaml delete mode 100644 roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml delete mode 100644 roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save delete mode 100644 roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml rename roles/bastion_server/{bastion_s390x => }/tasks/bastion.yaml (100%) rename roles/bastion_server/{bastion_s390x => }/tasks/connect_cluster.yaml (100%) rename roles/bastion_server/{bastion_s390x => }/tasks/fill_install_config.yaml (100%) rename roles/bastion_server/{bastion_s390x => tasks}/fix-sched.yaml (100%) rename roles/bastion_server/{get-images.yml => tasks/get-images.yaml} (100%) rename roles/bastion_server/{bastion_s390x => }/tasks/get-ocp.yaml (100%) rename roles/bastion_server/{bastion_s390x => }/tasks/http_setup.yaml (100%) rename roles/bastion_server/{bastion_s390x => }/tasks/http_test.yaml (100%) rename roles/bastion_server/{bastion_s390x 
=> }/tasks/load_balancer.yaml (100%) rename roles/bastion_server/{bastion_s390x/tasks/files => templates}/install-config.yaml (100%) delete mode 100644 roles/kvm_host/kvm_host_x86/tasks/main.yml delete mode 100644 roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 rename roles/kvm_host/{kvm_host_s390x => }/tasks/create_bastion.yaml (91%) rename create_bootstrap.yaml => roles/kvm_host/tasks/create_bootstrap.yaml (100%) rename roles/kvm_host/{kvm_host_s390x => }/tasks/create_nodes.yaml (100%) rename roles/kvm_host/{kvm_host_s390x => }/tasks/define_macvtap.yaml (100%) rename roles/kvm_host/{kvm_host_s390x => }/tasks/prep_kvm_guests.yaml (100%) rename roles/kvm_host/{kvm_host_s390x => }/templates/macvtap.xml.j2 (100%) diff --git a/build_script.sh b/build_script.sh index 0505a2df..52a90971 100644 --- a/build_script.sh +++ b/build_script.sh @@ -1,7 +1,7 @@ #!/bin/bash # Created by Phillip - +##for when we start templating: https://searchservervirtualization.techtarget.com/tip/Expedite-Ansible-KVM-provisioning-with-automation #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G #qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G diff --git a/roles/bastion_server/bastion_x86/tasks/bastion.yaml b/roles/bastion_server/bastion_x86/tasks/bastion.yaml deleted file mode 100644 index ac86bf27..00000000 --- a/roles/bastion_server/bastion_x86/tasks/bastion.yaml +++ /dev/null @@ -1,49 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - - -#there has to be a way to do this through Ansible. Step 3 page 9 - - name: complete bastion install process - -#leaving this until I meet with Filipe - - name: download software - -#leaving this until I meet with Filipe - - name: DNS requirements and configuration - -#not sure what this instruction step is trying to say. 
Page 13 - - name: Load Balancer - -# Need to edit this script to automate changing port to 8080 and ensure latest versions of OpenShift mirrors - - name: Create and configure the HTTP server - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/create_http.sh - - - name: Get installer and oc Client Tools - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/get_ocp_installer.sh - -##create install-config.yaml file - - name: create install-config.yaml - file: - path: "~/files/install-config.yaml" - state: touch - -##need to use host_vars for - - name: Generate the ignition files 1 - shell: ./openshift-install create manifests --dir= - -##also needs variable - - name: Generate the ignition files 2 - shell: ./openshift-install create ignition-configs --dir= - -##also needs variable - - name: Generate the ignition files 3 - shell: cp /*.ign /var/www/html/ignition - - - name: Generate the ignition files 4 - shell: chmod 775 /var/www/html/ignition/*.ign - - - name: Prepare the KVM OCP guests - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/prep_kvm_guests.sh diff --git a/roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml b/roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml deleted file mode 100644 index 24709068..00000000 --- a/roles/bastion_server/bastion_x86/tasks/fill_install_config.yaml +++ /dev/null @@ -1,44 +0,0 @@ -##Needs variables from host_vars for baseDomain, cluster_name, pullsecret, and ssh-public-key. -##I think it also needs cidr (pod's IP range) and service network IP range. 
-## Ensure PATH references are correct ---- - -- hosts: bastion_server - become: true - tasks: - - - name: create install-config.yaml - file: - path: "~/files/install-config.yaml" - state: touch - - - name: Fill contents of install-config.yaml file - copy: - dest: "~/files/macvtap.xml" - content: | - apiVersion: v1 - baseDomain: - compute: - - architecture: x86 - hyperthreading: Enabled - name: worker - replicas: 0 - controlPlane: - architecture: x86 - hyperthreading: Enabled - name: master - replicas: 3 - metadata: - name: - networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - networkType: OpenShiftSDN - serviceNetwork: - - 172.30.0.0/16 - platform: - none: {} - fips: false - pullSecret: '' - sshKey: '' diff --git a/roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save b/roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save deleted file mode 100644 index f5887aa8..00000000 --- a/roles/bastion_server/bastion_x86/tasks/http_setup.yaml.save +++ /dev/null @@ -1,23 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - - - name: update repository index - dnf: - update_cache: yes - - - name: install httpd - dnf: - name: httpd - state: latest - -## Not sure if this will work, especially after running once. 
Check this page out if encountering problems: -##https://dmsimard.com/2016/03/15/changing-the-ssh-port-with-ansible/ - - - name: Change ssh port to 8080 - set_fact: - ansible_port: 8080 - - diff --git a/roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml b/roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml deleted file mode 100644 index 5667dae4..00000000 --- a/roles/bastion_server/bastion_x86/tasks/start_bastion_install.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - - - name: start bastion install process - community.libvirt.virt: - name: bastion - memory: 4096 - vcpus: 2 - disk size: 20 - cdrom: /var/lib/libvirt/images/rhel83.iso - accelerate: yes - import: yes - network: network=macvtap-net - extra-args: "ip=172.16.10.212::172.16.10.1:255.255.255.0:bastion.ocp.home.local::none nameserver=172.16.10.38 vnc vncpassword=12341234 inst.repo=hd:/dev/vda ipv6.disable=1" - location: /rhcos-install - qemu-commandline: "-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" - noautoconsole: yes diff --git a/roles/bastion_server/bastion_s390x/tasks/bastion.yaml b/roles/bastion_server/tasks/bastion.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/bastion.yaml rename to roles/bastion_server/tasks/bastion.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/connect_cluster.yaml b/roles/bastion_server/tasks/connect_cluster.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/connect_cluster.yaml rename to roles/bastion_server/tasks/connect_cluster.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml b/roles/bastion_server/tasks/fill_install_config.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/fill_install_config.yaml rename to roles/bastion_server/tasks/fill_install_config.yaml diff --git 
a/roles/bastion_server/bastion_s390x/fix-sched.yaml b/roles/bastion_server/tasks/fix-sched.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/fix-sched.yaml rename to roles/bastion_server/tasks/fix-sched.yaml diff --git a/roles/bastion_server/get-images.yml b/roles/bastion_server/tasks/get-images.yaml similarity index 100% rename from roles/bastion_server/get-images.yml rename to roles/bastion_server/tasks/get-images.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml b/roles/bastion_server/tasks/get-ocp.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/get-ocp.yaml rename to roles/bastion_server/tasks/get-ocp.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/http_setup.yaml b/roles/bastion_server/tasks/http_setup.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/http_setup.yaml rename to roles/bastion_server/tasks/http_setup.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/http_test.yaml b/roles/bastion_server/tasks/http_test.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/http_test.yaml rename to roles/bastion_server/tasks/http_test.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml b/roles/bastion_server/tasks/load_balancer.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/load_balancer.yaml rename to roles/bastion_server/tasks/load_balancer.yaml diff --git a/roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml b/roles/bastion_server/templates/install-config.yaml similarity index 100% rename from roles/bastion_server/bastion_s390x/tasks/files/install-config.yaml rename to roles/bastion_server/templates/install-config.yaml diff --git a/roles/kvm_host/kvm_host_x86/tasks/main.yml b/roles/kvm_host/kvm_host_x86/tasks/main.yml deleted file mode 100644 index ed7b03a3..00000000 --- a/roles/kvm_host/kvm_host_x86/tasks/main.yml +++ /dev/null @@ -1,41 
+0,0 @@ ---- - -- hosts: kvm_hosts - become: true - tasks: - - - name: Ensure pre-requisite packages are installed - yum: - names: - - libvirt - - libvirt-devel - - libvirt-daemon-kvm - - qemu-kvm - - virt-manager - - libvirt-daemon-config-network - - libvirt-client - - qemu-img - - - name: update repository index - yum: - update_cache: yes - -## Playbook works, need to change absolute path to relative or variable. Need to move to roles file for kvm_host - - - name: Set up macvtap bridge - community.libvirt.virt_net: - command: define - name: macvtap-net - autostart: true - xml: "{{ lookup ('template', '/home/jacob/.git/Ansible-OpenShift-Provisioning/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2') }}" - - - name: Start macvtap-net - community.libvirt.virt_net: - autostart: yes - command: start - name: macvtap-net - - - name: Set autostart for macvtap-net - community.libvirt.virt_net: - autostart: yes - name: macvtap-net diff --git a/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 b/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 deleted file mode 100644 index 388477ea..00000000 --- a/roles/kvm_host/kvm_host_x86/templates/macvtap.xml.j2 +++ /dev/null @@ -1,6 +0,0 @@ - - macvtap-net - - - - diff --git a/roles/kvm_host/kvm_host_s390x/tasks/create_bastion.yaml b/roles/kvm_host/tasks/create_bastion.yaml similarity index 91% rename from roles/kvm_host/kvm_host_s390x/tasks/create_bastion.yaml rename to roles/kvm_host/tasks/create_bastion.yaml index 0468cc71..73272375 100644 --- a/roles/kvm_host/kvm_host_s390x/tasks/create_bastion.yaml +++ b/roles/kvm_host/tasks/create_bastion.yaml @@ -5,7 +5,7 @@ tasks: - name: virtualize bastion server - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - name: start bastion install command: virt-install --connect 
qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/create_bootstrap.yaml b/roles/kvm_host/tasks/create_bootstrap.yaml similarity index 100% rename from create_bootstrap.yaml rename to roles/kvm_host/tasks/create_bootstrap.yaml diff --git a/roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml b/roles/kvm_host/tasks/create_nodes.yaml similarity index 100% rename from roles/kvm_host/kvm_host_s390x/tasks/create_nodes.yaml rename to roles/kvm_host/tasks/create_nodes.yaml diff --git a/roles/kvm_host/kvm_host_s390x/tasks/define_macvtap.yaml b/roles/kvm_host/tasks/define_macvtap.yaml similarity index 100% rename from roles/kvm_host/kvm_host_s390x/tasks/define_macvtap.yaml rename to roles/kvm_host/tasks/define_macvtap.yaml diff --git a/roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml b/roles/kvm_host/tasks/prep_kvm_guests.yaml similarity index 100% rename from roles/kvm_host/kvm_host_s390x/tasks/prep_kvm_guests.yaml rename to roles/kvm_host/tasks/prep_kvm_guests.yaml diff --git a/roles/kvm_host/kvm_host_s390x/templates/macvtap.xml.j2 b/roles/kvm_host/templates/macvtap.xml.j2 similarity index 100% rename from roles/kvm_host/kvm_host_s390x/templates/macvtap.xml.j2 rename to roles/kvm_host/templates/macvtap.xml.j2 From 51e332bae271e198731d4574c2bea62fa4d141af Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 3 Aug 2021 16:29:47 -0400 Subject: [PATCH 199/885] More clean-up --- roles/bastion_server/tasks/get-ocp.yaml | 2 ++ roles/bootstrap_server/tasks/bootstrap_wait.yaml | 11 
----------- roles/bootstrap_server/tasks/main.yaml | 13 ------------- 3 files changed, 2 insertions(+), 24 deletions(-) delete mode 100644 roles/bootstrap_server/tasks/bootstrap_wait.yaml delete mode 100644 roles/bootstrap_server/tasks/main.yaml diff --git a/roles/bastion_server/tasks/get-ocp.yaml b/roles/bastion_server/tasks/get-ocp.yaml index 809d23c4..ec520cb9 100644 --- a/roles/bastion_server/tasks/get-ocp.yaml +++ b/roles/bastion_server/tasks/get-ocp.yaml @@ -1,3 +1,5 @@ +--- + - hosts: bastion_server become: true tasks: diff --git a/roles/bootstrap_server/tasks/bootstrap_wait.yaml b/roles/bootstrap_server/tasks/bootstrap_wait.yaml deleted file mode 100644 index e23af586..00000000 --- a/roles/bootstrap_server/tasks/bootstrap_wait.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: wait for bootstrap to be created - wait_for_connection: - delay: 10 - sleep: 10 - timeout: 300 diff --git a/roles/bootstrap_server/tasks/main.yaml b/roles/bootstrap_server/tasks/main.yaml deleted file mode 100644 index d0e92ea8..00000000 --- a/roles/bootstrap_server/tasks/main.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - -- hosts: bootstrap_server - become: true - tasks: - -##need to implement wait for completion logic before starting the next one - - name: create bootstrap - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/create_bootstrap.sh - -##needs to wait for previous to finish before starting - - name: verify installation - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/verify_bootstrap.sh From 7453e9d6c4cd91cc57dbe83099c0294bf34e2ba1 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 3 Aug 2021 16:29:47 -0400 Subject: [PATCH 200/885] More clean-up --- roles/bastion_server/tasks/get-ocp.yaml | 2 ++ roles/bootstrap_server/tasks/bootstrap_wait.yaml | 11 ----------- roles/bootstrap_server/tasks/main.yaml | 13 ------------- 3 files changed, 2 insertions(+), 24 deletions(-) delete mode 100644 
roles/bootstrap_server/tasks/bootstrap_wait.yaml delete mode 100644 roles/bootstrap_server/tasks/main.yaml diff --git a/roles/bastion_server/tasks/get-ocp.yaml b/roles/bastion_server/tasks/get-ocp.yaml index 809d23c4..ec520cb9 100644 --- a/roles/bastion_server/tasks/get-ocp.yaml +++ b/roles/bastion_server/tasks/get-ocp.yaml @@ -1,3 +1,5 @@ +--- + - hosts: bastion_server become: true tasks: diff --git a/roles/bootstrap_server/tasks/bootstrap_wait.yaml b/roles/bootstrap_server/tasks/bootstrap_wait.yaml deleted file mode 100644 index e23af586..00000000 --- a/roles/bootstrap_server/tasks/bootstrap_wait.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: wait for bootstrap to be created - wait_for_connection: - delay: 10 - sleep: 10 - timeout: 300 diff --git a/roles/bootstrap_server/tasks/main.yaml b/roles/bootstrap_server/tasks/main.yaml deleted file mode 100644 index d0e92ea8..00000000 --- a/roles/bootstrap_server/tasks/main.yaml +++ /dev/null @@ -1,13 +0,0 @@ ---- - -- hosts: bootstrap_server - become: true - tasks: - -##need to implement wait for completion logic before starting the next one - - name: create bootstrap - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/create_bootstrap.sh - -##needs to wait for previous to finish before starting - - name: verify installation - script: ~/.git/Ansible-OpenShift-Provisioning/files/shell_scripts/verify_bootstrap.sh From f302155bbfef78368d4a17d5fc0ac7513df0d1c2 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 4 Aug 2021 12:47:13 -0400 Subject: [PATCH 201/885] updated create_bastion playbook to reflect updated command --- roles/bastion_server/tasks/bastion.yaml | 4 +++- roles/kvm_host/tasks/create_bastion.yaml | 2 +- roles/kvm_host/tasks/prep_kvm_guests.yaml | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/roles/bastion_server/tasks/bastion.yaml b/roles/bastion_server/tasks/bastion.yaml index 338e84a3..85f4ede6 100644 --- 
a/roles/bastion_server/tasks/bastion.yaml +++ b/roles/bastion_server/tasks/bastion.yaml @@ -109,6 +109,7 @@ state: directory mode: '0755' +##link got cut off - name: get mirrors 1 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> @@ -116,13 +117,14 @@ remote_src: yes mode: '0755' +##link got cut off - name: get mirrors 2 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> dest: /var/www/html/bin remote_src: yes mode: '0755' - +##link got cut off - name: get mirrors 3 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> diff --git a/roles/kvm_host/tasks/create_bastion.yaml b/roles/kvm_host/tasks/create_bastion.yaml index 73272375..79c9af24 100644 --- a/roles/kvm_host/tasks/create_bastion.yaml +++ b/roles/kvm_host/tasks/create_bastion.yaml @@ -8,4 +8,4 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - name: start bastion install - command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 
inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/roles/kvm_host/tasks/prep_kvm_guests.yaml b/roles/kvm_host/tasks/prep_kvm_guests.yaml index a419cedf..75abf5aa 100644 --- a/roles/kvm_host/tasks/prep_kvm_guests.yaml +++ b/roles/kvm_host/tasks/prep_kvm_guests.yaml @@ -4,7 +4,7 @@ become: true tasks: - - name: get dependencies from openshift.com + - name: get rhcos qcow2 files get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images/ From 900c18bae6d3fc196b5fa2150549a8eef20781b9 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 4 Aug 2021 12:47:13 -0400 Subject: [PATCH 202/885] updated create_bastion playbook to reflect updated command --- roles/bastion_server/tasks/bastion.yaml | 4 +++- roles/kvm_host/tasks/create_bastion.yaml | 2 +- roles/kvm_host/tasks/prep_kvm_guests.yaml | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/roles/bastion_server/tasks/bastion.yaml b/roles/bastion_server/tasks/bastion.yaml index 338e84a3..85f4ede6 100644 --- a/roles/bastion_server/tasks/bastion.yaml +++ b/roles/bastion_server/tasks/bastion.yaml @@ -109,6 +109,7 @@ state: directory mode: '0755' +##link got cut off - name: get mirrors 1 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> @@ -116,13 +117,14 @@ remote_src: yes mode: '0755' +##link got cut off - name: get mirrors 2 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> dest: /var/www/html/bin remote_src: yes mode: '0755' - +##link got cut off - name: get mirrors 3 get_url: url: 
https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> diff --git a/roles/kvm_host/tasks/create_bastion.yaml b/roles/kvm_host/tasks/create_bastion.yaml index 73272375..79c9af24 100644 --- a/roles/kvm_host/tasks/create_bastion.yaml +++ b/roles/kvm_host/tasks/create_bastion.yaml @@ -8,4 +8,4 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - name: start bastion install - command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion.distribution.ocpz.wsclab.endicott.ibm.com::none nameserver=9.60.70.82 inst.repo=hd:/dev/vda ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/roles/kvm_host/tasks/prep_kvm_guests.yaml b/roles/kvm_host/tasks/prep_kvm_guests.yaml index a419cedf..75abf5aa 100644 --- a/roles/kvm_host/tasks/prep_kvm_guests.yaml +++ b/roles/kvm_host/tasks/prep_kvm_guests.yaml @@ -4,7 +4,7 @@ become: true tasks: - - name: get dependencies from openshift.com + - name: get rhcos qcow2 files get_url: url: 
https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images/ From 0ad7bc74d4a467521f0067035ff18ec5f25f34d3 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 4 Aug 2021 14:34:20 -0500 Subject: [PATCH 203/885] updated bastion sshkey for accessing cluster nodes --- files/install-config.yaml | 2 +- roles/bastion_server/tasks/fill_install_config.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/files/install-config.yaml b/files/install-config.yaml index c9134ad7..b761fc72 100644 --- a/files/install-config.yaml +++ b/files/install-config.yaml @@ -23,4 +23,4 @@ platform: none: {} fips: false pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19
saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDppZ1cEGLhC3T9rvex+ZTVq4V1IKgSUYclRkZPNFPyrMjUw+scSVw5O4wlgnhZF2ZB6XdTPKUOolvNv1TH6YdtMOulAsJ6mvs2KnRW2Sd1jdw9dJ3ijkMOAz2CBHvt9q4r0u+87WaAAMlGvHAEqCJvBawPTtYkNSXTDp7dagb2wt8+/j3HI+em7LSY4obNtYmXHsQLiEYSPQMcalHPmbgpTLerST19x214DiDCfHHkk2ek+BEdtgQEJNIc6ufyrwk6KUVv7MCgCyQB2o5r3G/KX31Va3hyRRAca1MDI3Ee+C5XpYfPZvJRhL/FKa2FRATffl7Kn3zZLH2ZtOV4Cs2zebjX0dOGUipyjnf/MacsTIeKK7Bm91IPgqgTSlcZbiIcYXoGSIO0u2pytzPJTUywmPvy2AP/NnPZn+NR39Zf/DYys8vWVPRWOKbJBSHDO0yikzH1xUt62+XS5Kl6kmPsQz3VEghA/lvVJ6KpW5PSgFXMkn8e+CcHV+bIO2zc5oc= root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file +sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDudjhbUkA7ekvuzZe7WpkkF2KOoLGwWU05Ch6Ty0OWDW0vdbtrG7TIgy8FYOvBnArnIJcy1AP6tpH6BcbmHRIosVFbZ3GrLkKEOroBD+8+6qGzN+fIXBzss20iRvMvDdlDaVJ1BqmTvNnkbeWdgCg1MbICQ1OYOExm+xgvGWHOjoJkUzkw3oBnAnpZHHTkvM2dY0SNtzV9i5FcSvdBM2kindvBpvDZ9PjTGkcFMnbVRpBXUV+axNg4yuX0YWFDWOFlQ4DjvasdN6jMQ2MrWBYtYF1RAklsOhSXTPKAYaxZZsfeEBnMVUFshaDYy7PlbCXcUEVEFEkMlTAbe//bBXx5l+2fVFtOUyhfws28bLg+rlJLivINoLW4EB+ERViYBTCEx5njl9mK9EcPpCJlLTR82kr8vEwE/sJ9ro3dH/aEUQcTkVYqlOCw2lsFZpkZtthArknZiHk91OtWgXj4GsAjihehfBvEL/GbEuO0HTzvl+QiTnC+mL4KcObl31K9LdlAbKA53l6G68py0u7plUervFN8H7rIFbv4ndUTAo2rODty/QoYH8jBu460LLgLyaRfaDFrGitnAdSiCS/8dEr8iAVxj5X5sgcTQlsJYyvMv61YCzItmlipDpbf+M/mJyJuxyuYWqudItOIWchFktstcpp3dx1iM/pr0o4HitpeQ== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file diff --git a/roles/bastion_server/tasks/fill_install_config.yaml b/roles/bastion_server/tasks/fill_install_config.yaml index 567fd275..55e817d6 100644 --- a/roles/bastion_server/tasks/fill_install_config.yaml +++ b/roles/bastion_server/tasks/fill_install_config.yaml @@ -41,4 +41,4 @@ none: {} fips: false pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' - sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDppZ1cEGLhC3T9rvex+ZTVq4V1IKgSUYclRkZPNFPyrMjUw+scSVw5O4wlgnhZF2ZB6XdTPKUOolvNv1TH6YdtMOulAsJ6mvs2KnRW2Sd1jdw9dJ3ijkMOAz2CBHvt9q4r0u+87WaAAMlGvHAEqCJvBawPTtYkNSXTDp7dagb2wt8+/j3HI+em7LSY4obNtYmXHsQLiEYSPQMcalHPmbgpTLerST19x214DiDCfHHkk2ek+BEdtgQEJNIc6ufyrwk6KUVv7MCgCyQB2o5r3G/KX31Va3hyRRAca1MDI3Ee+C5XpYfPZvJRhL/FKa2FRATffl7Kn3zZLH2ZtOV4Cs2zebjX0dOGUipyjnf/MacsTIeKK7Bm91IPgqgTSlcZbiIcYXoGSIO0u2pytzPJTUywmPvy2AP/NnPZn+NR39Zf/DYys8vWVPRWOKbJBSHDO0yikzH1xUt62+XS5Kl6kmPsQz3VEghA/lvVJ6KpW5PSgFXMkn8e+CcHV+bIO2zc5oc= root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' + sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDDudjhbUkA7ekvuzZe7WpkkF2KOoLGwWU05Ch6Ty0OWDW0vdbtrG7TIgy8FYOvBnArnIJcy1AP6tpH6BcbmHRIosVFbZ3GrLkKEOroBD+8+6qGzN+fIXBzss20iRvMvDdlDaVJ1BqmTvNnkbeWdgCg1MbICQ1OYOExm+xgvGWHOjoJkUzkw3oBnAnpZHHTkvM2dY0SNtzV9i5FcSvdBM2kindvBpvDZ9PjTGkcFMnbVRpBXUV+axNg4yuX0YWFDWOFlQ4DjvasdN6jMQ2MrWBYtYF1RAklsOhSXTPKAYaxZZsfeEBnMVUFshaDYy7PlbCXcUEVEFEkMlTAbe//bBXx5l+2fVFtOUyhfws28bLg+rlJLivINoLW4EB+ERViYBTCEx5njl9mK9EcPpCJlLTR82kr8vEwE/sJ9ro3dH/aEUQcTkVYqlOCw2lsFZpkZtthArknZiHk91OtWgXj4GsAjihehfBvEL/GbEuO0HTzvl+QiTnC+mL4KcObl31K9LdlAbKA53l6G68py0u7plUervFN8H7rIFbv4ndUTAo2rODty/QoYH8jBu460LLgLyaRfaDFrGitnAdSiCS/8dEr8iAVxj5X5sgcTQlsJYyvMv61YCzItmlipDpbf+M/mJyJuxyuYWqudItOIWchFktstcpp3dx1iM/pr0o4HitpeQ== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' From 182ec3f1493b25493dc871ae1401c6f6a47a80aa Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 4 Aug 2021 14:34:20 -0500 Subject: [PATCH 204/885] updated bastion sshkey for accessing cluster nodes --- files/install-config.yaml | 2 +- roles/bastion_server/tasks/fill_install_config.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/files/install-config.yaml b/files/install-config.yaml index c9134ad7..b761fc72 100644 --- a/files/install-config.yaml +++ b/files/install-config.yaml @@ -23,4 +23,4 @@ platform: none: {} fips: false pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDppZ1cEGLhC3T9rvex+ZTVq4V1IKgSUYclRkZPNFPyrMjUw+scSVw5O4wlgnhZF2ZB6XdTPKUOolvNv1TH6YdtMOulAsJ6mvs2KnRW2Sd1jdw9dJ3ijkMOAz2CBHvt9q4r0u+87WaAAMlGvHAEqCJvBawPTtYkNSXTDp7dagb2wt8+/j3HI+em7LSY4obNtYmXHsQLiEYSPQMcalHPmbgpTLerST19x214DiDCfHHkk2ek+BEdtgQEJNIc6ufyrwk6KUVv7MCgCyQB2o5r3G/KX31Va3hyRRAca1MDI3Ee+C5XpYfPZvJRhL/FKa2FRATffl7Kn3zZLH2ZtOV4Cs2zebjX0dOGUipyjnf/MacsTIeKK7Bm91IPgqgTSlcZbiIcYXoGSIO0u2pytzPJTUywmPvy2AP/NnPZn+NR39Zf/DYys8vWVPRWOKbJBSHDO0yikzH1xUt62+XS5Kl6kmPsQz3VEghA/lvVJ6KpW5PSgFXMkn8e+CcHV+bIO2zc5oc= root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file +sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDDudjhbUkA7ekvuzZe7WpkkF2KOoLGwWU05Ch6Ty0OWDW0vdbtrG7TIgy8FYOvBnArnIJcy1AP6tpH6BcbmHRIosVFbZ3GrLkKEOroBD+8+6qGzN+fIXBzss20iRvMvDdlDaVJ1BqmTvNnkbeWdgCg1MbICQ1OYOExm+xgvGWHOjoJkUzkw3oBnAnpZHHTkvM2dY0SNtzV9i5FcSvdBM2kindvBpvDZ9PjTGkcFMnbVRpBXUV+axNg4yuX0YWFDWOFlQ4DjvasdN6jMQ2MrWBYtYF1RAklsOhSXTPKAYaxZZsfeEBnMVUFshaDYy7PlbCXcUEVEFEkMlTAbe//bBXx5l+2fVFtOUyhfws28bLg+rlJLivINoLW4EB+ERViYBTCEx5njl9mK9EcPpCJlLTR82kr8vEwE/sJ9ro3dH/aEUQcTkVYqlOCw2lsFZpkZtthArknZiHk91OtWgXj4GsAjihehfBvEL/GbEuO0HTzvl+QiTnC+mL4KcObl31K9LdlAbKA53l6G68py0u7plUervFN8H7rIFbv4ndUTAo2rODty/QoYH8jBu460LLgLyaRfaDFrGitnAdSiCS/8dEr8iAVxj5X5sgcTQlsJYyvMv61YCzItmlipDpbf+M/mJyJuxyuYWqudItOIWchFktstcpp3dx1iM/pr0o4HitpeQ== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file diff --git a/roles/bastion_server/tasks/fill_install_config.yaml b/roles/bastion_server/tasks/fill_install_config.yaml index 567fd275..55e817d6 100644 --- a/roles/bastion_server/tasks/fill_install_config.yaml +++ b/roles/bastion_server/tasks/fill_install_config.yaml @@ -41,4 +41,4 @@ none: {} fips: false pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' - sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDppZ1cEGLhC3T9rvex+ZTVq4V1IKgSUYclRkZPNFPyrMjUw+scSVw5O4wlgnhZF2ZB6XdTPKUOolvNv1TH6YdtMOulAsJ6mvs2KnRW2Sd1jdw9dJ3ijkMOAz2CBHvt9q4r0u+87WaAAMlGvHAEqCJvBawPTtYkNSXTDp7dagb2wt8+/j3HI+em7LSY4obNtYmXHsQLiEYSPQMcalHPmbgpTLerST19x214DiDCfHHkk2ek+BEdtgQEJNIc6ufyrwk6KUVv7MCgCyQB2o5r3G/KX31Va3hyRRAca1MDI3Ee+C5XpYfPZvJRhL/FKa2FRATffl7Kn3zZLH2ZtOV4Cs2zebjX0dOGUipyjnf/MacsTIeKK7Bm91IPgqgTSlcZbiIcYXoGSIO0u2pytzPJTUywmPvy2AP/NnPZn+NR39Zf/DYys8vWVPRWOKbJBSHDO0yikzH1xUt62+XS5Kl6kmPsQz3VEghA/lvVJ6KpW5PSgFXMkn8e+CcHV+bIO2zc5oc= root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' + sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDDudjhbUkA7ekvuzZe7WpkkF2KOoLGwWU05Ch6Ty0OWDW0vdbtrG7TIgy8FYOvBnArnIJcy1AP6tpH6BcbmHRIosVFbZ3GrLkKEOroBD+8+6qGzN+fIXBzss20iRvMvDdlDaVJ1BqmTvNnkbeWdgCg1MbICQ1OYOExm+xgvGWHOjoJkUzkw3oBnAnpZHHTkvM2dY0SNtzV9i5FcSvdBM2kindvBpvDZ9PjTGkcFMnbVRpBXUV+axNg4yuX0YWFDWOFlQ4DjvasdN6jMQ2MrWBYtYF1RAklsOhSXTPKAYaxZZsfeEBnMVUFshaDYy7PlbCXcUEVEFEkMlTAbe//bBXx5l+2fVFtOUyhfws28bLg+rlJLivINoLW4EB+ERViYBTCEx5njl9mK9EcPpCJlLTR82kr8vEwE/sJ9ro3dH/aEUQcTkVYqlOCw2lsFZpkZtthArknZiHk91OtWgXj4GsAjihehfBvEL/GbEuO0HTzvl+QiTnC+mL4KcObl31K9LdlAbKA53l6G68py0u7plUervFN8H7rIFbv4ndUTAo2rODty/QoYH8jBu460LLgLyaRfaDFrGitnAdSiCS/8dEr8iAVxj5X5sgcTQlsJYyvMv61YCzItmlipDpbf+M/mJyJuxyuYWqudItOIWchFktstcpp3dx1iM/pr0o4HitpeQ== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' From 6c495bf6b893384de9a71ec05a897878fc240dd5 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 4 Aug 2021 16:14:37 -0400 Subject: [PATCH 205/885] took out the name change tasks to maintain default unzipped file names --- roles/bastion_server/tasks/http_setup.yaml | 56 ++++++++++------------ 1 file changed, 24 insertions(+), 32 deletions(-) diff --git a/roles/bastion_server/tasks/http_setup.yaml b/roles/bastion_server/tasks/http_setup.yaml index 00bd3cd3..313fab32 100644 --- a/roles/bastion_server/tasks/http_setup.yaml +++ b/roles/bastion_server/tasks/http_setup.yaml @@ -16,7 +16,7 @@ - name: Ensure the default Apache port is 8080 replace: path: /etc/httpd/conf/httpd.conf - regexp: '^Listen 80 ' + regexp: '^Listen 80' replace: 'Listen 8080' backup: yes @@ -32,29 +32,29 @@ name: httpd state: restarted - - name: Allow all access to tcp port 8080 - community.general.ufw: - rule: allow - port: '8080' - proto: tcp - - - name: Allow all access to tcp port 80 - community.general.ufw: - rule: allow - port: '80' - proto: tcp - - - name: Allow all access to tcp port 443 - community.general.ufw: - rule: allow - port: '443' - proto: tcp - - - name: Allow all access to tcp port 4443 - community.general.ufw: - rule: 
allow - port: '4443' - proto: tcp +# - name: Allow all access to tcp port 8080 +# community.general.ufw: +# rule: allow +# port: '8080' +# proto: tcp +# +# - name: Allow all access to tcp port 80 +# community.general.ufw: +# rule: allow +# port: '80' +# proto: tcp +# +# - name: Allow all access to tcp port 443 +# community.general.ufw: +# rule: allow +# port: '443' +# proto: tcp +# +# - name: Allow all access to tcp port 4443 +# community.general.ufw: +# rule: allow +# port: '4443' +# proto: tcp - name: create directory bin for mirrors file: @@ -99,11 +99,3 @@ state: started name: httpd - - name: change mirror 1 file name - command: mv /var/www/html/bin/rhcos-live-kernel-s390x /var/www/html/bin/rhcos-kernel - - - name: change mirror 2 file name - command: mv /var/www/html/bin/rhcos-live-initramfs.s390x.img /var/www/html/bin/rhcos-initramfs.img - - - name: change mirror 3 file name - command: mv /var/www/html/bin/rhcos-live-rootfs.s390x.img /var/www/html/bin/rhcos-rootfs.img From 6e12cdcf5cc70cf148f7be5637b0bd8aff3522e5 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 4 Aug 2021 16:14:37 -0400 Subject: [PATCH 206/885] took out the name change tasks to maintain default unzipped file names --- roles/bastion_server/tasks/http_setup.yaml | 56 ++++++++++------------ 1 file changed, 24 insertions(+), 32 deletions(-) diff --git a/roles/bastion_server/tasks/http_setup.yaml b/roles/bastion_server/tasks/http_setup.yaml index 00bd3cd3..313fab32 100644 --- a/roles/bastion_server/tasks/http_setup.yaml +++ b/roles/bastion_server/tasks/http_setup.yaml @@ -16,7 +16,7 @@ - name: Ensure the default Apache port is 8080 replace: path: /etc/httpd/conf/httpd.conf - regexp: '^Listen 80 ' + regexp: '^Listen 80' replace: 'Listen 8080' backup: yes @@ -32,29 +32,29 @@ name: httpd state: restarted - - name: Allow all access to tcp port 8080 - community.general.ufw: - rule: allow - port: '8080' - proto: tcp - - - name: Allow all access to tcp port 80 - community.general.ufw: - rule: allow 
- port: '80' - proto: tcp - - - name: Allow all access to tcp port 443 - community.general.ufw: - rule: allow - port: '443' - proto: tcp - - - name: Allow all access to tcp port 4443 - community.general.ufw: - rule: allow - port: '4443' - proto: tcp +# - name: Allow all access to tcp port 8080 +# community.general.ufw: +# rule: allow +# port: '8080' +# proto: tcp +# +# - name: Allow all access to tcp port 80 +# community.general.ufw: +# rule: allow +# port: '80' +# proto: tcp +# +# - name: Allow all access to tcp port 443 +# community.general.ufw: +# rule: allow +# port: '443' +# proto: tcp +# +# - name: Allow all access to tcp port 4443 +# community.general.ufw: +# rule: allow +# port: '4443' +# proto: tcp - name: create directory bin for mirrors file: @@ -99,11 +99,3 @@ state: started name: httpd - - name: change mirror 1 file name - command: mv /var/www/html/bin/rhcos-live-kernel-s390x /var/www/html/bin/rhcos-kernel - - - name: change mirror 2 file name - command: mv /var/www/html/bin/rhcos-live-initramfs.s390x.img /var/www/html/bin/rhcos-initramfs.img - - - name: change mirror 3 file name - command: mv /var/www/html/bin/rhcos-live-rootfs.s390x.img /var/www/html/bin/rhcos-rootfs.img From 8da6656ecf813e56501203cb1e1b9123b530d3b3 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 4 Aug 2021 16:15:53 -0400 Subject: [PATCH 207/885] deleted http_test.yaml because no longer in use --- roles/bastion_server/tasks/http_test.yaml | 6 ------ 1 file changed, 6 deletions(-) delete mode 100644 roles/bastion_server/tasks/http_test.yaml diff --git a/roles/bastion_server/tasks/http_test.yaml b/roles/bastion_server/tasks/http_test.yaml deleted file mode 100644 index 4014775f..00000000 --- a/roles/bastion_server/tasks/http_test.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - From 1c0930a8ed2acb1b0669d00baf7b463ed7ea7b2d Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 4 Aug 2021 16:15:53 -0400 Subject: [PATCH 208/885] deleted 
http_test.yaml because no longer in use --- roles/bastion_server/tasks/http_test.yaml | 6 ------ 1 file changed, 6 deletions(-) delete mode 100644 roles/bastion_server/tasks/http_test.yaml diff --git a/roles/bastion_server/tasks/http_test.yaml b/roles/bastion_server/tasks/http_test.yaml deleted file mode 100644 index 4014775f..00000000 --- a/roles/bastion_server/tasks/http_test.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - From 92dbed738a17ab01069a610363989154ce54d57e Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 4 Aug 2021 16:17:06 -0400 Subject: [PATCH 209/885] changed nameserver references in create bootstrap and nodes playbooks to 9.60.70.82 --- roles/kvm_host/tasks/create_bootstrap.yaml | 2 +- roles/kvm_host/tasks/create_nodes.yaml | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/roles/kvm_host/tasks/create_bootstrap.yaml b/roles/kvm_host/tasks/create_bootstrap.yaml index 751af0a5..c67d9745 100644 --- a/roles/kvm_host/tasks/create_bootstrap.yaml +++ b/roles/kvm_host/tasks/create_bootstrap.yaml @@ -8,7 +8,7 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - name: boot bootstrap - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: 
virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images diff --git a/roles/kvm_host/tasks/create_nodes.yaml b/roles/kvm_host/tasks/create_nodes.yaml index 83eef633..d16def52 100644 --- a/roles/kvm_host/tasks/create_nodes.yaml +++ b/roles/kvm_host/tasks/create_nodes.yaml @@ -20,7 +20,7 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G - name: install CoreOS on control-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 
coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -29,7 +29,7 @@ minutes: 8 - name: install CoreOS on control-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive 
if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -38,7 +38,7 @@ minutes: 8 - name: install CoreOS on control-2 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -47,7 +47,7 @@ minutes: 8 - name: install CoreOS on compute-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda 
coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -56,6 +56,6 @@ minutes: 8 - name: install CoreOS on compute-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive 
if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images From f619cdb68ce87a4e38c5b6ad907ddc5c989a1cd3 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 4 Aug 2021 16:17:06 -0400 Subject: [PATCH 210/885] changed nameserver references in create bootstrap and nodes playbooks to 9.60.70.82 --- roles/kvm_host/tasks/create_bootstrap.yaml | 2 +- roles/kvm_host/tasks/create_nodes.yaml | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/roles/kvm_host/tasks/create_bootstrap.yaml b/roles/kvm_host/tasks/create_bootstrap.yaml index 751af0a5..c67d9745 100644 --- a/roles/kvm_host/tasks/create_bootstrap.yaml +++ b/roles/kvm_host/tasks/create_bootstrap.yaml @@ -8,7 +8,7 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - name: boot bootstrap - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign 
ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images diff --git a/roles/kvm_host/tasks/create_nodes.yaml b/roles/kvm_host/tasks/create_nodes.yaml index 83eef633..d16def52 100644 --- a/roles/kvm_host/tasks/create_nodes.yaml +++ b/roles/kvm_host/tasks/create_nodes.yaml @@ -20,7 +20,7 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G - name: install CoreOS on control-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 
16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -29,7 +29,7 @@ minutes: 8 - name: install CoreOS on control-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda 
coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -38,7 +38,7 @@ minutes: 8 - name: install CoreOS on control-2 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive 
if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -47,7 +47,7 @@ minutes: 8 - name: install CoreOS on compute-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -56,6 +56,6 @@ minutes: 8 - name: install CoreOS on compute-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda 
coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images From f72247b794feb8d3247fe8a25c49e3cecd6eb04a Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 4 Aug 2021 15:31:52 -0500 Subject: [PATCH 211/885] edits to dns zone files --- files/distribution.db | 24 ++++++++++++------------ files/distribution.rev | 14 +++++++------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/files/distribution.db b/files/distribution.db index dc707e61..8e6e8583 100644 --- a/files/distribution.db +++ b/files/distribution.db @@ -14,38 +14,38 @@ $TTL 86400 bastion IN A 9.60.87.139 ;entry for bootstrap host. 
-bootstrap IN A 9.60.87.133 +;bootstrap IN A 9.60.87.133 bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.133 ;entry of your load balancer -haproxy IN A 9.60.87.139 +;haproxy IN A 9.60.87.139 haproxy.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 ;entries for the master nodes control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 -control-0 IN A 9.60.87.138 +;control-0 IN A 9.60.87.138 control-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.137 -control-1 IN A 9.60.87.137 +;control-1 IN A 9.60.87.137 control-2.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.136 -control-2 IN A 9.60.87.136 +;control-2 IN A 9.60.87.136 ;entry for the bastion host bastion IN A 9.60.87.139 ;entries for the worker nodes compute-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.135 -compute-0 IN A 9.60.87.135 +;compute-0 IN A 9.60.87.135 compute-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.134 -compute-1 IN A 9.60.87.134 +;compute-1 IN A 9.60.87.134 ;The api identifies the IP of your load balancer. api.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 -api IN A 9.60.87.139 -api-int.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 -api-int IN A 9.60.87.139 +;api IN A 9.60.87.139 +api-int.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com +;api-int IN A 9.60.87.139 ;The wildcard also identifies the load balancer. -*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 -*.apps IN A 9.60.87.139 +*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com +;*.apps IN A 9.60.87.139 ;EOF diff --git a/files/distribution.rev b/files/distribution.rev index d6653ed8..2bad9ffa 100644 --- a/files/distribution.rev +++ b/files/distribution.rev @@ -1,5 +1,5 @@ $TTL 86400 -@ IN SOA bastion-1.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com. 
( +@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com. ( 2020011800 ;Serial 3600 ;Refresh 1800 ;Retry @@ -14,15 +14,15 @@ bastion IN A 9.60.87.139 139 IN PTR bastion.distribution.ocpz.wsclab.endicott.ibm.com. ;PTR Record IP address to Hostname +133 IN PTR bootstrap. +138 IN PTR control-0. +137 IN PTR control-1. +136 IN PTR control-2. +135 IN PTR compute-0. +134 IN PTR compute-1. 138 IN PTR control-0.distribution.ocpz.wsclab.endicott.ibm.com. 137 IN PTR control-1.distribution.ocpz.wsclab.endicott.ibm.com. 136 IN PTR control-2.distribution.ocpz.wsclab.endicott.ibm.com. 135 IN PTR compute-0.distribution.ocpz.wsclab.endicott.ibm.com. 134 IN PTR compute-1.distribution.ocpz.wsclab.endicott.ibm.com. 133 IN PTR bootstrap.distribution.ocpz.wsclab.endicott.ibm.com. -133 IN PTR bootstrap -138 IN PTR control-0 -137 IN PTR control-1 -136 IN PTR control-2 -135 IN PTR compute-0 -134 IN PTR compute-1 From 2d38a8ee6d2f43c470447de3efe5caf25a4c3b28 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 4 Aug 2021 15:31:52 -0500 Subject: [PATCH 212/885] edits to dns zone files --- files/distribution.db | 24 ++++++++++++------------ files/distribution.rev | 14 +++++++------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/files/distribution.db b/files/distribution.db index dc707e61..8e6e8583 100644 --- a/files/distribution.db +++ b/files/distribution.db @@ -14,38 +14,38 @@ $TTL 86400 bastion IN A 9.60.87.139 ;entry for bootstrap host. 
-bootstrap IN A 9.60.87.133 +;bootstrap IN A 9.60.87.133 bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.133 ;entry of your load balancer -haproxy IN A 9.60.87.139 +;haproxy IN A 9.60.87.139 haproxy.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 ;entries for the master nodes control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 -control-0 IN A 9.60.87.138 +;control-0 IN A 9.60.87.138 control-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.137 -control-1 IN A 9.60.87.137 +;control-1 IN A 9.60.87.137 control-2.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.136 -control-2 IN A 9.60.87.136 +;control-2 IN A 9.60.87.136 ;entry for the bastion host bastion IN A 9.60.87.139 ;entries for the worker nodes compute-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.135 -compute-0 IN A 9.60.87.135 +;compute-0 IN A 9.60.87.135 compute-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.134 -compute-1 IN A 9.60.87.134 +;compute-1 IN A 9.60.87.134 ;The api identifies the IP of your load balancer. api.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 -api IN A 9.60.87.139 -api-int.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 -api-int IN A 9.60.87.139 +;api IN A 9.60.87.139 +api-int.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com +;api-int IN A 9.60.87.139 ;The wildcard also identifies the load balancer. -*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 -*.apps IN A 9.60.87.139 +*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com +;*.apps IN A 9.60.87.139 ;EOF diff --git a/files/distribution.rev b/files/distribution.rev index d6653ed8..2bad9ffa 100644 --- a/files/distribution.rev +++ b/files/distribution.rev @@ -1,5 +1,5 @@ $TTL 86400 -@ IN SOA bastion-1.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com. 
( +@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com. ( 2020011800 ;Serial 3600 ;Refresh 1800 ;Retry @@ -14,15 +14,15 @@ bastion IN A 9.60.87.139 139 IN PTR bastion.distribution.ocpz.wsclab.endicott.ibm.com. ;PTR Record IP address to Hostname +133 IN PTR bootstrap. +138 IN PTR control-0. +137 IN PTR control-1. +136 IN PTR control-2. +135 IN PTR compute-0. +134 IN PTR compute-1. 138 IN PTR control-0.distribution.ocpz.wsclab.endicott.ibm.com. 137 IN PTR control-1.distribution.ocpz.wsclab.endicott.ibm.com. 136 IN PTR control-2.distribution.ocpz.wsclab.endicott.ibm.com. 135 IN PTR compute-0.distribution.ocpz.wsclab.endicott.ibm.com. 134 IN PTR compute-1.distribution.ocpz.wsclab.endicott.ibm.com. 133 IN PTR bootstrap.distribution.ocpz.wsclab.endicott.ibm.com. -133 IN PTR bootstrap -138 IN PTR control-0 -137 IN PTR control-1 -136 IN PTR control-2 -135 IN PTR compute-0 -134 IN PTR compute-1 From 271fe1dbac3f2fd38a1cbf771c58a84a31fdebe7 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 4 Aug 2021 15:35:40 -0500 Subject: [PATCH 213/885] fixed spacing in named.conf file --- files/named.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/named.conf b/files/named.conf index 107460d0..b07a27be 100644 --- a/files/named.conf +++ b/files/named.conf @@ -9,7 +9,7 @@ options { // listen-on port 53 { 127.0.0.1; }; - listen-on port 53 { any; }; + listen-on port 53 { any; }; listen-on-v6 port 53 { ::1; }; directory "/var/named"; dump-file "/var/named/data/cache_dump.db"; From 4ca5168d5f32cb78d0912a549dbeeefffb3020e4 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 4 Aug 2021 15:35:40 -0500 Subject: [PATCH 214/885] fixed spacing in named.conf file --- files/named.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/named.conf b/files/named.conf index 107460d0..b07a27be 100644 --- a/files/named.conf +++ b/files/named.conf @@ -9,7 +9,7 @@ options { // listen-on port 53 { 
127.0.0.1; }; - listen-on port 53 { any; }; + listen-on port 53 { any; }; listen-on-v6 port 53 { ::1; }; directory "/var/named"; dump-file "/var/named/data/cache_dump.db"; From 42262d65d7b94a1d1392b4fa1d493283b0333b85 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 4 Aug 2021 15:45:14 -0500 Subject: [PATCH 215/885] changed DNS name in bootstrap and creat nodes playbooks to .139 --- roles/kvm_host/tasks/create_bootstrap.yaml | 2 +- roles/kvm_host/tasks/create_nodes.yaml | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/roles/kvm_host/tasks/create_bootstrap.yaml b/roles/kvm_host/tasks/create_bootstrap.yaml index c67d9745..1fc4728d 100644 --- a/roles/kvm_host/tasks/create_bootstrap.yaml +++ b/roles/kvm_host/tasks/create_bootstrap.yaml @@ -8,7 +8,7 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - name: boot bootstrap - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign 
ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images diff --git a/roles/kvm_host/tasks/create_nodes.yaml b/roles/kvm_host/tasks/create_nodes.yaml index d16def52..83eef633 100644 --- a/roles/kvm_host/tasks/create_nodes.yaml +++ b/roles/kvm_host/tasks/create_nodes.yaml @@ -20,7 +20,7 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G - name: install CoreOS on control-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 
--disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -29,7 +29,7 @@ minutes: 8 - name: install CoreOS on control-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -38,7 +38,7 @@ minutes: 8 - name: install CoreOS on control-2 node - command: virt-install --boot 
kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -47,7 +47,7 @@ minutes: 8 - name: install CoreOS on compute-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import 
--network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -56,6 +56,6 @@ minutes: 8 - name: install CoreOS on compute-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img 
coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images From c5752f39fad9b150bc3594ce5a050aa759b3b560 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 4 Aug 2021 15:45:14 -0500 Subject: [PATCH 216/885] changed DNS name in bootstrap and creat nodes playbooks to .139 --- roles/kvm_host/tasks/create_bootstrap.yaml | 2 +- roles/kvm_host/tasks/create_nodes.yaml | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/roles/kvm_host/tasks/create_bootstrap.yaml b/roles/kvm_host/tasks/create_bootstrap.yaml index c67d9745..1fc4728d 100644 --- a/roles/kvm_host/tasks/create_bootstrap.yaml +++ b/roles/kvm_host/tasks/create_bootstrap.yaml @@ -8,7 +8,7 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - name: boot bootstrap - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" 
--noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images diff --git a/roles/kvm_host/tasks/create_nodes.yaml b/roles/kvm_host/tasks/create_nodes.yaml index d16def52..83eef633 100644 --- a/roles/kvm_host/tasks/create_nodes.yaml +++ b/roles/kvm_host/tasks/create_nodes.yaml @@ -20,7 +20,7 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G - name: install CoreOS on control-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot 
kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -29,7 +29,7 @@ minutes: 8 - name: install CoreOS on control-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import 
--network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -38,7 +38,7 @@ minutes: 8 - name: install CoreOS on control-2 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -47,7 +47,7 @@ minutes: 8 - name: install CoreOS on compute-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda 
coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -56,6 +56,6 @@ minutes: 8 - name: install CoreOS on compute-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.70.82' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive 
if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images From 49c5642fca83c2fc13f9310e4e69e61b83dbd032 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 4 Aug 2021 16:57:18 -0500 Subject: [PATCH 217/885] updated dns zone file --- files/distribution.db | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/files/distribution.db b/files/distribution.db index 8e6e8583..3d5467c6 100644 --- a/files/distribution.db +++ b/files/distribution.db @@ -17,10 +17,6 @@ bastion IN A 9.60.87.139 ;bootstrap IN A 9.60.87.133 bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.133 -;entry of your load balancer -;haproxy IN A 9.60.87.139 -haproxy.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 - ;entries for the master nodes control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 ;control-0 IN A 9.60.87.138 @@ -38,14 +34,15 @@ compute-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.135 compute-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.134 ;compute-1 IN A 9.60.87.134 +;entry of your load balancer +haproxy IN A 9.60.87.139 + ;The api identifies the IP of your load 
balancer. -api.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 -;api IN A 9.60.87.139 -api-int.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com -;api-int IN A 9.60.87.139 +api IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. +api-int IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. ;The wildcard also identifies the load balancer. -*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com -;*.apps IN A 9.60.87.139 +apps IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. +*.apps IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. ;EOF From b252475f55bc83580f8b5a2b5f720832b5b757ad Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 4 Aug 2021 16:57:18 -0500 Subject: [PATCH 218/885] updated dns zone file --- files/distribution.db | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/files/distribution.db b/files/distribution.db index 8e6e8583..3d5467c6 100644 --- a/files/distribution.db +++ b/files/distribution.db @@ -17,10 +17,6 @@ bastion IN A 9.60.87.139 ;bootstrap IN A 9.60.87.133 bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.133 -;entry of your load balancer -;haproxy IN A 9.60.87.139 -haproxy.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 - ;entries for the master nodes control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 ;control-0 IN A 9.60.87.138 @@ -38,14 +34,15 @@ compute-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.135 compute-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.134 ;compute-1 IN A 9.60.87.134 +;entry of your load balancer +haproxy IN A 9.60.87.139 + ;The api identifies the IP of your load balancer. 
-api.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 -;api IN A 9.60.87.139 -api-int.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com -;api-int IN A 9.60.87.139 +api IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. +api-int IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. ;The wildcard also identifies the load balancer. -*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com -;*.apps IN A 9.60.87.139 +apps IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. +*.apps IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. ;EOF From 8e298d8b8ab7f192da9f026e4498b44d18098cd9 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 4 Aug 2021 18:10:08 -0400 Subject: [PATCH 219/885] updated DNS files and fixed create_nodes playbook with correct coreos file names --- create_nodes.yaml | 61 ++++++++++++++++++++++++++ files/distribution.db | 10 ++--- files/distribution.rev | 4 +- roles/kvm_host/tasks/create_nodes.yaml | 10 ++--- 4 files changed, 74 insertions(+), 11 deletions(-) create mode 100644 create_nodes.yaml diff --git a/create_nodes.yaml b/create_nodes.yaml new file mode 100644 index 00000000..9f3d9389 --- /dev/null +++ b/create_nodes.yaml @@ -0,0 +1,61 @@ +--- + +- hosts: kvm_host + become: true + tasks: + + - name: virtualize control-0 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G + + - name: virtualize control-1 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G + + - name: virtualize control-2 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G + + - name: virtualize compute-0 node + command: qemu-img create -f qcow2 -b 
/var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G + + - name: virtualize compute-1 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G + + - name: install CoreOS on control-0 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: pause 8 minutes + pause: + minutes: 8 + + - name: install CoreOS on control-1 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: pause 8 minutes + pause: + minutes: 8 + + - name: install CoreOS 
on control-2 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: pause 8 minutes + pause: + minutes: 8 + + - name: install CoreOS on compute-0 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: pause 8 minutes + pause: + minutes: 8 + + - name: install CoreOS on compute-1 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img 
coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images diff --git a/files/distribution.db b/files/distribution.db index 3d5467c6..35be68ba 100644 --- a/files/distribution.db +++ b/files/distribution.db @@ -1,10 +1,10 @@ $TTL 86400 @ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com.( - 2020021821 ;Serial - 3600 ;Refresh - 1800 ;Retry - 604800 ;Expire - 86400 ;Minimum TTL + 2020021821 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL ) ;Name Server Information diff --git a/files/distribution.rev b/files/distribution.rev index 2bad9ffa..127ec07b 100644 --- a/files/distribution.rev +++ b/files/distribution.rev @@ -1,5 +1,5 @@ $TTL 86400 -@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com. ( +@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ib$ 2020011800 ;Serial 3600 ;Refresh 1800 ;Retry @@ -26,3 +26,5 @@ bastion IN A 9.60.87.139 135 IN PTR compute-0.distribution.ocpz.wsclab.endicott.ibm.com. 134 IN PTR compute-1.distribution.ocpz.wsclab.endicott.ibm.com. 133 IN PTR bootstrap.distribution.ocpz.wsclab.endicott.ibm.com. +139 IN PTR api-int.distribution.ocpz.wsclab.endicott.ibm.com. +139 IN PTR api.distribution.ocpz.wsclab.endicott.ibm.com. 
diff --git a/roles/kvm_host/tasks/create_nodes.yaml b/roles/kvm_host/tasks/create_nodes.yaml index 83eef633..9f3d9389 100644 --- a/roles/kvm_host/tasks/create_nodes.yaml +++ b/roles/kvm_host/tasks/create_nodes.yaml @@ -20,7 +20,7 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G - name: install CoreOS on control-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -29,7 +29,7 @@ minutes: 8 - name: install CoreOS on control-1 node - command: virt-install --boot 
kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -38,7 +38,7 @@ minutes: 8 - name: install CoreOS on control-2 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate 
--import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -47,7 +47,7 @@ minutes: 8 - name: install CoreOS on compute-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img 
coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -56,6 +56,6 @@ minutes: 8 - name: install CoreOS on compute-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device 
virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images From 7334c5fffe2fec81fd3170fc73883698502c2dbf Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 4 Aug 2021 18:10:08 -0400 Subject: [PATCH 220/885] updated DNS files and fixed create_nodes playbook with correct coreos file names --- create_nodes.yaml | 61 ++++++++++++++++++++++++++ files/distribution.db | 10 ++--- files/distribution.rev | 4 +- roles/kvm_host/tasks/create_nodes.yaml | 10 ++--- 4 files changed, 74 insertions(+), 11 deletions(-) create mode 100644 create_nodes.yaml diff --git a/create_nodes.yaml b/create_nodes.yaml new file mode 100644 index 00000000..9f3d9389 --- /dev/null +++ b/create_nodes.yaml @@ -0,0 +1,61 @@ +--- + +- hosts: kvm_host + become: true + tasks: + + - name: virtualize control-0 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G + + - name: virtualize control-1 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G + + - name: virtualize control-2 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G + + - name: virtualize compute-0 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G + + - name: virtualize compute-1 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G + + - name: install CoreOS on control-0 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign 
ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: pause 8 minutes + pause: + minutes: 8 + + - name: install CoreOS on control-1 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: pause 8 minutes + pause: + minutes: 8 + + - name: install CoreOS on control-2 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive 
if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: pause 8 minutes + pause: + minutes: 8 + + - name: install CoreOS on compute-0 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + + - name: pause 8 minutes + pause: + minutes: 8 + + - name: install CoreOS on compute-1 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images diff --git a/files/distribution.db b/files/distribution.db index 3d5467c6..35be68ba 100644 --- 
a/files/distribution.db +++ b/files/distribution.db @@ -1,10 +1,10 @@ $TTL 86400 @ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com.( - 2020021821 ;Serial - 3600 ;Refresh - 1800 ;Retry - 604800 ;Expire - 86400 ;Minimum TTL + 2020021821 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL ) ;Name Server Information diff --git a/files/distribution.rev b/files/distribution.rev index 2bad9ffa..127ec07b 100644 --- a/files/distribution.rev +++ b/files/distribution.rev @@ -1,5 +1,5 @@ $TTL 86400 -@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com. ( +@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ib$ 2020011800 ;Serial 3600 ;Refresh 1800 ;Retry @@ -26,3 +26,5 @@ bastion IN A 9.60.87.139 135 IN PTR compute-0.distribution.ocpz.wsclab.endicott.ibm.com. 134 IN PTR compute-1.distribution.ocpz.wsclab.endicott.ibm.com. 133 IN PTR bootstrap.distribution.ocpz.wsclab.endicott.ibm.com. +139 IN PTR api-int.distribution.ocpz.wsclab.endicott.ibm.com. +139 IN PTR api.distribution.ocpz.wsclab.endicott.ibm.com. 
diff --git a/roles/kvm_host/tasks/create_nodes.yaml b/roles/kvm_host/tasks/create_nodes.yaml index 83eef633..9f3d9389 100644 --- a/roles/kvm_host/tasks/create_nodes.yaml +++ b/roles/kvm_host/tasks/create_nodes.yaml @@ -20,7 +20,7 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G - name: install CoreOS on control-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -29,7 +29,7 @@ minutes: 8 - name: install CoreOS on control-1 node - command: virt-install --boot 
kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -38,7 +38,7 @@ minutes: 8 - name: install CoreOS on control-2 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate 
--import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -47,7 +47,7 @@ minutes: 8 - name: install CoreOS on compute-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img 
coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images @@ -56,6 +56,6 @@ minutes: 8 - name: install CoreOS on compute-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device 
virtio-blk,serial=ignition,drive=ignition" --noautoconsole args: chdir: /var/lib/libvirt/images From 61757a53b7bdcca567d3a57ea769a2c5c65ebf89 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 4 Aug 2021 19:26:41 -0400 Subject: [PATCH 221/885] made the bootstrap verify wait longer --- roles/bootstrap_server/tasks/bootstrap_verify.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/bootstrap_server/tasks/bootstrap_verify.yaml b/roles/bootstrap_server/tasks/bootstrap_verify.yaml index d47e3d71..4adafd7f 100644 --- a/roles/bootstrap_server/tasks/bootstrap_verify.yaml +++ b/roles/bootstrap_server/tasks/bootstrap_verify.yaml @@ -11,5 +11,5 @@ command: journalctl -u bootkube.service register: result until: result.stdout.find("bootkube.service complete") != -1 - retries: 10 - delay: 20 + retries: 100 + delay: 300 From 319509072a537420f9c9dbbc22b5658199bd8946 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 4 Aug 2021 19:26:41 -0400 Subject: [PATCH 222/885] made the bootstrap verify wait longer --- roles/bootstrap_server/tasks/bootstrap_verify.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/bootstrap_server/tasks/bootstrap_verify.yaml b/roles/bootstrap_server/tasks/bootstrap_verify.yaml index d47e3d71..4adafd7f 100644 --- a/roles/bootstrap_server/tasks/bootstrap_verify.yaml +++ b/roles/bootstrap_server/tasks/bootstrap_verify.yaml @@ -11,5 +11,5 @@ command: journalctl -u bootkube.service register: result until: result.stdout.find("bootkube.service complete") != -1 - retries: 10 - delay: 20 + retries: 100 + delay: 300 From 148eabcaa3fd0b9dcd1b03f0c232e439e2727421 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 4 Aug 2021 19:29:03 -0400 Subject: [PATCH 223/885] edited load_balancer playbook to include enabling haproxy, force overwrite, backup, and use .cfg instead of .cfg.j2 file type --- roles/bastion_server/tasks/load_balancer.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff 
--git a/roles/bastion_server/tasks/load_balancer.yaml b/roles/bastion_server/tasks/load_balancer.yaml index 35849dc0..acfde8ff 100644 --- a/roles/bastion_server/tasks/load_balancer.yaml +++ b/roles/bastion_server/tasks/load_balancer.yaml @@ -13,7 +13,14 @@ - name: move haproxy config file to bastion copy: src: haproxy.cfg - dest: /etc/haproxy/haproxy.cfg.j2 + dest: /etc/haproxy/haproxy.cfg + force: yes + backup: yes + + - name: enable haproxy + systemd: + state: enabled + named: haproxy - name: Start haproxy systemd: From 7c74eb5fa9b004b97098a4defd3a643b5e73245c Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 4 Aug 2021 19:29:03 -0400 Subject: [PATCH 224/885] edited load_balancer playbook to include enabling haproxy, force overwrite, backup, and use .cfg instead of .cfg.j2 file type --- roles/bastion_server/tasks/load_balancer.yaml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/roles/bastion_server/tasks/load_balancer.yaml b/roles/bastion_server/tasks/load_balancer.yaml index 35849dc0..acfde8ff 100644 --- a/roles/bastion_server/tasks/load_balancer.yaml +++ b/roles/bastion_server/tasks/load_balancer.yaml @@ -13,7 +13,14 @@ - name: move haproxy config file to bastion copy: src: haproxy.cfg - dest: /etc/haproxy/haproxy.cfg.j2 + dest: /etc/haproxy/haproxy.cfg + force: yes + backup: yes + + - name: enable haproxy + systemd: + state: enabled + named: haproxy - name: Start haproxy systemd: From e9c6f78593cda745565fc3e9ed8b2f4d673dc1b6 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 5 Aug 2021 10:24:14 -0500 Subject: [PATCH 225/885] adding persistent storage command to support image registry --- files/cluster-pvc.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 files/cluster-pvc.yaml diff --git a/files/cluster-pvc.yaml b/files/cluster-pvc.yaml new file mode 100644 index 00000000..020e81d6 --- /dev/null +++ b/files/cluster-pvc.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + 
name: pv0001 + annotations: + volume.beta.kubernetes.io/mount-options: rw,nfsvers=4,noexec +spec: + capacity: + storage: 50Gi + accessModes: + - ReadWriteOnce + nfs: + path: /mnt/nfs-shares/dist-ocp + server: 9.60.87.222 + persistentVolumeReclaimPolicy: Retain + claimRef: + name: claim1 + namespace: default \ No newline at end of file From 119cbd4360ea44d75dc6b4f0c5cac36267add2fd Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 5 Aug 2021 10:24:14 -0500 Subject: [PATCH 226/885] adding persistent storage command to support image registry --- files/cluster-pvc.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 files/cluster-pvc.yaml diff --git a/files/cluster-pvc.yaml b/files/cluster-pvc.yaml new file mode 100644 index 00000000..020e81d6 --- /dev/null +++ b/files/cluster-pvc.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: pv0001 + annotations: + volume.beta.kubernetes.io/mount-options: rw,nfsvers=4,noexec +spec: + capacity: + storage: 50Gi + accessModes: + - ReadWriteOnce + nfs: + path: /mnt/nfs-shares/dist-ocp + server: 9.60.87.222 + persistentVolumeReclaimPolicy: Retain + claimRef: + name: claim1 + namespace: default \ No newline at end of file From f68ef17931b9737c55ab475ad1f217016b11db72 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 5 Aug 2021 19:01:14 -0500 Subject: [PATCH 227/885] Update cluster-pvc.yaml --- files/cluster-pvc.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/cluster-pvc.yaml b/files/cluster-pvc.yaml index 020e81d6..f2feb6f2 100644 --- a/files/cluster-pvc.yaml +++ b/files/cluster-pvc.yaml @@ -6,7 +6,7 @@ metadata: volume.beta.kubernetes.io/mount-options: rw,nfsvers=4,noexec spec: capacity: - storage: 50Gi + storage: 150Gi accessModes: - ReadWriteOnce nfs: From a8d3db37339d9bdd533d20844c58702ada549878 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 5 Aug 2021 19:01:14 -0500 Subject: [PATCH 228/885] Update cluster-pvc.yaml --- 
files/cluster-pvc.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/cluster-pvc.yaml b/files/cluster-pvc.yaml index 020e81d6..f2feb6f2 100644 --- a/files/cluster-pvc.yaml +++ b/files/cluster-pvc.yaml @@ -6,7 +6,7 @@ metadata: volume.beta.kubernetes.io/mount-options: rw,nfsvers=4,noexec spec: capacity: - storage: 50Gi + storage: 150Gi accessModes: - ReadWriteOnce nfs: From 470511a538428af5a62741ff77705d23ecc9caef Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 6 Aug 2021 08:54:44 -0500 Subject: [PATCH 229/885] edited distribution.rev reverse lookup db to show only FQDN --- files/distribution.rev | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/files/distribution.rev b/files/distribution.rev index 127ec07b..3365a5f5 100644 --- a/files/distribution.rev +++ b/files/distribution.rev @@ -14,12 +14,12 @@ bastion IN A 9.60.87.139 139 IN PTR bastion.distribution.ocpz.wsclab.endicott.ibm.com. ;PTR Record IP address to Hostname -133 IN PTR bootstrap. -138 IN PTR control-0. -137 IN PTR control-1. -136 IN PTR control-2. -135 IN PTR compute-0. -134 IN PTR compute-1. +;133 IN PTR bootstrap. +;138 IN PTR control-0. +;137 IN PTR control-1. +;136 IN PTR control-2. +;135 IN PTR compute-0. +;134 IN PTR compute-1. 138 IN PTR control-0.distribution.ocpz.wsclab.endicott.ibm.com. 137 IN PTR control-1.distribution.ocpz.wsclab.endicott.ibm.com. 136 IN PTR control-2.distribution.ocpz.wsclab.endicott.ibm.com. 
From 34689a613611393ec8684a6442022f99d92a6c14 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 6 Aug 2021 08:54:44 -0500 Subject: [PATCH 230/885] edited distribution.rev reverse lookup db to show only FQDN --- files/distribution.rev | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/files/distribution.rev b/files/distribution.rev index 127ec07b..3365a5f5 100644 --- a/files/distribution.rev +++ b/files/distribution.rev @@ -14,12 +14,12 @@ bastion IN A 9.60.87.139 139 IN PTR bastion.distribution.ocpz.wsclab.endicott.ibm.com. ;PTR Record IP address to Hostname -133 IN PTR bootstrap. -138 IN PTR control-0. -137 IN PTR control-1. -136 IN PTR control-2. -135 IN PTR compute-0. -134 IN PTR compute-1. +;133 IN PTR bootstrap. +;138 IN PTR control-0. +;137 IN PTR control-1. +;136 IN PTR control-2. +;135 IN PTR compute-0. +;134 IN PTR compute-1. 138 IN PTR control-0.distribution.ocpz.wsclab.endicott.ibm.com. 137 IN PTR control-1.distribution.ocpz.wsclab.endicott.ibm.com. 136 IN PTR control-2.distribution.ocpz.wsclab.endicott.ibm.com. From 33a7d7aaea1b95373f8d66e9787647d5fe0ea27e Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 6 Aug 2021 09:14:03 -0500 Subject: [PATCH 231/885] remove dns db duplicates --- files/dns-text-rev.txt | 21 --------------------- files/dns-text.txt | 41 ----------------------------------------- 2 files changed, 62 deletions(-) delete mode 100644 files/dns-text-rev.txt delete mode 100644 files/dns-text.txt diff --git a/files/dns-text-rev.txt b/files/dns-text-rev.txt deleted file mode 100644 index 7202af7c..00000000 --- a/files/dns-text-rev.txt +++ /dev/null @@ -1,21 +0,0 @@ -$TTL 86400 -@ IN SOA bastion-1.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com. ( - 2020011800 ;Serial - 3600 ;Refresh - 1800 ;Retry - 604800 ;Expire - 86400 ;Minimum TTL -) -;Name Server Information -@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. 
-bastion IN A 9.60.87.139 - -;Reverse lookup for Name Server -139 IN PTR bastion.distribution.ocpz.wsclab.endicott.ibm.com. - -;PTR Record IP address to Hostname -138 IN PTR control-0.distribution.ocpz.wsclab.endicott.ibm.com. -137 IN PTR control-1.distribution.ocpz.wsclab.endicott.ibm.com. -136 IN PTR control-2.distribution.ocpz.wsclab.endicott.ibm.com. -135 IN PTR compute-0.distribution.ocpz.wsclab.endicott.ibm.com. -134 IN PTR compute-1.distribution.ocpz.wsclab.endicott.ibm.com. diff --git a/files/dns-text.txt b/files/dns-text.txt deleted file mode 100644 index 384e7dfa..00000000 --- a/files/dns-text.txt +++ /dev/null @@ -1,41 +0,0 @@ -$TTL 86400 -@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com.( - 2020021821 ;Serial - 3600 ;Refresh - 1800 ;Retry - 604800 ;Expire - 86400 ;Minimum TTL -) - -;Name Server Information -@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. - -;IP Address for Name Server -bastion IN A 9.60.87.139 - -;entry for bootstrap host. -bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 - -;entry of your load balancer -haproxy IN A 9.60.87.139 - -;entries for the master nodes -control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 -control-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.137 -control-2.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.136 - -;entry for the bastion host -bastion IN A 9.60.87.139 - -;entries for the worker nodes -compute-0.ocp.home.local IN A 9.60.87.135 -compute-1.ocp.home.local IN A 9.60.87.134 - -;The api identifies the IP of your load balancer. -api.ocp.home.local IN CNAME haproxy.ocp.home.local. -api-int.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. - -;The wildcard also identifies the load balancer. -*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. 
- -;EOF \ No newline at end of file From 9a14335dfe8ce827c40177c30c18696f3df590a1 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Fri, 6 Aug 2021 09:14:03 -0500 Subject: [PATCH 232/885] remove dns db duplicates --- files/dns-text-rev.txt | 21 --------------------- files/dns-text.txt | 41 ----------------------------------------- 2 files changed, 62 deletions(-) delete mode 100644 files/dns-text-rev.txt delete mode 100644 files/dns-text.txt diff --git a/files/dns-text-rev.txt b/files/dns-text-rev.txt deleted file mode 100644 index 7202af7c..00000000 --- a/files/dns-text-rev.txt +++ /dev/null @@ -1,21 +0,0 @@ -$TTL 86400 -@ IN SOA bastion-1.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com. ( - 2020011800 ;Serial - 3600 ;Refresh - 1800 ;Retry - 604800 ;Expire - 86400 ;Minimum TTL -) -;Name Server Information -@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. -bastion IN A 9.60.87.139 - -;Reverse lookup for Name Server -139 IN PTR bastion.distribution.ocpz.wsclab.endicott.ibm.com. - -;PTR Record IP address to Hostname -138 IN PTR control-0.distribution.ocpz.wsclab.endicott.ibm.com. -137 IN PTR control-1.distribution.ocpz.wsclab.endicott.ibm.com. -136 IN PTR control-2.distribution.ocpz.wsclab.endicott.ibm.com. -135 IN PTR compute-0.distribution.ocpz.wsclab.endicott.ibm.com. -134 IN PTR compute-1.distribution.ocpz.wsclab.endicott.ibm.com. diff --git a/files/dns-text.txt b/files/dns-text.txt deleted file mode 100644 index 384e7dfa..00000000 --- a/files/dns-text.txt +++ /dev/null @@ -1,41 +0,0 @@ -$TTL 86400 -@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com.( - 2020021821 ;Serial - 3600 ;Refresh - 1800 ;Retry - 604800 ;Expire - 86400 ;Minimum TTL -) - -;Name Server Information -@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. - -;IP Address for Name Server -bastion IN A 9.60.87.139 - -;entry for bootstrap host. 
-bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.139 - -;entry of your load balancer -haproxy IN A 9.60.87.139 - -;entries for the master nodes -control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 -control-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.137 -control-2.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.136 - -;entry for the bastion host -bastion IN A 9.60.87.139 - -;entries for the worker nodes -compute-0.ocp.home.local IN A 9.60.87.135 -compute-1.ocp.home.local IN A 9.60.87.134 - -;The api identifies the IP of your load balancer. -api.ocp.home.local IN CNAME haproxy.ocp.home.local. -api-int.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. - -;The wildcard also identifies the load balancer. -*.apps.distribution.ocpz.wsclab.endicott.ibm.com IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. - -;EOF \ No newline at end of file From bae56d32f6e32b175dd14945d3f12a6d1f524851 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 6 Aug 2021 15:38:16 -0400 Subject: [PATCH 233/885] deleted test playbooks --- create_nodes.yaml | 61 ----------------------------------------------- 1 file changed, 61 deletions(-) delete mode 100644 create_nodes.yaml diff --git a/create_nodes.yaml b/create_nodes.yaml deleted file mode 100644 index 9f3d9389..00000000 --- a/create_nodes.yaml +++ /dev/null @@ -1,61 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: virtualize control-0 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G - - - name: virtualize control-1 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G - - - name: virtualize control-2 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G - - - 
name: virtualize compute-0 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G - - - name: virtualize compute-1 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G - - - name: install CoreOS on control-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - - name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on control-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - - 
name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on control-2 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - - name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on compute-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - - name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on compute-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda 
coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images From 0b3145e66d2e28dd9d199cea993e431df0cc36ba Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 6 Aug 2021 15:38:16 -0400 Subject: [PATCH 234/885] deleted test playbooks --- create_nodes.yaml | 61 ----------------------------------------------- 1 file changed, 61 deletions(-) delete mode 100644 create_nodes.yaml diff --git a/create_nodes.yaml b/create_nodes.yaml deleted file mode 100644 index 9f3d9389..00000000 --- a/create_nodes.yaml +++ /dev/null @@ -1,61 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: virtualize control-0 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G - - - name: virtualize control-1 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G - - - name: virtualize control-2 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G - - - name: virtualize compute-0 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G - - - name: virtualize compute-1 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G - - - name: install CoreOS 
on control-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - - name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on control-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - - name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on control-2 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img 
coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - - name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on compute-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - - name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on compute-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network 
network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images From 09a2eae5af3c540d68da0b2bd2562db3ed0de8e2 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 6 Aug 2021 17:44:35 -0400 Subject: [PATCH 235/885] Implemented proper use of roles. Lots of changes toward that end. --- files/macvtap.xml.j2 | 6 + main.yaml | 22 +++ roles/bastion_server/tasks/bastion.yaml | 213 --------------------- roles/bastion_server/tasks/get-ocp.yaml | 2 +- roles/bastion_server/tasks/main.yaml | 205 ++++++++++++++++++++ roles/create_bastion/tasks/main.yaml | 7 + roles/create_bootstrap/tasks/main.yaml | 9 + roles/create_compute_nodes/tasks/main.yaml | 21 ++ roles/create_control_nodes/tasks/main.yaml | 37 ++++ roles/get-ocp/files/install-config.yaml | 26 +++ roles/get-ocp/tasks/main.yaml | 87 +++++++++ roles/haproxy/files/haproxy.cfg | 58 ++++++ roles/haproxy/tasks/main.yaml | 22 +++ roles/httpd/tasks/main.yaml | 92 +++++++++ roles/kvm_host/tasks/main.yaml | 17 ++ roles/macvtap/files/macvtap.xml.j2 | 6 + roles/macvtap/tasks/main.yaml | 19 ++ roles/prep_kvm_guests/tasks/main.yaml | 10 + roles/workstations | 1 + 19 files changed, 646 insertions(+), 214 deletions(-) create mode 100644 files/macvtap.xml.j2 create mode 100644 main.yaml delete mode 100644 roles/bastion_server/tasks/bastion.yaml create mode 100644 roles/bastion_server/tasks/main.yaml create mode 100644 roles/create_bastion/tasks/main.yaml create mode 100644 roles/create_bootstrap/tasks/main.yaml create mode 100644 roles/create_compute_nodes/tasks/main.yaml create mode 100644 roles/create_control_nodes/tasks/main.yaml create mode 100644 roles/get-ocp/files/install-config.yaml create mode 100644 roles/get-ocp/tasks/main.yaml create mode 100644 roles/haproxy/files/haproxy.cfg create mode 100644 roles/haproxy/tasks/main.yaml create mode 100644 
roles/httpd/tasks/main.yaml create mode 100644 roles/kvm_host/tasks/main.yaml create mode 100644 roles/macvtap/files/macvtap.xml.j2 create mode 100644 roles/macvtap/tasks/main.yaml create mode 100644 roles/prep_kvm_guests/tasks/main.yaml create mode 100644 roles/workstations diff --git a/files/macvtap.xml.j2 b/files/macvtap.xml.j2 new file mode 100644 index 00000000..388477ea --- /dev/null +++ b/files/macvtap.xml.j2 @@ -0,0 +1,6 @@ + + macvtap-net + + + + diff --git a/main.yaml b/main.yaml new file mode 100644 index 00000000..c3db35ea --- /dev/null +++ b/main.yaml @@ -0,0 +1,22 @@ +--- + +- hosts: kvm_host + become: true + roles: + - macvtap + - create_bastion + +- hosts: bastion_server + become: true + roles: + - haproxy + - httpd + - get-ocp + +- hosts: kvm_host + become: true + roles: + - prep_kvm_guests + - create_bootstrap + - create_control_nodes + - create_compute_nodes diff --git a/roles/bastion_server/tasks/bastion.yaml b/roles/bastion_server/tasks/bastion.yaml deleted file mode 100644 index 85f4ede6..00000000 --- a/roles/bastion_server/tasks/bastion.yaml +++ /dev/null @@ -1,213 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - -## from start_bastion_install.yaml - - - name: start bastion install process - community.libvirt.virt: - name: bastion - memory: 4096 - vcpus: 2 - disk size: 30 - cdrom: /var/lib/libvirt/images/rhel83.iso - accelerate: yes - import: yes - network: network=macvtap-net - extra-args: ""ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion.distribution.ocpz.wsclab.endicott.ibm> - location: /rhcos-install - qemu-commandline: "-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,re> - noautoconsole: yes - -## from install_haproxy.yaml - -##- name: install haproxy -## dnf: -## - haproxy - -## required plugin: ansible-galaxy collection install community.general - -##- name: install haproxy -## dnf: -## - haproxy - - - name: move haproxy config file to bastion - copy: - src: haproxy.cfg - dest: 
/etc/haproxy/haproxy.cfg.j2 - - - name: Start haproxy - systemd: - state: started - name: haproxy - -## from http_setup.yaml - -##- name: update repository index -## dnf: -## update_cache: yes - -##- name: install httpd -## dnf: -## name: httpd -## state: latest - - - name: Ensure the default Apache port is 8080 - replace: - path: /etc/httpd/conf/httpd.conf - regexp: '^Listen 80 ' - replace: 'Listen 8080' - backup: yes - - - name: Ensure the SSL default port is 4443 - replace: - path: /etc/httpd/conf.d/ssl.conf - regexp: '^Listen 443 https' - replace: 'Listen 4443 https' - backup: yes - - - name: restart httpd to reflect changes to port - service: - name: httpd - state: restarted - - - name: Allow all access to tcp port 8080 - community.general.ufw: - rule: allow - port: '8080' - proto: tcp - - - name: Allow all access to tcp port 80 - community.general.ufw: - rule: allow - port: '80' - proto: tcp - - - name: Allow all access to tcp port 443 - community.general.ufw: - rule: allow - port: '443' - proto: tcp - - - name: Allow all access to tcp port 4443 - community.general.ufw: - rule: allow - port: '4443' - proto: tcp - - - name: create directory bin for mirrors - file: - path: /var/www/html/bin - state: directory - mode: '0755' - - - name: create directory bootstrap for mirrors - file: - path: /var/www/html/bootstrap - state: directory - mode: '0755' - -##link got cut off - - name: get mirrors 1 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> - dest: /var/www/html/bin - remote_src: yes - mode: '0755' - -##link got cut off - - name: get mirrors 2 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> - dest: /var/www/html/bin - remote_src: yes - mode: '0755' -##link got cut off - - name: get mirrors 3 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> - dest: /var/www/html/bin - 
remote_src: yes - mode: '0755' - - - name: check to make sure httpd is started - service: - name: httpd - state: started - - - name: check httpd status - service: - state: started - name: httpd - - - name: change mirror 1 file name - command: mv /var/www/html/bin/rhcos-live-kernel-s390x /var/www/html/bin/rhcos-kernel - - - name: change mirror 2 file name - command: mv /var/www/html/bin/rhcos-live-initramfs.s390x.img /var/www/html/bin/rhcos-initramfs.img - - - name: change mirror 3 file name - command: mv /var/www/html/bin/rhcos-live-rootfs.s390x.img /var/www/html/bin/rhcos-rootfs.img - -##from get-ocp.yaml - - - name: create OCP download landing directory - file: - path: /ocpinst/ - state: directory - - - name: Unzip OCP Client - ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz - dest: /ocpinst/ - remote_src: yes - - - name: Unzip OCP Installer - ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz - dest: /ocpinst/ - remote_src: yes - - - name: Copy kubectl file - ansible.builtin.copy: - src: /ocpinst/kubectl - dest: /usr/local/bin/kubectl - remote_src: yes - owner: root - group: root - mode: '0755' - - - name: Copy oc file - ansible.builtin.copy: - src: /ocpinst/oc - dest: /usr/local/bin/oc - remote_src: yes - owner: root - group: root - mode: '0755' - - - name: Copy openshift-install file - ansible.builtin.copy: - src: /ocpinst/openshift-install - dest: /usr/local/bin/openshift-install - remote_src: yes - owner: root - group: root - mode: '0755' - -## Rough draft task here. To be completed once we start using templates. Use ansible.builtin.template to fill install_config.yaml with correct variables. 
- -## - name: create install-config.yaml -## file: -## path: "~/files/install-config.yaml" -## state: touch -## -## - name: Fill contents of install-config.yaml file -## ansible.builtin.template: -## src: install-config.yaml -## dest: "~/files/install-config.yaml" -## remote_src: yes - -## also still needs the ignition files task and the prepare the KVM OCP guests tasks diff --git a/roles/bastion_server/tasks/get-ocp.yaml b/roles/bastion_server/tasks/get-ocp.yaml index ec520cb9..5b42a9e5 100644 --- a/roles/bastion_server/tasks/get-ocp.yaml +++ b/roles/bastion_server/tasks/get-ocp.yaml @@ -84,7 +84,7 @@ dest: /var/www/html/ignition remote_src: yes - - name: Copy worker Ignition file to web server + - name: Copy compute Ignition file to web server copy: src: /ocpinst/worker.ign dest: /var/www/html/ignition diff --git a/roles/bastion_server/tasks/main.yaml b/roles/bastion_server/tasks/main.yaml new file mode 100644 index 00000000..cb328f74 --- /dev/null +++ b/roles/bastion_server/tasks/main.yaml @@ -0,0 +1,205 @@ +#This is the main task book for the bastion server to set up the load balancer, http server, and download OCP install and ignition files + +# required plugin: ansible-galaxy collection install community.general + +#- name: install haproxy +# dnf: +# - haproxy + +- name: move haproxy config file to bastion + copy: + src: haproxy.cfg + dest: /etc/haproxy/haproxy.cfg + force: yes + backup: yes + +- name: enable haproxy + systemd: + state: enabled + named: haproxy + +- name: Start haproxy + systemd: + state: restarted + name: haproxy + +## - name: update repository index +## dnf: +## update_cache: yes + +## - name: install httpd +## dnf: +## name: httpd +## state: latest + +- name: Ensure the default Apache port is 8080 + replace: + path: /etc/httpd/conf/httpd.conf + regexp: '^Listen 80' + replace: 'Listen 8080' + backup: yes + +- name: Ensure the SSL default port is 4443 + replace: + path: /etc/httpd/conf.d/ssl.conf + regexp: '^Listen 443 https' + replace: 
'Listen 4443 https' + backup: yes + +- name: restart httpd to reflect changes to port + service: + name: httpd + state: restarted + +# - name: Allow all access to tcp port 8080 +# community.general.ufw: +# rule: allow +# port: '8080' +# proto: tcp +# +# - name: Allow all access to tcp port 80 +# community.general.ufw: +# rule: allow +# port: '80' +# proto: tcp +# +# - name: Allow all access to tcp port 443 +# community.general.ufw: +# rule: allow +# port: '443' +# proto: tcp +# +# - name: Allow all access to tcp port 4443 +# community.general.ufw: +# rule: allow +# port: '4443' +# proto: tcp + +- name: create directory bin for mirrors + file: + path: /var/www/html/bin + state: directory + mode: '0755' + +- name: create directory bootstrap for mirrors + file: + path: /var/www/html/bootstrap + state: directory + mode: '0755' + +- name: get mirrors 1 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + +- name: get mirrors 2 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + +- name: get mirrors 3 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + +- name: check to make sure httpd is started + service: + name: httpd + state: started + +- name: check httpd status + service: + state: started + name: httpd + +- name: create OCP download landing directory + file: + path: /ocpinst/ + state: directory + +- name: Unzip OCP Client + ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + +- name: Unzip OCP Installer + ansible.builtin.unarchive: + 
src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + +- name: Copy kubectl file + ansible.builtin.copy: + src: /ocpinst/kubectl + dest: /usr/local/bin/kubectl + remote_src: yes + owner: root + group: root + mode: '0755' + +- name: Copy oc file + ansible.builtin.copy: + src: /ocpinst/oc + dest: /usr/local/bin/oc + remote_src: yes + owner: root + group: root + mode: '0755' + +- name: Copy openshift-install file + ansible.builtin.copy: + src: /ocpinst/openshift-install + dest: /usr/local/bin/openshift-install + remote_src: yes + owner: root + group: root + mode: '0755' + +- name: Copy install-config.yaml to ocp install directory + copy: + src: install-config.yaml + dest: /ocpinst/install-config.yaml + +- name: Create Manifests + command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ + become: yes + +- name: Set mastersSchedulable parameter to False + replace: + path: /ocpinst/manifests/cluster-scheduler-02-config.yml + regexp: ': true' + replace: ': false' + +- name: Create Ignition files + command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ + become: yes + +- name: create Ignition directory on webserver + file: + path: /var/www/html/ignition + state: directory + +- name: Copy bootstrap Ignition file to web server + copy: + src: /ocpinst/bootstrap.ign + dest: /var/www/html/ignition + remote_src: yes + +- name: Copy control plane Ignition file to web server + copy: + src: /ocpinst/master.ign + dest: /var/www/html/ignition + remote_src: yes + +- name: Copy worker Ignition file to web server + copy: + src: /ocpinst/worker.ign + dest: /var/www/html/ignition + remote_src: yes diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml new file mode 100644 index 00000000..ac0fa7f3 --- /dev/null +++ b/roles/create_bastion/tasks/main.yaml @@ -0,0 +1,7 @@ +--- + +- name: virtualize bastion server + command: qemu-img 
create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G + +- name: start bastion install + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml new file mode 100644 index 00000000..200fc052 --- /dev/null +++ b/roles/create_bootstrap/tasks/main.yaml @@ -0,0 +1,9 @@ +--- + +- name: virtualize bootstrap + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G + +- name: boot bootstrap + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml new file mode 100644 
index 00000000..ec726a66 --- /dev/null +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -0,0 +1,21 @@ +--- + +- name: virtualize compute-0 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G + +- name: virtualize compute-1 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G + +- name: install CoreOS on compute-0 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + +- name: pause 8 minutes + pause: + minutes: 8 + +- name: install CoreOS on compute-1 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on 
-device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml new file mode 100644 index 00000000..190c3c05 --- /dev/null +++ b/roles/create_control_nodes/tasks/main.yaml @@ -0,0 +1,37 @@ +--- + +- name: virtualize control-0 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G + +- name: virtualize control-1 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G + +- name: virtualize control-2 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G + +- name: install CoreOS on control-0 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + +- name: pause 8 minutes + pause: + minutes: 8 + +- name: install CoreOS on control-1 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img 
coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + +- name: pause 8 minutes + pause: + minutes: 8 + +- name: install CoreOS on control-2 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + +- name: pause 8 minutes + pause: + minutes: 8 diff --git a/roles/get-ocp/files/install-config.yaml b/roles/get-ocp/files/install-config.yaml new file mode 100644 index 00000000..b761fc72 --- /dev/null +++ b/roles/get-ocp/files/install-config.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +baseDomain: ocpz.wsclab.endicott.ibm.com +compute: +- hyperthreading: Enabled + name: worker + replicas: 0 + architecture : s390x +controlPlane: + hyperthreading: Enabled + name: master + replicas: 3 + architecture : s390x +metadata: + name: distribution +networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + 
networkType: OpenShiftSDN + serviceNetwork: + - 172.30.0.0/16 +platform: + none: {} +fips: false +pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhl
RWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' +sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDudjhbUkA7ekvuzZe7WpkkF2KOoLGwWU05Ch6Ty0OWDW0vdbtrG7TIgy8FYOvBnArnIJcy1AP6tpH6BcbmHRIosVFbZ3GrLkKEOroBD+8+6qGzN+fIXBzss20iRvMvDdlDaVJ1BqmTvNnkbeWdgCg1MbICQ1OYOExm+xgvGWHOjoJkUzkw3oBnAnpZHHTkvM2dY0SNtzV9i5FcSvdBM2kindvBpvDZ9PjTGkcFMnbVRpBXUV+axNg4yuX0YWFDWOFlQ4DjvasdN6jMQ2MrWBYtYF1RAklsOhSXTPKAYaxZZsfeEBnMVUFshaDYy7PlbCXcUEVEFEkMlTAbe//bBXx5l+2fVFtOUyhfws28bLg+rlJLivINoLW4EB+ERViYBTCEx5njl9mK9EcPpCJlLTR82kr8vEwE/sJ9ro3dH/aEUQcTkVYqlOCw2lsFZpkZtthArknZiHk91OtWgXj4GsAjihehfBvEL/GbEuO0HTzvl+QiTnC+mL4KcObl31K9LdlAbKA53l6G68py0u7plUervFN8H7rIFbv4ndUTAo2rODty/QoYH8jBu460LLgLyaRfaDFrGitnAdSiCS/8dEr8iAVxj5X5sgcTQlsJYyvMv61YCzItmlipDpbf+M/mJyJuxyuYWqudItOIWchFktstcpp3dx1iM/pr0o4HitpeQ== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml new file mode 100644 index 00000000..77c029be --- /dev/null +++ b/roles/get-ocp/tasks/main.yaml @@ -0,0 +1,87 @@ +--- + +- name: create OCP download landing directory + file: + path: /ocpinst/ + state: directory + +- name: Unzip OCP Client + 
ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + +- name: Unzip OCP Installer + ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + +- name: Copy kubectl file + ansible.builtin.copy: + src: /ocpinst/kubectl + dest: /usr/local/bin/kubectl + remote_src: yes + owner: root + group: root + mode: '0755' + +- name: Copy oc file + ansible.builtin.copy: + src: /ocpinst/oc + dest: /usr/local/bin/oc + remote_src: yes + owner: root + group: root + mode: '0755' + +- name: Copy openshift-install file + ansible.builtin.copy: + src: /ocpinst/openshift-install + dest: /usr/local/bin/openshift-install + remote_src: yes + owner: root + group: root + mode: '0755' + +- name: Copy install-config.yaml to ocp install directory + copy: + src: install-config.yaml + dest: /ocpinst/install-config.yaml + +- name: Create Manifests + command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ + become: yes + +- name: Set mastersSchedulable parameter to False + replace: + path: /ocpinst/manifests/cluster-scheduler-02-config.yml + regexp: ': true' + replace: ': false' + +- name: Create Ignition files + command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ + become: yes + +- name: create Ignition directory on webserver + file: + path: /var/www/html/ignition + state: directory + +- name: Copy bootstrap Ignition file to web server + copy: + src: /ocpinst/bootstrap.ign + dest: /var/www/html/ignition + remote_src: yes + +- name: Copy control plane Ignition file to web server + copy: + src: /ocpinst/master.ign + dest: /var/www/html/ignition + remote_src: yes + +- name: Copy worker Ignition file to web server + copy: + src: /ocpinst/worker.ign + dest: /var/www/html/ignition + remote_src: yes diff --git 
a/roles/haproxy/files/haproxy.cfg b/roles/haproxy/files/haproxy.cfg new file mode 100644 index 00000000..f7b1f7f0 --- /dev/null +++ b/roles/haproxy/files/haproxy.cfg @@ -0,0 +1,58 @@ +global + log 127.0.0.1 local2 + pidfile /var/run/haproxy.pid + maxconn 4000 + daemon +defaults + mode http + log global + option dontlognull + option http-server-close + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout http-keep-alive 10s + timeout check 10s + maxconn 3000 +frontend stats + bind *:1936 + mode http + log global + maxconn 10 + stats enable + stats hide-version + stats refresh 30s + stats show-node + stats show-desc Stats for distribution cluster + stats auth admin:distribution + stats uri /stats +listen api-server-6443 + bind *:6443 + mode tcp + server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s backup + server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s + server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s + server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s +listen machine-config-server-22623 + bind *:22623 + mode tcp + server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s backup + server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s +listen ingress-router-443 + bind *:443 + mode tcp + balance source + server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s + server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s +listen ingress-router-80 + bind *:80 + mode tcp + balance source + server compute-0 
compute-0.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s + server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml new file mode 100644 index 00000000..589622f5 --- /dev/null +++ b/roles/haproxy/tasks/main.yaml @@ -0,0 +1,22 @@ +--- + +#- name: install haproxy +# dnf: +# - haproxy + +- name: move haproxy config file to bastion + copy: + src: haproxy.cfg + dest: /etc/haproxy/haproxy.cfg + force: yes + backup: yes + +- name: enable haproxy + systemd: + state: enabled + named: haproxy + +- name: Start haproxy + systemd: + state: restarted + name: haproxy diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml new file mode 100644 index 00000000..20948d25 --- /dev/null +++ b/roles/httpd/tasks/main.yaml @@ -0,0 +1,92 @@ +--- + +#- name: install httpd +# dnf: +# name: httpd +# state: latest + +- name: Ensure the default Apache port is 8080 + replace: + path: /etc/httpd/conf/httpd.conf + regexp: '^Listen 80' + replace: 'Listen 8080' + backup: yes + +- name: Ensure the SSL default port is 4443 + replace: + path: /etc/httpd/conf.d/ssl.conf + regexp: '^Listen 443 https' + replace: 'Listen 4443 https' + backup: yes + +- name: restart httpd to reflect changes to port + service: + name: httpd + state: restarted + +#- name: Allow all access to tcp port 8080 +# community.general.ufw: +# rule: allow +# port: '8080' +# proto: tcp +# +#- name: Allow all access to tcp port 80 +# community.general.ufw: +# rule: allow +# port: '80' +# proto: tcp +# +#- name: Allow all access to tcp port 443 +# community.general.ufw: +# rule: allow +# port: '443' +# proto: tcp +# +#- name: Allow all access to tcp port 4443 +# community.general.ufw: +# rule: allow +# port: '4443' +# proto: tcp + +- name: create directory bin for mirrors + file: + path: /var/www/html/bin + state: directory + mode: '0755' + +- name: create directory bootstrap for mirrors + file: + path: 
/var/www/html/bootstrap + state: directory + mode: '0755' + +- name: get mirrors 1 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + +- name: get mirrors 2 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + +- name: get mirrors 3 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + +- name: check to make sure httpd is started + service: + name: httpd + state: started + +- name: check httpd status + service: + state: started + name: httpd diff --git a/roles/kvm_host/tasks/main.yaml b/roles/kvm_host/tasks/main.yaml new file mode 100644 index 00000000..5ba167c8 --- /dev/null +++ b/roles/kvm_host/tasks/main.yaml @@ -0,0 +1,17 @@ +- name: Set up macvtap bridge + community.libvirt.virt_net: + command: define + name: macvtap-net + autostart: true + xml: "{{ lookup ('template', 'macvtap.xml.j2') }}" + +- name: Start macvtap-net + community.libvirt.virt_net: + autostart: yes + command: start + name: macvtap-net + +- name: Set autostart for macvtap-net + community.libvirt.virt_net: + autostart: yes + name: macvtap-net diff --git a/roles/macvtap/files/macvtap.xml.j2 b/roles/macvtap/files/macvtap.xml.j2 new file mode 100644 index 00000000..388477ea --- /dev/null +++ b/roles/macvtap/files/macvtap.xml.j2 @@ -0,0 +1,6 @@ + + macvtap-net + + + + diff --git a/roles/macvtap/tasks/main.yaml b/roles/macvtap/tasks/main.yaml new file mode 100644 index 00000000..af7daa8b --- /dev/null +++ b/roles/macvtap/tasks/main.yaml @@ -0,0 +1,19 @@ +--- + +- name: Set up macvtap bridge + community.libvirt.virt_net: + command: define + name: macvtap-net + autostart: true + xml: "{{ 
lookup ('template', 'macvtap.xml.j2') }}" + +- name: Start macvtap-net + community.libvirt.virt_net: + autostart: yes + command: start + name: macvtap-net + +- name: Set autostart for macvtap-net + community.libvirt.virt_net: + autostart: yes + name: macvtap-net diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml new file mode 100644 index 00000000..0c2afbde --- /dev/null +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -0,0 +1,10 @@ +--- + +- name: get rhcos qcow2 files + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz + dest: /var/lib/libvirt/images/ + + +- name: Unzip OCP dependencies + command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz diff --git a/roles/workstations b/roles/workstations new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/roles/workstations @@ -0,0 +1 @@ +--- From b00ac32912c06b41276ea95c14bb497751ab9124 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 6 Aug 2021 17:44:35 -0400 Subject: [PATCH 236/885] Implemented proper use of roles. Lots of changes toward that end. 
--- files/macvtap.xml.j2 | 6 + main.yaml | 22 +++ roles/bastion_server/tasks/bastion.yaml | 213 --------------------- roles/bastion_server/tasks/get-ocp.yaml | 2 +- roles/bastion_server/tasks/main.yaml | 205 ++++++++++++++++++++ roles/create_bastion/tasks/main.yaml | 7 + roles/create_bootstrap/tasks/main.yaml | 9 + roles/create_compute_nodes/tasks/main.yaml | 21 ++ roles/create_control_nodes/tasks/main.yaml | 37 ++++ roles/get-ocp/files/install-config.yaml | 26 +++ roles/get-ocp/tasks/main.yaml | 87 +++++++++ roles/haproxy/files/haproxy.cfg | 58 ++++++ roles/haproxy/tasks/main.yaml | 22 +++ roles/httpd/tasks/main.yaml | 92 +++++++++ roles/kvm_host/tasks/main.yaml | 17 ++ roles/macvtap/files/macvtap.xml.j2 | 6 + roles/macvtap/tasks/main.yaml | 19 ++ roles/prep_kvm_guests/tasks/main.yaml | 10 + roles/workstations | 1 + 19 files changed, 646 insertions(+), 214 deletions(-) create mode 100644 files/macvtap.xml.j2 create mode 100644 main.yaml delete mode 100644 roles/bastion_server/tasks/bastion.yaml create mode 100644 roles/bastion_server/tasks/main.yaml create mode 100644 roles/create_bastion/tasks/main.yaml create mode 100644 roles/create_bootstrap/tasks/main.yaml create mode 100644 roles/create_compute_nodes/tasks/main.yaml create mode 100644 roles/create_control_nodes/tasks/main.yaml create mode 100644 roles/get-ocp/files/install-config.yaml create mode 100644 roles/get-ocp/tasks/main.yaml create mode 100644 roles/haproxy/files/haproxy.cfg create mode 100644 roles/haproxy/tasks/main.yaml create mode 100644 roles/httpd/tasks/main.yaml create mode 100644 roles/kvm_host/tasks/main.yaml create mode 100644 roles/macvtap/files/macvtap.xml.j2 create mode 100644 roles/macvtap/tasks/main.yaml create mode 100644 roles/prep_kvm_guests/tasks/main.yaml create mode 100644 roles/workstations diff --git a/files/macvtap.xml.j2 b/files/macvtap.xml.j2 new file mode 100644 index 00000000..388477ea --- /dev/null +++ b/files/macvtap.xml.j2 @@ -0,0 +1,6 @@ + + macvtap-net + + + + diff 
--git a/main.yaml b/main.yaml new file mode 100644 index 00000000..c3db35ea --- /dev/null +++ b/main.yaml @@ -0,0 +1,22 @@ +--- + +- hosts: kvm_host + become: true + roles: + - macvtap + - create_bastion + +- hosts: bastion_server + become: true + roles: + - haproxy + - httpd + - get-ocp + +- hosts: kvm_host + become: true + roles: + - prep_kvm_guests + - create_bootstrap + - create_control_nodes + - create_compute_nodes diff --git a/roles/bastion_server/tasks/bastion.yaml b/roles/bastion_server/tasks/bastion.yaml deleted file mode 100644 index 85f4ede6..00000000 --- a/roles/bastion_server/tasks/bastion.yaml +++ /dev/null @@ -1,213 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - -## from start_bastion_install.yaml - - - name: start bastion install process - community.libvirt.virt: - name: bastion - memory: 4096 - vcpus: 2 - disk size: 30 - cdrom: /var/lib/libvirt/images/rhel83.iso - accelerate: yes - import: yes - network: network=macvtap-net - extra-args: ""ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion.distribution.ocpz.wsclab.endicott.ibm> - location: /rhcos-install - qemu-commandline: "-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,re> - noautoconsole: yes - -## from install_haproxy.yaml - -##- name: install haproxy -## dnf: -## - haproxy - -## required plugin: ansible-galaxy collection install community.general - -##- name: install haproxy -## dnf: -## - haproxy - - - name: move haproxy config file to bastion - copy: - src: haproxy.cfg - dest: /etc/haproxy/haproxy.cfg.j2 - - - name: Start haproxy - systemd: - state: started - name: haproxy - -## from http_setup.yaml - -##- name: update repository index -## dnf: -## update_cache: yes - -##- name: install httpd -## dnf: -## name: httpd -## state: latest - - - name: Ensure the default Apache port is 8080 - replace: - path: /etc/httpd/conf/httpd.conf - regexp: '^Listen 80 ' - replace: 'Listen 8080' - backup: yes - - - name: Ensure the SSL default port is 4443 - 
replace: - path: /etc/httpd/conf.d/ssl.conf - regexp: '^Listen 443 https' - replace: 'Listen 4443 https' - backup: yes - - - name: restart httpd to reflect changes to port - service: - name: httpd - state: restarted - - - name: Allow all access to tcp port 8080 - community.general.ufw: - rule: allow - port: '8080' - proto: tcp - - - name: Allow all access to tcp port 80 - community.general.ufw: - rule: allow - port: '80' - proto: tcp - - - name: Allow all access to tcp port 443 - community.general.ufw: - rule: allow - port: '443' - proto: tcp - - - name: Allow all access to tcp port 4443 - community.general.ufw: - rule: allow - port: '4443' - proto: tcp - - - name: create directory bin for mirrors - file: - path: /var/www/html/bin - state: directory - mode: '0755' - - - name: create directory bootstrap for mirrors - file: - path: /var/www/html/bootstrap - state: directory - mode: '0755' - -##link got cut off - - name: get mirrors 1 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> - dest: /var/www/html/bin - remote_src: yes - mode: '0755' - -##link got cut off - - name: get mirrors 2 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> - dest: /var/www/html/bin - remote_src: yes - mode: '0755' -##link got cut off - - name: get mirrors 3 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-liv> - dest: /var/www/html/bin - remote_src: yes - mode: '0755' - - - name: check to make sure httpd is started - service: - name: httpd - state: started - - - name: check httpd status - service: - state: started - name: httpd - - - name: change mirror 1 file name - command: mv /var/www/html/bin/rhcos-live-kernel-s390x /var/www/html/bin/rhcos-kernel - - - name: change mirror 2 file name - command: mv /var/www/html/bin/rhcos-live-initramfs.s390x.img /var/www/html/bin/rhcos-initramfs.img - - - name: 
change mirror 3 file name - command: mv /var/www/html/bin/rhcos-live-rootfs.s390x.img /var/www/html/bin/rhcos-rootfs.img - -##from get-ocp.yaml - - - name: create OCP download landing directory - file: - path: /ocpinst/ - state: directory - - - name: Unzip OCP Client - ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz - dest: /ocpinst/ - remote_src: yes - - - name: Unzip OCP Installer - ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz - dest: /ocpinst/ - remote_src: yes - - - name: Copy kubectl file - ansible.builtin.copy: - src: /ocpinst/kubectl - dest: /usr/local/bin/kubectl - remote_src: yes - owner: root - group: root - mode: '0755' - - - name: Copy oc file - ansible.builtin.copy: - src: /ocpinst/oc - dest: /usr/local/bin/oc - remote_src: yes - owner: root - group: root - mode: '0755' - - - name: Copy openshift-install file - ansible.builtin.copy: - src: /ocpinst/openshift-install - dest: /usr/local/bin/openshift-install - remote_src: yes - owner: root - group: root - mode: '0755' - -## Rough draft task here. To be completed once we start using templates. Use ansible.builtin.template to fill install_config.yaml with correct variables. 
- -## - name: create install-config.yaml -## file: -## path: "~/files/install-config.yaml" -## state: touch -## -## - name: Fill contents of install-config.yaml file -## ansible.builtin.template: -## src: install-config.yaml -## dest: "~/files/install-config.yaml" -## remote_src: yes - -## also still needs the ignition files task and the prepare the KVM OCP guests tasks diff --git a/roles/bastion_server/tasks/get-ocp.yaml b/roles/bastion_server/tasks/get-ocp.yaml index ec520cb9..5b42a9e5 100644 --- a/roles/bastion_server/tasks/get-ocp.yaml +++ b/roles/bastion_server/tasks/get-ocp.yaml @@ -84,7 +84,7 @@ dest: /var/www/html/ignition remote_src: yes - - name: Copy worker Ignition file to web server + - name: Copy compute Ignition file to web server copy: src: /ocpinst/worker.ign dest: /var/www/html/ignition diff --git a/roles/bastion_server/tasks/main.yaml b/roles/bastion_server/tasks/main.yaml new file mode 100644 index 00000000..cb328f74 --- /dev/null +++ b/roles/bastion_server/tasks/main.yaml @@ -0,0 +1,205 @@ +#This is the main task book for the bastion server to set up the load balancer, http server, and download OCP install and ignition files + +# required plugin: ansible-galaxy collection install community.general + +#- name: install haproxy +# dnf: +# - haproxy + +- name: move haproxy config file to bastion + copy: + src: haproxy.cfg + dest: /etc/haproxy/haproxy.cfg + force: yes + backup: yes + +- name: enable haproxy + systemd: + enabled: true + name: haproxy + +- name: Start haproxy + systemd: + state: restarted + name: haproxy + +## - name: update repository index +## dnf: +## update_cache: yes + +## - name: install httpd +## dnf: +## name: httpd +## state: latest + +- name: Ensure the default Apache port is 8080 + replace: + path: /etc/httpd/conf/httpd.conf + regexp: '^Listen 80$' + replace: 'Listen 8080' + backup: yes + +- name: Ensure the SSL default port is 4443 + replace: + path: /etc/httpd/conf.d/ssl.conf + regexp: '^Listen 443 https' + replace: 
'Listen 4443 https' + backup: yes + +- name: restart httpd to reflect changes to port + service: + name: httpd + state: restarted + +# - name: Allow all access to tcp port 8080 +# community.general.ufw: +# rule: allow +# port: '8080' +# proto: tcp +# +# - name: Allow all access to tcp port 80 +# community.general.ufw: +# rule: allow +# port: '80' +# proto: tcp +# +# - name: Allow all access to tcp port 443 +# community.general.ufw: +# rule: allow +# port: '443' +# proto: tcp +# +# - name: Allow all access to tcp port 4443 +# community.general.ufw: +# rule: allow +# port: '4443' +# proto: tcp + +- name: create directory bin for mirrors + file: + path: /var/www/html/bin + state: directory + mode: '0755' + +- name: create directory bootstrap for mirrors + file: + path: /var/www/html/bootstrap + state: directory + mode: '0755' + +- name: get mirrors 1 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + +- name: get mirrors 2 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + +- name: get mirrors 3 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + +- name: check to make sure httpd is started + service: + name: httpd + state: started + +- name: check httpd status + service: + state: started + name: httpd + +- name: create OCP download landing directory + file: + path: /ocpinst/ + state: directory + +- name: Unzip OCP Client + ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + +- name: Unzip OCP Installer + ansible.builtin.unarchive: + 
src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + +- name: Copy kubectl file + ansible.builtin.copy: + src: /ocpinst/kubectl + dest: /usr/local/bin/kubectl + remote_src: yes + owner: root + group: root + mode: '0755' + +- name: Copy oc file + ansible.builtin.copy: + src: /ocpinst/oc + dest: /usr/local/bin/oc + remote_src: yes + owner: root + group: root + mode: '0755' + +- name: Copy openshift-install file + ansible.builtin.copy: + src: /ocpinst/openshift-install + dest: /usr/local/bin/openshift-install + remote_src: yes + owner: root + group: root + mode: '0755' + +- name: Copy install-config.yaml to ocp install directory + copy: + src: install-config.yaml + dest: /ocpinst/install-config.yaml + +- name: Create Manifests + command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ + become: yes + +- name: Set mastersSchedulable parameter to False + replace: + path: /ocpinst/manifests/cluster-scheduler-02-config.yml + regexp: ': true' + replace: ': false' + +- name: Create Ignition files + command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ + become: yes + +- name: create Ignition directory on webserver + file: + path: /var/www/html/ignition + state: directory + +- name: Copy bootstrap Ignition file to web server + copy: + src: /ocpinst/bootstrap.ign + dest: /var/www/html/ignition + remote_src: yes + +- name: Copy control plane Ignition file to web server + copy: + src: /ocpinst/master.ign + dest: /var/www/html/ignition + remote_src: yes + +- name: Copy worker Ignition file to web server + copy: + src: /ocpinst/worker.ign + dest: /var/www/html/ignition + remote_src: yes diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml new file mode 100644 index 00000000..ac0fa7f3 --- /dev/null +++ b/roles/create_bastion/tasks/main.yaml @@ -0,0 +1,7 @@ +--- + +- name: virtualize bastion server + command: qemu-img 
create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G + +- name: start bastion install + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml new file mode 100644 index 00000000..200fc052 --- /dev/null +++ b/roles/create_bootstrap/tasks/main.yaml @@ -0,0 +1,9 @@ +--- + +- name: virtualize bootstrap + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G + +- name: boot bootstrap + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml new file mode 100644 
index 00000000..ec726a66 --- /dev/null +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -0,0 +1,21 @@ +--- + +- name: virtualize compute-0 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G + +- name: virtualize compute-1 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G + +- name: install CoreOS on compute-0 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + +- name: pause 8 minutes + pause: + minutes: 8 + +- name: install CoreOS on compute-1 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on 
-device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml new file mode 100644 index 00000000..190c3c05 --- /dev/null +++ b/roles/create_control_nodes/tasks/main.yaml @@ -0,0 +1,37 @@ +--- + +- name: virtualize control-0 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G + +- name: virtualize control-1 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G + +- name: virtualize control-2 node + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G + +- name: install CoreOS on control-0 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + +- name: pause 8 minutes + pause: + minutes: 8 + +- name: install CoreOS on control-1 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img 
coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + +- name: pause 8 minutes + pause: + minutes: 8 + +- name: install CoreOS on control-2 node + command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + args: + chdir: /var/lib/libvirt/images + +- name: pause 8 minutes + pause: + minutes: 8 diff --git a/roles/get-ocp/files/install-config.yaml b/roles/get-ocp/files/install-config.yaml new file mode 100644 index 00000000..b761fc72 --- /dev/null +++ b/roles/get-ocp/files/install-config.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +baseDomain: ocpz.wsclab.endicott.ibm.com +compute: +- hyperthreading: Enabled + name: worker + replicas: 0 + architecture : s390x +controlPlane: + hyperthreading: Enabled + name: master + replicas: 3 + architecture : s390x +metadata: + name: distribution +networking: + clusterNetwork: + - cidr: 10.128.0.0/14 + hostPrefix: 23 + 
networkType: OpenShiftSDN + serviceNetwork: + - 172.30.0.0/16 +platform: + none: {} +fips: false +pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhl
RWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' +sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDudjhbUkA7ekvuzZe7WpkkF2KOoLGwWU05Ch6Ty0OWDW0vdbtrG7TIgy8FYOvBnArnIJcy1AP6tpH6BcbmHRIosVFbZ3GrLkKEOroBD+8+6qGzN+fIXBzss20iRvMvDdlDaVJ1BqmTvNnkbeWdgCg1MbICQ1OYOExm+xgvGWHOjoJkUzkw3oBnAnpZHHTkvM2dY0SNtzV9i5FcSvdBM2kindvBpvDZ9PjTGkcFMnbVRpBXUV+axNg4yuX0YWFDWOFlQ4DjvasdN6jMQ2MrWBYtYF1RAklsOhSXTPKAYaxZZsfeEBnMVUFshaDYy7PlbCXcUEVEFEkMlTAbe//bBXx5l+2fVFtOUyhfws28bLg+rlJLivINoLW4EB+ERViYBTCEx5njl9mK9EcPpCJlLTR82kr8vEwE/sJ9ro3dH/aEUQcTkVYqlOCw2lsFZpkZtthArknZiHk91OtWgXj4GsAjihehfBvEL/GbEuO0HTzvl+QiTnC+mL4KcObl31K9LdlAbKA53l6G68py0u7plUervFN8H7rIFbv4ndUTAo2rODty/QoYH8jBu460LLgLyaRfaDFrGitnAdSiCS/8dEr8iAVxj5X5sgcTQlsJYyvMv61YCzItmlipDpbf+M/mJyJuxyuYWqudItOIWchFktstcpp3dx1iM/pr0o4HitpeQ== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml new file mode 100644 index 00000000..77c029be --- /dev/null +++ b/roles/get-ocp/tasks/main.yaml @@ -0,0 +1,87 @@ +--- + +- name: create OCP download landing directory + file: + path: /ocpinst/ + state: directory + +- name: Unzip OCP Client + 
ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + +- name: Unzip OCP Installer + ansible.builtin.unarchive: + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + dest: /ocpinst/ + remote_src: yes + +- name: Copy kubectl file + ansible.builtin.copy: + src: /ocpinst/kubectl + dest: /usr/local/bin/kubectl + remote_src: yes + owner: root + group: root + mode: '0755' + +- name: Copy oc file + ansible.builtin.copy: + src: /ocpinst/oc + dest: /usr/local/bin/oc + remote_src: yes + owner: root + group: root + mode: '0755' + +- name: Copy openshift-install file + ansible.builtin.copy: + src: /ocpinst/openshift-install + dest: /usr/local/bin/openshift-install + remote_src: yes + owner: root + group: root + mode: '0755' + +- name: Copy install-config.yaml to ocp install directory + copy: + src: install-config.yaml + dest: /ocpinst/install-config.yaml + +- name: Create Manifests + command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ + become: yes + +- name: Set mastersSchedulable parameter to False + replace: + path: /ocpinst/manifests/cluster-scheduler-02-config.yml + regexp: ': true' + replace: ': false' + +- name: Create Ignition files + command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ + become: yes + +- name: create Ignition directory on webserver + file: + path: /var/www/html/ignition + state: directory + +- name: Copy bootstrap Ignition file to web server + copy: + src: /ocpinst/bootstrap.ign + dest: /var/www/html/ignition + remote_src: yes + +- name: Copy control plane Ignition file to web server + copy: + src: /ocpinst/master.ign + dest: /var/www/html/ignition + remote_src: yes + +- name: Copy worker Ignition file to web server + copy: + src: /ocpinst/worker.ign + dest: /var/www/html/ignition + remote_src: yes diff --git 
a/roles/haproxy/files/haproxy.cfg b/roles/haproxy/files/haproxy.cfg new file mode 100644 index 00000000..f7b1f7f0 --- /dev/null +++ b/roles/haproxy/files/haproxy.cfg @@ -0,0 +1,58 @@ +global + log 127.0.0.1 local2 + pidfile /var/run/haproxy.pid + maxconn 4000 + daemon +defaults + mode http + log global + option dontlognull + option http-server-close + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout http-keep-alive 10s + timeout check 10s + maxconn 3000 +frontend stats + bind *:1936 + mode http + log global + maxconn 10 + stats enable + stats hide-version + stats refresh 30s + stats show-node + stats show-desc Stats for distribution cluster + stats auth admin:distribution + stats uri /stats +listen api-server-6443 + bind *:6443 + mode tcp + server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s backup + server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s + server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s + server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s +listen machine-config-server-22623 + bind *:22623 + mode tcp + server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s backup + server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s + server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s +listen ingress-router-443 + bind *:443 + mode tcp + balance source + server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s + server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s +listen ingress-router-80 + bind *:80 + mode tcp + balance source + server compute-0 
compute-0.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s + server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml new file mode 100644 index 00000000..589622f5 --- /dev/null +++ b/roles/haproxy/tasks/main.yaml @@ -0,0 +1,22 @@ +--- + +#- name: install haproxy +# dnf: +# - haproxy + +- name: move haproxy config file to bastion + copy: + src: haproxy.cfg + dest: /etc/haproxy/haproxy.cfg + force: yes + backup: yes + +- name: enable haproxy + systemd: + state: enabled + named: haproxy + +- name: Start haproxy + systemd: + state: restarted + name: haproxy diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml new file mode 100644 index 00000000..20948d25 --- /dev/null +++ b/roles/httpd/tasks/main.yaml @@ -0,0 +1,92 @@ +--- + +#- name: install httpd +# dnf: +# name: httpd +# state: latest + +- name: Ensure the default Apache port is 8080 + replace: + path: /etc/httpd/conf/httpd.conf + regexp: '^Listen 80' + replace: 'Listen 8080' + backup: yes + +- name: Ensure the SSL default port is 4443 + replace: + path: /etc/httpd/conf.d/ssl.conf + regexp: '^Listen 443 https' + replace: 'Listen 4443 https' + backup: yes + +- name: restart httpd to reflect changes to port + service: + name: httpd + state: restarted + +#- name: Allow all access to tcp port 8080 +# community.general.ufw: +# rule: allow +# port: '8080' +# proto: tcp +# +#- name: Allow all access to tcp port 80 +# community.general.ufw: +# rule: allow +# port: '80' +# proto: tcp +# +#- name: Allow all access to tcp port 443 +# community.general.ufw: +# rule: allow +# port: '443' +# proto: tcp +# +#- name: Allow all access to tcp port 4443 +# community.general.ufw: +# rule: allow +# port: '4443' +# proto: tcp + +- name: create directory bin for mirrors + file: + path: /var/www/html/bin + state: directory + mode: '0755' + +- name: create directory bootstrap for mirrors + file: + path: 
/var/www/html/bootstrap + state: directory + mode: '0755' + +- name: get mirrors 1 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + +- name: get mirrors 2 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + +- name: get mirrors 3 + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + dest: /var/www/html/bin + remote_src: yes + mode: '0755' + +- name: check to make sure httpd is started + service: + name: httpd + state: started + +- name: check httpd status + service: + state: started + name: httpd diff --git a/roles/kvm_host/tasks/main.yaml b/roles/kvm_host/tasks/main.yaml new file mode 100644 index 00000000..5ba167c8 --- /dev/null +++ b/roles/kvm_host/tasks/main.yaml @@ -0,0 +1,17 @@ +- name: Set up macvtap bridge + community.libvirt.virt_net: + command: define + name: macvtap-net + autostart: true + xml: "{{ lookup ('template', 'macvtap.xml.j2') }}" + +- name: Start macvtap-net + community.libvirt.virt_net: + autostart: yes + command: start + name: macvtap-net + +- name: Set autostart for macvtap-net + community.libvirt.virt_net: + autostart: yes + name: macvtap-net diff --git a/roles/macvtap/files/macvtap.xml.j2 b/roles/macvtap/files/macvtap.xml.j2 new file mode 100644 index 00000000..388477ea --- /dev/null +++ b/roles/macvtap/files/macvtap.xml.j2 @@ -0,0 +1,6 @@ + + macvtap-net + + + + diff --git a/roles/macvtap/tasks/main.yaml b/roles/macvtap/tasks/main.yaml new file mode 100644 index 00000000..af7daa8b --- /dev/null +++ b/roles/macvtap/tasks/main.yaml @@ -0,0 +1,19 @@ +--- + +- name: Set up macvtap bridge + community.libvirt.virt_net: + command: define + name: macvtap-net + autostart: true + xml: "{{ 
lookup ('template', 'macvtap.xml.j2') }}" + +- name: Start macvtap-net + community.libvirt.virt_net: + autostart: yes + command: start + name: macvtap-net + +- name: Set autostart for macvtap-net + community.libvirt.virt_net: + autostart: yes + name: macvtap-net diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml new file mode 100644 index 00000000..0c2afbde --- /dev/null +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -0,0 +1,10 @@ +--- + +- name: get rhcos qcow2 files + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz + dest: /var/lib/libvirt/images/ + + +- name: Unzip OCP dependencies + command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz diff --git a/roles/workstations b/roles/workstations new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/roles/workstations @@ -0,0 +1 @@ +--- From b6c9cade3e34fec59dbbd490d6cc78a06b353b5e Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 9 Aug 2021 14:26:48 -0500 Subject: [PATCH 237/885] added workstations to inventory file. Added some roles to the main playbook. Have not created those roles' files yet. 
--- inventory | 3 +++ main.yaml | 41 ++++++++++++++++++++++++++++++++--------- 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/inventory b/inventory index 521b9c44..8434357f 100644 --- a/inventory +++ b/inventory @@ -1,3 +1,6 @@ +[workstation] +{{ workstation_ip }} + [kvm_host] 9.60.87.132 diff --git a/main.yaml b/main.yaml index c3db35ea..0902061a 100644 --- a/main.yaml +++ b/main.yaml @@ -1,22 +1,45 @@ --- +- hosts: workstation + become: true + roles: + - install_packages + - update_repo_index + - hosts: kvm_host become: true roles: - - macvtap - - create_bastion + - macvtap + - create_bastion - hosts: bastion_server become: true roles: - - haproxy - - httpd - - get-ocp + - haproxy + - httpd + - get-ocp + +- hosts: kvm_host + become: true + roles: + - prep_kvm_guests + - create_bootstrap - hosts: kvm_host become: true roles: - - prep_kvm_guests - - create_bootstrap - - create_control_nodes - - create_compute_nodes + - create_control_nodes + - create_compute_nodes + +- hosts: bootstrap_server + become: true + roles: + - verify_bootstrap + +- hosts: bastion_server + become: true + roles: + - remove_bootstrap + - approve_certs + - verify_installation + From 1723bbb8a1ec69f8a70501b864b1f025613f253d Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 9 Aug 2021 14:26:48 -0500 Subject: [PATCH 238/885] added workstations to inventory file. Added some roles to the main playbook. Have not created those roles' files yet. 
--- inventory | 3 +++ main.yaml | 41 ++++++++++++++++++++++++++++++++--------- 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/inventory b/inventory index 521b9c44..8434357f 100644 --- a/inventory +++ b/inventory @@ -1,3 +1,6 @@ +[workstation] +{{ workstation_ip }} + [kvm_host] 9.60.87.132 diff --git a/main.yaml b/main.yaml index c3db35ea..0902061a 100644 --- a/main.yaml +++ b/main.yaml @@ -1,22 +1,45 @@ --- +- hosts: workstation + become: true + roles: + - install_packages + - update_repo_index + - hosts: kvm_host become: true roles: - - macvtap - - create_bastion + - macvtap + - create_bastion - hosts: bastion_server become: true roles: - - haproxy - - httpd - - get-ocp + - haproxy + - httpd + - get-ocp + +- hosts: kvm_host + become: true + roles: + - prep_kvm_guests + - create_bootstrap - hosts: kvm_host become: true roles: - - prep_kvm_guests - - create_bootstrap - - create_control_nodes - - create_compute_nodes + - create_control_nodes + - create_compute_nodes + +- hosts: bootstrap_server + become: true + roles: + - verify_bootstrap + +- hosts: bastion_server + become: true + roles: + - remove_bootstrap + - approve_certs + - verify_installation + From c13ae4a3c72b528348dae6b73a5cf727722bd995 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 9 Aug 2021 15:17:00 -0500 Subject: [PATCH 239/885] Small modifications to playbooks for the implementation of roles --- main.yaml | 31 ++++++++++++++------------- roles/prep_kvm_guests/tasks/main.yaml | 3 +-- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/main.yaml b/main.yaml index 0902061a..ae59b7cb 100644 --- a/main.yaml +++ b/main.yaml @@ -1,14 +1,15 @@ --- -- hosts: workstation - become: true - roles: - - install_packages - - update_repo_index +#- hosts: workstation +# become: true +# roles: +# - install_ansible_galaxy +# - update_repo_index - hosts: kvm_host become: true roles: + #- install_packages - macvtap - create_bastion @@ -31,15 +32,15 @@ - create_control_nodes - 
create_compute_nodes -- hosts: bootstrap_server - become: true - roles: - - verify_bootstrap +#- hosts: bootstrap_server +# become: true +# roles: +# - verify_bootstrap -- hosts: bastion_server - become: true - roles: - - remove_bootstrap - - approve_certs - - verify_installation +#- hosts: bastion_server +# become: true +# roles: +# - remove_bootstrap +# - approve_certs +# - verify_installation diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index 0c2afbde..7673dce9 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -5,6 +5,5 @@ url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images/ - -- name: Unzip OCP dependencies +- name: Unzip rhcos qcow2 files command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz From 3fe22655d7c147bf137f69188da65e1d9e332840 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 9 Aug 2021 15:17:00 -0500 Subject: [PATCH 240/885] Small modifications to playbooks for the implementation of roles --- main.yaml | 31 ++++++++++++++------------- roles/prep_kvm_guests/tasks/main.yaml | 3 +-- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/main.yaml b/main.yaml index 0902061a..ae59b7cb 100644 --- a/main.yaml +++ b/main.yaml @@ -1,14 +1,15 @@ --- -- hosts: workstation - become: true - roles: - - install_packages - - update_repo_index +#- hosts: workstation +# become: true +# roles: +# - install_ansible_galaxy +# - update_repo_index - hosts: kvm_host become: true roles: + #- install_packages - macvtap - create_bastion @@ -31,15 +32,15 @@ - create_control_nodes - create_compute_nodes -- hosts: bootstrap_server - become: true - roles: - - verify_bootstrap +#- hosts: bootstrap_server +# become: true +# roles: +# - verify_bootstrap -- hosts: bastion_server - become: true - roles: - - remove_bootstrap - - approve_certs - - verify_installation +#- hosts: 
bastion_server +# become: true +# roles: +# - remove_bootstrap +# - approve_certs +# - verify_installation diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index 0c2afbde..7673dce9 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -5,6 +5,5 @@ url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images/ - -- name: Unzip OCP dependencies +- name: Unzip rhcos qcow2 files command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz From fffb3f586ab5805793f18878e9626b711f6a727e Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 10 Aug 2021 09:57:43 -0500 Subject: [PATCH 241/885] Added and commented out a task in the create_bastion role taskbook to download the RHEL ISO image for the bastion. Unable to use it right now because we don't have a RHEL software license yet. --- roles/create_bastion/tasks/main.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index ac0fa7f3..731bb572 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -1,5 +1,12 @@ --- +#Uncomment once we have a RHEL license +#- name: download RHEL ISO image to KVM +# get_url: +# url: {{ RHEL ISO URL }} +# dest: /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 +# mode: '0775' + - name: virtualize bastion server command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G From c7116b7f35c5b453f02a2fc3a28d1eb38b81a37a Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 10 Aug 2021 09:57:43 -0500 Subject: [PATCH 242/885] Added and commented out a task in the create_bastion role taskbook to download the RHEL ISO image for the bastion. Unable to use it right now because we don't have a RHEL software license yet. 
--- roles/create_bastion/tasks/main.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index ac0fa7f3..731bb572 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -1,5 +1,12 @@ --- +#Uncomment once we have a RHEL license +#- name: download RHEL ISO image to KVM +# get_url: +# url: {{ RHEL ISO URL }} +# dest: /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 +# mode: '0775' + - name: virtualize bastion server command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G From 6226c42cb5b6676b68d022c0cf2f56332747ba67 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 10 Aug 2021 12:58:37 -0500 Subject: [PATCH 243/885] Added taskbook for removing the bootstrap. Still commented out in the main playbook so it is inactive, but should be ready to use when we need it. Also commented out the workstation host in inventory because it's not ready yet. 
--- inventory | 4 ++-- roles/remove_bootstrap/tasks/main.yaml | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 roles/remove_bootstrap/tasks/main.yaml diff --git a/inventory b/inventory index 8434357f..cd608040 100644 --- a/inventory +++ b/inventory @@ -1,5 +1,5 @@ -[workstation] -{{ workstation_ip }} +#[workstation] +#{{ workstation_ip }} [kvm_host] 9.60.87.132 diff --git a/roles/remove_bootstrap/tasks/main.yaml b/roles/remove_bootstrap/tasks/main.yaml new file mode 100644 index 00000000..cb17edef --- /dev/null +++ b/roles/remove_bootstrap/tasks/main.yaml @@ -0,0 +1,15 @@ +--- + +- name: shutdown bootstrap + community.libvirt.virt: + name: bootstrap_server + state: shutdown + +- name: wait for shutdown + pause: + minutes: 1 + +- name: destroy bootstrap + community.libvirt.virt: + name: bootstrap_server + state: destroyed \ No newline at end of file From fdf252f845714b810ceb334e148ac57f638f9020 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 10 Aug 2021 12:58:37 -0500 Subject: [PATCH 244/885] Added taskbook for removing the bootstrap. Still commented out in the main playbook so it is inactive, but should be ready to use when we need it. Also commented out the workstation host in inventory because it's not ready yet. 
--- inventory | 4 ++-- roles/remove_bootstrap/tasks/main.yaml | 15 +++++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 roles/remove_bootstrap/tasks/main.yaml diff --git a/inventory b/inventory index 8434357f..cd608040 100644 --- a/inventory +++ b/inventory @@ -1,5 +1,5 @@ -[workstation] -{{ workstation_ip }} +#[workstation] +#{{ workstation_ip }} [kvm_host] 9.60.87.132 diff --git a/roles/remove_bootstrap/tasks/main.yaml b/roles/remove_bootstrap/tasks/main.yaml new file mode 100644 index 00000000..cb17edef --- /dev/null +++ b/roles/remove_bootstrap/tasks/main.yaml @@ -0,0 +1,15 @@ +--- + +- name: shutdown bootstrap + community.libvirt.virt: + name: bootstrap_server + state: shutdown + +- name: wait for shutdown + pause: + minutes: 1 + +- name: destroy bootstrap + community.libvirt.virt: + name: bootstrap_server + state: destroyed \ No newline at end of file From 0e298544b7a9faec8a78b01286029cebac963a8a Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 10 Aug 2021 16:01:25 -0500 Subject: [PATCH 245/885] Testing create_bastion --- roles/create_bastion/tasks/main.yaml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 731bb572..edbdb673 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -1,5 +1,9 @@ --- +- hosts: kvm_host + become: true + tasks: + #Uncomment once we have a RHEL license #- name: download RHEL ISO image to KVM # get_url: @@ -7,8 +11,8 @@ # dest: /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 # mode: '0775' -- name: virtualize bastion server - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G + - name: virtualize bastion server + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G -- name: start bastion install - 
command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + - name: start bastion install + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole From dbb507f2bd104caff89925f92d1ef3963445ca86 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 10 Aug 2021 16:01:25 -0500 Subject: [PATCH 246/885] Testing create_bastion --- roles/create_bastion/tasks/main.yaml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 731bb572..edbdb673 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -1,5 +1,9 @@ --- +- hosts: kvm_host + become: true + tasks: + #Uncomment once we have a RHEL license #- name: download RHEL ISO image to KVM # get_url: @@ -7,8 +11,8 @@ # dest: /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 # mode: '0775' -- name: virtualize bastion server - command: qemu-img create -f qcow2 -b 
/var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G + - name: virtualize bastion server + command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G -- name: start bastion install - command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + - name: start bastion install + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole From f94e755cab5d0a8e630312203d37e3e3f741a6b4 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 11 Aug 2021 18:02:46 -0500 Subject: [PATCH 247/885] Implemented wait after create_bastion. 
--- roles/create_bastion/tasks/main.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index edbdb673..861bba71 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -1,9 +1,5 @@ --- -- hosts: kvm_host - become: true - tasks: - #Uncomment once we have a RHEL license #- name: download RHEL ISO image to KVM # get_url: @@ -15,4 +11,8 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - name: start bastion install - command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel82.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel82.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + + - name: wait for bastion to install + wait: + minutes: 8 \ No newline at end of file From fa99dd12f79a27f560a7b63ae0ec91bac12bb41c Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 11 Aug 2021 18:02:46 -0500 Subject: [PATCH 248/885] Implemented wait after create_bastion. 
--- roles/create_bastion/tasks/main.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index edbdb673..861bba71 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -1,9 +1,5 @@ --- -- hosts: kvm_host - become: true - tasks: - #Uncomment once we have a RHEL license #- name: download RHEL ISO image to KVM # get_url: @@ -15,4 +11,8 @@ command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - name: start bastion install - command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel82.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel82.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + + - name: wait for bastion to install + wait: + minutes: 8 \ No newline at end of file From f7e46ab66d10360263d0aef6432322701fd535b8 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Thu, 12 Aug 2021 14:44:39 -0500 Subject: [PATCH 249/885] Updated many playbooks with small tweaks as they came up when 
running main playbook with roles. --- main.yaml | 14 +++++++++++-- roles/create_bastion/tasks/main.yaml | 20 +++++++++++++++---- roles/haproxy/tasks/main.yaml | 10 +++------- roles/httpd/tasks/main.yaml | 20 ++++++------------- roles/install_packages/tasks/main.yaml | 6 ++++++ .../{files => templates}/macvtap.xml.j2 | 0 roles/set_selinux_permissive/tasks/main.yaml | 6 ++++++ 7 files changed, 49 insertions(+), 27 deletions(-) create mode 100644 roles/install_packages/tasks/main.yaml rename roles/macvtap/{files => templates}/macvtap.xml.j2 (100%) create mode 100644 roles/set_selinux_permissive/tasks/main.yaml diff --git a/main.yaml b/main.yaml index ae59b7cb..68a2e4c0 100644 --- a/main.yaml +++ b/main.yaml @@ -3,19 +3,29 @@ #- hosts: workstation # become: true # roles: +# vars: "reminder to add vars for remote host to copy ssh key to, kvm_host in this case" # - install_ansible_galaxy # - update_repo_index +# - ssh_key_gen +# - copy_ssh_key - hosts: kvm_host become: true roles: #- install_packages - - macvtap - - create_bastion + #- macvtap + #- create_bastion + +#- hosts: workstation +# become: true +# roles: +# vars: "reminder to add vars for remote host to copy ssh key to, bastion in this case" +# - copy_ssh_key - hosts: bastion_server become: true roles: + #- set_selinux_permissive - haproxy - httpd - get-ocp diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 861bba71..ea9521ea 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -4,15 +4,27 @@ #- name: download RHEL ISO image to KVM # get_url: # url: {{ RHEL ISO URL }} -# dest: /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 +# dest: /var/lib/libvirt/images/rhel83.iso # mode: '0775' +#- name: Unzip RHEL iso +# ansible.builtin.unarchive: +# src: https://mirror.redhat.com/rhel/latest/latest/RHEL-8.3.0-20201009.2-s390x-dvd1.iso +# dest: /var/lib/libvirt/images/rhel83.iso +# remote_src: yes + + #- name: create install mount 
directory + # command: mkdir /rhcos-install + + #- name: mount rhcos install directory + # command: mount -o loop /var/lib/libvirt/images/rhel83.iso /rhcos-install/ + - name: virtualize bastion server command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - name: start bastion install - command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel82.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel82.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - name: wait for bastion to install - wait: - minutes: 8 \ No newline at end of file + pause: + minutes: 15 \ No newline at end of file diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index 589622f5..a3099c51 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -1,9 +1,5 @@ --- -#- name: install haproxy -# dnf: -# - haproxy - - name: move haproxy config file to bastion copy: src: haproxy.cfg @@ -13,10 +9,10 @@ - name: enable haproxy systemd: - state: enabled - named: haproxy + enabled: yes + name: haproxy -- name: Start haproxy 
+- name: Restart haproxy systemd: state: restarted name: haproxy diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index 20948d25..38bafe5f 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -1,10 +1,10 @@ --- -#- name: install httpd -# dnf: -# name: httpd -# state: latest - +- name: enable httpd + systemd: + name: httpd + enabled: yes + - name: Ensure the default Apache port is 8080 replace: path: /etc/httpd/conf/httpd.conf @@ -64,29 +64,21 @@ get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/www/html/bin - remote_src: yes mode: '0755' - name: get mirrors 2 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/www/html/bin - remote_src: yes mode: '0755' - name: get mirrors 3 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin - remote_src: yes mode: '0755' - name: check to make sure httpd is started service: name: httpd - state: started - -- name: check httpd status - service: - state: started - name: httpd + state: restarted diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml new file mode 100644 index 00000000..13a6dfc2 --- /dev/null +++ b/roles/install_packages/tasks/main.yaml @@ -0,0 +1,6 @@ +#Placeholder, incomplete +--- + +- name: {{ host_name }} + dnf: + name: [{{ package1 }}, {{ package2 }}, diff --git a/roles/macvtap/files/macvtap.xml.j2 b/roles/macvtap/templates/macvtap.xml.j2 similarity index 100% rename from roles/macvtap/files/macvtap.xml.j2 rename to roles/macvtap/templates/macvtap.xml.j2 diff --git a/roles/set_selinux_permissive/tasks/main.yaml b/roles/set_selinux_permissive/tasks/main.yaml new file mode 100644 index 00000000..a30e361c --- /dev/null +++ 
b/roles/set_selinux_permissive/tasks/main.yaml @@ -0,0 +1,6 @@ +--- + +- name: Put SELinux in permissive mode, logging actions that would be blocked. + ansible.posix.selinux: + policy: targeted + state: permissive From 4b151d287f95f0dcb60b75b8cfe7e4c09d81ec5b Mon Sep 17 00:00:00 2001 From: jacobemery Date: Thu, 12 Aug 2021 14:44:39 -0500 Subject: [PATCH 250/885] Updated many playbooks with small tweaks as they came up when running main playbook with roles. --- main.yaml | 14 +++++++++++-- roles/create_bastion/tasks/main.yaml | 20 +++++++++++++++---- roles/haproxy/tasks/main.yaml | 10 +++------- roles/httpd/tasks/main.yaml | 20 ++++++------------- roles/install_packages/tasks/main.yaml | 6 ++++++ .../{files => templates}/macvtap.xml.j2 | 0 roles/set_selinux_permissive/tasks/main.yaml | 6 ++++++ 7 files changed, 49 insertions(+), 27 deletions(-) create mode 100644 roles/install_packages/tasks/main.yaml rename roles/macvtap/{files => templates}/macvtap.xml.j2 (100%) create mode 100644 roles/set_selinux_permissive/tasks/main.yaml diff --git a/main.yaml b/main.yaml index ae59b7cb..68a2e4c0 100644 --- a/main.yaml +++ b/main.yaml @@ -3,19 +3,29 @@ #- hosts: workstation # become: true # roles: +# vars: "reminder to add vars for remote host to copy ssh key to, kvm_host in this case" # - install_ansible_galaxy # - update_repo_index +# - ssh_key_gen +# - copy_ssh_key - hosts: kvm_host become: true roles: #- install_packages - - macvtap - - create_bastion + #- macvtap + #- create_bastion + +#- hosts: workstation +# become: true +# roles: +# vars: "reminder to add vars for remote host to copy ssh key to, bastion in this case" +# - copy_ssh_key - hosts: bastion_server become: true roles: + #- set_selinux_permissive - haproxy - httpd - get-ocp diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 861bba71..ea9521ea 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -4,15 +4,27 @@ #- name: 
download RHEL ISO image to KVM # get_url: # url: {{ RHEL ISO URL }} -# dest: /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 +# dest: /var/lib/libvirt/images/rhel83.iso # mode: '0775' +#- name: Unzip RHEL iso +# ansible.builtin.unarchive: +# src: https://mirror.redhat.com/rhel/latest/latest/RHEL-8.3.0-20201009.2-s390x-dvd1.iso +# dest: /var/lib/libvirt/images/rhel83.iso +# remote_src: yes + + #- name: create install mount directory + # command: mkdir /rhcos-install + + #- name: mount rhcos install directory + # command: mount -o loop /var/lib/libvirt/images/rhel83.iso /rhcos-install/ + - name: virtualize bastion server command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - name: start bastion install - command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel82.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel82.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - name: wait for bastion to install - wait: - minutes: 8 \ No newline at end of file + pause: + minutes: 15 \ No newline at end of file diff --git 
a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index 589622f5..a3099c51 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -1,9 +1,5 @@ --- -#- name: install haproxy -# dnf: -# - haproxy - - name: move haproxy config file to bastion copy: src: haproxy.cfg @@ -13,10 +9,10 @@ - name: enable haproxy systemd: - state: enabled - named: haproxy + enabled: yes + name: haproxy -- name: Start haproxy +- name: Restart haproxy systemd: state: restarted name: haproxy diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index 20948d25..38bafe5f 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -1,10 +1,10 @@ --- -#- name: install httpd -# dnf: -# name: httpd -# state: latest - +- name: enable httpd + systemd: + name: httpd + enabled: yes + - name: Ensure the default Apache port is 8080 replace: path: /etc/httpd/conf/httpd.conf @@ -64,29 +64,21 @@ get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/www/html/bin - remote_src: yes mode: '0755' - name: get mirrors 2 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/www/html/bin - remote_src: yes mode: '0755' - name: get mirrors 3 get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin - remote_src: yes mode: '0755' - name: check to make sure httpd is started service: name: httpd - state: started - -- name: check httpd status - service: - state: started - name: httpd + state: restarted diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml new file mode 100644 index 00000000..13a6dfc2 --- /dev/null +++ b/roles/install_packages/tasks/main.yaml @@ -0,0 +1,6 @@ +#Placeholder, incomplete +--- + +- name: {{ host_name }} + dnf: + name: [{{ 
package1 }}, {{ package2 }}, diff --git a/roles/macvtap/files/macvtap.xml.j2 b/roles/macvtap/templates/macvtap.xml.j2 similarity index 100% rename from roles/macvtap/files/macvtap.xml.j2 rename to roles/macvtap/templates/macvtap.xml.j2 diff --git a/roles/set_selinux_permissive/tasks/main.yaml b/roles/set_selinux_permissive/tasks/main.yaml new file mode 100644 index 00000000..a30e361c --- /dev/null +++ b/roles/set_selinux_permissive/tasks/main.yaml @@ -0,0 +1,6 @@ +--- + +- name: Put SELinux in permissive mode, logging actions that would be blocked. + ansible.posix.selinux: + policy: targeted + state: permissive From 8d34bbf3710bcc8f1d5bd000b193960b6b29496d Mon Sep 17 00:00:00 2001 From: jacobemery Date: Thu, 12 Aug 2021 15:51:46 -0500 Subject: [PATCH 251/885] Made small tweaks to playbooks that came up in the implementation of roles --- main.yaml | 13 ++-- .../tasks/bootstrap_verify.yaml | 20 ++--- roles/create_bastion/tasks/main.yaml | 2 +- roles/dns/files/distribution.db | 48 ++++++++++++ roles/dns/files/distribution.rev | 24 ++++++ roles/dns/files/named.conf | 78 +++++++++++++++++++ roles/dns/tasks/main.yaml | 43 ++++++++++ roles/prep_kvm_guests/tasks/main.yaml | 15 ++++ 8 files changed, 223 insertions(+), 20 deletions(-) create mode 100644 roles/dns/files/distribution.db create mode 100644 roles/dns/files/distribution.rev create mode 100644 roles/dns/files/named.conf create mode 100644 roles/dns/tasks/main.yaml diff --git a/main.yaml b/main.yaml index 68a2e4c0..c8dd1bb9 100644 --- a/main.yaml +++ b/main.yaml @@ -4,7 +4,7 @@ # become: true # roles: # vars: "reminder to add vars for remote host to copy ssh key to, kvm_host in this case" -# - install_ansible_galaxy +# - install_packages (ansible_galaxy) # - update_repo_index # - ssh_key_gen # - copy_ssh_key @@ -12,9 +12,10 @@ - hosts: kvm_host become: true roles: - #- install_packages + #- install_packages (libvirt, libvirt-devel, libvirt-daemon-kvm, qemu-kvm, virt-manager, libvirt-daemon-config-network, 
libvirt-client, qemu-img) + #- enable libvirt #- macvtap - #- create_bastion + - create_bastion #- hosts: workstation # become: true @@ -25,7 +26,9 @@ - hosts: bastion_server become: true roles: + #- install_packages (haproxy, httpd, mod_ssl, bind, bind_utils) don't forget to enable, reminder that bind is called named when being called after install #- set_selinux_permissive + - dns - haproxy - httpd - get-ocp @@ -35,10 +38,6 @@ roles: - prep_kvm_guests - create_bootstrap - -- hosts: kvm_host - become: true - roles: - create_control_nodes - create_compute_nodes diff --git a/roles/bootstrap_server/tasks/bootstrap_verify.yaml b/roles/bootstrap_server/tasks/bootstrap_verify.yaml index 4adafd7f..993c10b3 100644 --- a/roles/bootstrap_server/tasks/bootstrap_verify.yaml +++ b/roles/bootstrap_server/tasks/bootstrap_verify.yaml @@ -1,15 +1,11 @@ --- - -- hosts: bootstrap_server - become: true - tasks: - - name: connect bootstrap - command: virsh console bootstrap +- name: connect bootstrap + command: virsh console bootstrap - - name: Verify bootstrap install process until complete - command: journalctl -u bootkube.service - register: result - until: result.stdout.find("bootkube.service complete") != -1 - retries: 100 - delay: 300 +- name: Verify bootstrap install process until complete + command: journalctl -u bootkube.service + register: result + until: result.stdout.find("bootkube.service complete") != -1 + retries: 100 + delay: 300 diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index ea9521ea..160b5a5d 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -27,4 +27,4 @@ - name: wait for bastion to install pause: - minutes: 15 \ No newline at end of file + minutes: 60 \ No newline at end of file diff --git a/roles/dns/files/distribution.db b/roles/dns/files/distribution.db new file mode 100644 index 00000000..35be68ba --- /dev/null +++ b/roles/dns/files/distribution.db @@ -0,0 +1,48 @@ 
+$TTL 86400 +@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com.( + 2020021821 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL +) + +;Name Server Information +@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. + +;IP Address for Name Server +bastion IN A 9.60.87.139 + +;entry for bootstrap host. +;bootstrap IN A 9.60.87.133 +bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.133 + +;entries for the master nodes +control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 +;control-0 IN A 9.60.87.138 +control-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.137 +;control-1 IN A 9.60.87.137 +control-2.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.136 +;control-2 IN A 9.60.87.136 + +;entry for the bastion host +bastion IN A 9.60.87.139 + +;entries for the worker nodes +compute-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.135 +;compute-0 IN A 9.60.87.135 +compute-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.134 +;compute-1 IN A 9.60.87.134 + +;entry of your load balancer +haproxy IN A 9.60.87.139 + +;The api identifies the IP of your load balancer. +api IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. +api-int IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. + +;The wildcard also identifies the load balancer. +apps IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. +*.apps IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. + +;EOF diff --git a/roles/dns/files/distribution.rev b/roles/dns/files/distribution.rev new file mode 100644 index 00000000..51ff3eb9 --- /dev/null +++ b/roles/dns/files/distribution.rev @@ -0,0 +1,24 @@ +$TTL 86400 +@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. 
admin.distribution.ocpz.wsclab.endicott.ibm.com ( + 2020011800 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL +) +;Name Server Information +@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. +bastion IN A 9.60.87.139 + +;Reverse lookup for Name Server +139 IN PTR bastion.distribution.ocpz.wsclab.endicott.ibm.com. + +;PTR Record IP address to Hostname +138 IN PTR control-0.distribution.ocpz.wsclab.endicott.ibm.com. +137 IN PTR control-1.distribution.ocpz.wsclab.endicott.ibm.com. +136 IN PTR control-2.distribution.ocpz.wsclab.endicott.ibm.com. +135 IN PTR compute-0.distribution.ocpz.wsclab.endicott.ibm.com. +134 IN PTR compute-1.distribution.ocpz.wsclab.endicott.ibm.com. +133 IN PTR bootstrap.distribution.ocpz.wsclab.endicott.ibm.com. +139 IN PTR api-int.distribution.ocpz.wsclab.endicott.ibm.com. +139 IN PTR api.distribution.ocpz.wsclab.endicott.ibm.com. diff --git a/roles/dns/files/named.conf b/roles/dns/files/named.conf new file mode 100644 index 00000000..b07a27be --- /dev/null +++ b/roles/dns/files/named.conf @@ -0,0 +1,78 @@ +// +// named.conf +// +// Provided by Red Hat bind package to configure the ISC BIND named(8) DNS +// server as a caching only nameserver (as a localhost DNS resolver only). +// +// See /usr/share/doc/bind*/sample/ for example named configuration files. +// + +options { +// listen-on port 53 { 127.0.0.1; }; + listen-on port 53 { any; }; + listen-on-v6 port 53 { ::1; }; + directory "/var/named"; + dump-file "/var/named/data/cache_dump.db"; + statistics-file "/var/named/data/named_stats.txt"; + memstatistics-file "/var/named/data/named_mem_stats.txt"; + secroots-file "/var/named/data/named.secroots"; + recursing-file "/var/named/data/named.recursing"; + allow-query { any; }; + + /* + - If you are building an AUTHORITATIVE DNS server, do NOT enable recursion. + - If you are building a RECURSIVE (caching) DNS server, you need to enable + recursion. 
+ - If your recursive DNS server has a public IP address, you MUST enable access + control to limit queries to your legitimate users. Failing to do so will + cause your server to become part of large scale DNS amplification + attacks. Implementing BCP38 within your network would greatly + reduce such attack surface + */ + recursion yes; + + dnssec-enable no; + dnssec-validation no; + + managed-keys-directory "/var/named/dynamic"; + + pid-file "/run/named/named.pid"; + session-keyfile "/run/named/session.key"; + + /* https://fedoraproject.org/wiki/Changes/CryptoPolicy */ + include "/etc/crypto-policies/back-ends/bind.config"; +}; + +logging { + channel default_debug { + file "data/named.run"; + severity dynamic; + }; +}; + +zone "." IN { + type forward; + forwarders { 9.60.70.82; }; +// type hint; +// file "named.ca"; +}; + +include "/etc/named.rfc1912.zones"; +include "/etc/named.root.key"; + +//forward zone +zone "distribution.ocpz.wsclab.endicott.ibm.com" IN { + type master; + file "distribution.db"; + allow-update { any; }; + allow-query { any; }; +}; + +//backward zone +zone "87.60.9.in-addr.arpa" IN { + type master; + file "distribution.rev"; + allow-update { any; }; + allow-query { any; }; +}; + diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml new file mode 100644 index 00000000..88adf33c --- /dev/null +++ b/roles/dns/tasks/main.yaml @@ -0,0 +1,43 @@ +--- + +- name: enable named + ansible.builtin.systemd: + name: named + enabled: yes + +- name: start named + ansible.builtin.systemd: + name: named + state: started + +- name: Copy named.conf file to bastion + ansible.builtin.copy: + src: named.conf + dest: /etc/ + owner: root + group: root + mode: '0755' + backup: yes + +- name: Copy distribution.db file to bastion + ansible.builtin.copy: + src: distribution.db + dest: /var/named + owner: named + group: named + mode: '0755' + backup: yes + +- name: Copy distribution.rev file to bastion + ansible.builtin.copy: + src: distribution.rev + dest: 
/var/named + owner: named + group: named + mode: '0755' + backup: yes + +- name: restart named to update changes made to DNS + ansible.builtin.systemd: + name: named + state: restarted \ No newline at end of file diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index 7673dce9..359dbcf7 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -7,3 +7,18 @@ - name: Unzip rhcos qcow2 files command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz + +- name: get rhcos initramfs image + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + dest: /var/lib/libvirt/images/ + +- name: get rhcos kernel + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x + dest: /var/lib/libvirt/images/ + +- name: get rhcos rootfs image + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + dest: /var/lib/libvirt/images/ \ No newline at end of file From 46ded78607ce333ec2718684ca59e72807a19719 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Thu, 12 Aug 2021 15:51:46 -0500 Subject: [PATCH 252/885] Made small tweaks to playbooks that came up in the implementation of roles --- main.yaml | 13 ++-- .../tasks/bootstrap_verify.yaml | 20 ++--- roles/create_bastion/tasks/main.yaml | 2 +- roles/dns/files/distribution.db | 48 ++++++++++++ roles/dns/files/distribution.rev | 24 ++++++ roles/dns/files/named.conf | 78 +++++++++++++++++++ roles/dns/tasks/main.yaml | 43 ++++++++++ roles/prep_kvm_guests/tasks/main.yaml | 15 ++++ 8 files changed, 223 insertions(+), 20 deletions(-) create mode 100644 roles/dns/files/distribution.db create mode 100644 roles/dns/files/distribution.rev create mode 100644 roles/dns/files/named.conf create mode 100644 roles/dns/tasks/main.yaml diff --git a/main.yaml 
b/main.yaml index 68a2e4c0..c8dd1bb9 100644 --- a/main.yaml +++ b/main.yaml @@ -4,7 +4,7 @@ # become: true # roles: # vars: "reminder to add vars for remote host to copy ssh key to, kvm_host in this case" -# - install_ansible_galaxy +# - install_packages (ansible_galaxy) # - update_repo_index # - ssh_key_gen # - copy_ssh_key @@ -12,9 +12,10 @@ - hosts: kvm_host become: true roles: - #- install_packages + #- install_packages (libvirt, libvirt-devel, libvirt-daemon-kvm, qemu-kvm, virt-manager, libvirt-daemon-config-network, libvirt-client, qemu-img) + #- enable libvirt #- macvtap - #- create_bastion + - create_bastion #- hosts: workstation # become: true @@ -25,7 +26,9 @@ - hosts: bastion_server become: true roles: + #- install_packages (haproxy, httpd, mod_ssl, bind, bind_utils) don't forget to enable, reminder that bind is called named when being called after install #- set_selinux_permissive + - dns - haproxy - httpd - get-ocp @@ -35,10 +38,6 @@ roles: - prep_kvm_guests - create_bootstrap - -- hosts: kvm_host - become: true - roles: - create_control_nodes - create_compute_nodes diff --git a/roles/bootstrap_server/tasks/bootstrap_verify.yaml b/roles/bootstrap_server/tasks/bootstrap_verify.yaml index 4adafd7f..993c10b3 100644 --- a/roles/bootstrap_server/tasks/bootstrap_verify.yaml +++ b/roles/bootstrap_server/tasks/bootstrap_verify.yaml @@ -1,15 +1,11 @@ --- - -- hosts: bootstrap_server - become: true - tasks: - - name: connect bootstrap - command: virsh console bootstrap +- name: connect bootstrap + command: virsh console bootstrap - - name: Verify bootstrap install process until complete - command: journalctl -u bootkube.service - register: result - until: result.stdout.find("bootkube.service complete") != -1 - retries: 100 - delay: 300 +- name: Verify bootstrap install process until complete + command: journalctl -u bootkube.service + register: result + until: result.stdout.find("bootkube.service complete") != -1 + retries: 100 + delay: 300 diff --git 
a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index ea9521ea..160b5a5d 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -27,4 +27,4 @@ - name: wait for bastion to install pause: - minutes: 15 \ No newline at end of file + minutes: 60 \ No newline at end of file diff --git a/roles/dns/files/distribution.db b/roles/dns/files/distribution.db new file mode 100644 index 00000000..35be68ba --- /dev/null +++ b/roles/dns/files/distribution.db @@ -0,0 +1,48 @@ +$TTL 86400 +@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com.( + 2020021821 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL +) + +;Name Server Information +@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. + +;IP Address for Name Server +bastion IN A 9.60.87.139 + +;entry for bootstrap host. +;bootstrap IN A 9.60.87.133 +bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.133 + +;entries for the master nodes +control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 +;control-0 IN A 9.60.87.138 +control-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.137 +;control-1 IN A 9.60.87.137 +control-2.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.136 +;control-2 IN A 9.60.87.136 + +;entry for the bastion host +bastion IN A 9.60.87.139 + +;entries for the worker nodes +compute-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.135 +;compute-0 IN A 9.60.87.135 +compute-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.134 +;compute-1 IN A 9.60.87.134 + +;entry of your load balancer +haproxy IN A 9.60.87.139 + +;The api identifies the IP of your load balancer. +api IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. +api-int IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. + +;The wildcard also identifies the load balancer. 
+apps IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. +*.apps IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. + +;EOF diff --git a/roles/dns/files/distribution.rev b/roles/dns/files/distribution.rev new file mode 100644 index 00000000..51ff3eb9 --- /dev/null +++ b/roles/dns/files/distribution.rev @@ -0,0 +1,24 @@ +$TTL 86400 +@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com ( + 2020011800 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL +) +;Name Server Information +@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. +bastion IN A 9.60.87.139 + +;Reverse lookup for Name Server +139 IN PTR bastion.distribution.ocpz.wsclab.endicott.ibm.com. + +;PTR Record IP address to Hostname +138 IN PTR control-0.distribution.ocpz.wsclab.endicott.ibm.com. +137 IN PTR control-1.distribution.ocpz.wsclab.endicott.ibm.com. +136 IN PTR control-2.distribution.ocpz.wsclab.endicott.ibm.com. +135 IN PTR compute-0.distribution.ocpz.wsclab.endicott.ibm.com. +134 IN PTR compute-1.distribution.ocpz.wsclab.endicott.ibm.com. +133 IN PTR bootstrap.distribution.ocpz.wsclab.endicott.ibm.com. +139 IN PTR api-int.distribution.ocpz.wsclab.endicott.ibm.com. +139 IN PTR api.distribution.ocpz.wsclab.endicott.ibm.com. diff --git a/roles/dns/files/named.conf b/roles/dns/files/named.conf new file mode 100644 index 00000000..b07a27be --- /dev/null +++ b/roles/dns/files/named.conf @@ -0,0 +1,78 @@ +// +// named.conf +// +// Provided by Red Hat bind package to configure the ISC BIND named(8) DNS +// server as a caching only nameserver (as a localhost DNS resolver only). +// +// See /usr/share/doc/bind*/sample/ for example named configuration files. 
+// + +options { +// listen-on port 53 { 127.0.0.1; }; + listen-on port 53 { any; }; + listen-on-v6 port 53 { ::1; }; + directory "/var/named"; + dump-file "/var/named/data/cache_dump.db"; + statistics-file "/var/named/data/named_stats.txt"; + memstatistics-file "/var/named/data/named_mem_stats.txt"; + secroots-file "/var/named/data/named.secroots"; + recursing-file "/var/named/data/named.recursing"; + allow-query { any; }; + + /* + - If you are building an AUTHORITATIVE DNS server, do NOT enable recursion. + - If you are building a RECURSIVE (caching) DNS server, you need to enable + recursion. + - If your recursive DNS server has a public IP address, you MUST enable access + control to limit queries to your legitimate users. Failing to do so will + cause your server to become part of large scale DNS amplification + attacks. Implementing BCP38 within your network would greatly + reduce such attack surface + */ + recursion yes; + + dnssec-enable no; + dnssec-validation no; + + managed-keys-directory "/var/named/dynamic"; + + pid-file "/run/named/named.pid"; + session-keyfile "/run/named/session.key"; + + /* https://fedoraproject.org/wiki/Changes/CryptoPolicy */ + include "/etc/crypto-policies/back-ends/bind.config"; +}; + +logging { + channel default_debug { + file "data/named.run"; + severity dynamic; + }; +}; + +zone "." 
IN { + type forward; + forwarders { 9.60.70.82; }; +// type hint; +// file "named.ca"; +}; + +include "/etc/named.rfc1912.zones"; +include "/etc/named.root.key"; + +//forward zone +zone "distribution.ocpz.wsclab.endicott.ibm.com" IN { + type master; + file "distribution.db"; + allow-update { any; }; + allow-query { any; }; +}; + +//backward zone +zone "87.60.9.in-addr.arpa" IN { + type master; + file "distribution.rev"; + allow-update { any; }; + allow-query { any; }; +}; + diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml new file mode 100644 index 00000000..88adf33c --- /dev/null +++ b/roles/dns/tasks/main.yaml @@ -0,0 +1,43 @@ +--- + +- name: enable named + ansible.builtin.systemd: + name: named + enabled: yes + +- name: start named + ansible.builtin.systemd: + name: named + state: started + +- name: Copy named.conf file to bastion + ansible.builtin.copy: + src: named.conf + dest: /etc/ + owner: root + group: root + mode: '0755' + backup: yes + +- name: Copy distribution.db file to bastion + ansible.builtin.copy: + src: distribution.db + dest: /var/named + owner: named + group: named + mode: '0755' + backup: yes + +- name: Copy distribution.rev file to bastion + ansible.builtin.copy: + src: distribution.rev + dest: /var/named + owner: named + group: named + mode: '0755' + backup: yes + +- name: restart named to update changes made to DNS + ansible.builtin.systemd: + name: named + state: restarted \ No newline at end of file diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index 7673dce9..359dbcf7 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -7,3 +7,18 @@ - name: Unzip rhcos qcow2 files command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz + +- name: get rhcos initramfs image + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + dest: /var/lib/libvirt/images/ + 
+- name: get rhcos kernel + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x + dest: /var/lib/libvirt/images/ + +- name: get rhcos rootfs image + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + dest: /var/lib/libvirt/images/ \ No newline at end of file From 06a8815c411af587e2157212b83543f83eeaf7c3 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Thu, 12 Aug 2021 17:18:08 -0500 Subject: [PATCH 253/885] Updates for roles implementation --- main.yaml | 9 +++++---- roles/create_bastion/tasks/main.yaml | 4 ++-- roles/prep_kvm_guests/tasks/main.yaml | 7 +++++-- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/main.yaml b/main.yaml index c8dd1bb9..f6ac74fd 100644 --- a/main.yaml +++ b/main.yaml @@ -9,13 +9,13 @@ # - ssh_key_gen # - copy_ssh_key -- hosts: kvm_host - become: true - roles: +#- hosts: kvm_host + #become: true + #roles: #- install_packages (libvirt, libvirt-devel, libvirt-daemon-kvm, qemu-kvm, virt-manager, libvirt-daemon-config-network, libvirt-client, qemu-img) #- enable libvirt #- macvtap - - create_bastion + #- create_bastion (reminder to eventually use boot instructions) #- hosts: workstation # become: true @@ -27,6 +27,7 @@ become: true roles: #- install_packages (haproxy, httpd, mod_ssl, bind, bind_utils) don't forget to enable, reminder that bind is called named when being called after install + #- firewall (whatever that may be) #- set_selinux_permissive - dns - haproxy diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 160b5a5d..e748fe5b 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -19,8 +19,8 @@ #- name: mount rhcos install directory # command: mount -o loop /var/lib/libvirt/images/rhel83.iso /rhcos-install/ - - name: virtualize bastion server - command: qemu-img create -f qcow2 -b 
/var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G +# - name: virtualize bastion server +# command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - name: start bastion install command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index 359dbcf7..e8e371ff 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -1,6 +1,6 @@ --- -- name: get rhcos qcow2 files +- name: get rhcos qcow2 file get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images/ @@ -12,13 +12,16 @@ get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/lib/libvirt/images/ + mode: '0755' - name: get rhcos kernel get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/lib/libvirt/images/ + mode: '0755' - name: get rhcos rootfs image get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/lib/libvirt/images/ \ No newline at end of file + dest: /var/lib/libvirt/images/ + mode: '0755' \ No newline at end of file From 
e891b35496d067e20206853e8a84cccc9f944b77 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Thu, 12 Aug 2021 17:18:08 -0500 Subject: [PATCH 254/885] Updates for roles implementation --- main.yaml | 9 +++++---- roles/create_bastion/tasks/main.yaml | 4 ++-- roles/prep_kvm_guests/tasks/main.yaml | 7 +++++-- 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/main.yaml b/main.yaml index c8dd1bb9..f6ac74fd 100644 --- a/main.yaml +++ b/main.yaml @@ -9,13 +9,13 @@ # - ssh_key_gen # - copy_ssh_key -- hosts: kvm_host - become: true - roles: +#- hosts: kvm_host + #become: true + #roles: #- install_packages (libvirt, libvirt-devel, libvirt-daemon-kvm, qemu-kvm, virt-manager, libvirt-daemon-config-network, libvirt-client, qemu-img) #- enable libvirt #- macvtap - - create_bastion + #- create_bastion (reminder to eventually use boot instructions) #- hosts: workstation # become: true @@ -27,6 +27,7 @@ become: true roles: #- install_packages (haproxy, httpd, mod_ssl, bind, bind_utils) don't forget to enable, reminder that bind is called named when being called after install + #- firewall (whatever that may be) #- set_selinux_permissive - dns - haproxy diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 160b5a5d..e748fe5b 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -19,8 +19,8 @@ #- name: mount rhcos install directory # command: mount -o loop /var/lib/libvirt/images/rhel83.iso /rhcos-install/ - - name: virtualize bastion server - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G +# - name: virtualize bastion server +# command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - name: start bastion install command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom 
/var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index 359dbcf7..e8e371ff 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -1,6 +1,6 @@ --- -- name: get rhcos qcow2 files +- name: get rhcos qcow2 file get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images/ @@ -12,13 +12,16 @@ get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/lib/libvirt/images/ + mode: '0755' - name: get rhcos kernel get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/lib/libvirt/images/ + mode: '0755' - name: get rhcos rootfs image get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/lib/libvirt/images/ \ No newline at end of file + dest: /var/lib/libvirt/images/ + mode: '0755' \ No newline at end of file From eb2b39896bfe54e080c22d2b1a34ec58e5962dcc Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 12 Aug 2021 22:51:17 -0500 Subject: [PATCH 255/885] modified install-config.yaml in files and roles/get-ocp to reflect new rootssh key to access bootstrap and nodes --- files/install-config.yaml | 2 +- roles/get-ocp/files/install-config.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/files/install-config.yaml b/files/install-config.yaml index b761fc72..d016fa9c 100644 --- a/files/install-config.yaml +++ b/files/install-config.yaml @@ -23,4 +23,4 @@ platform: none: {} fips: false pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5L
mZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDudjhbUkA7ekvuzZe7WpkkF2KOoLGwWU05Ch6Ty0OWDW0vdbtrG7TIgy8FYOvBnArnIJcy1AP6tpH6BcbmHRIosVFbZ3GrLkKEOroBD+8+6qGzN+fIXBzss20iRvMvDdlDaVJ1BqmTvNnkbeWdgCg1MbICQ1OYOExm+xgvGWHOjoJkUzkw3oBnAnpZHHTkvM2dY0SNtzV9i5FcSvdBM2kindvBpvDZ9PjTGkcFMnbVRpBXUV+axNg4yuX0YWFDWOFlQ4DjvasdN6jMQ2MrWBYtYF1RAklsOhSXTPKAYaxZZsfeEBnMVUFshaDYy7PlbCXcUEVEFEkMlTAbe//bBXx5l+2fVFtOUyhfws28bLg+rlJLivINoLW4EB+ERViYBTCEx5njl9mK9EcPpCJlLTR82kr8vEwE/sJ9ro3dH/aEUQcTkVYqlOCw2lsFZpkZtthArknZiHk91OtWgXj4GsAjihehfBvEL/GbEuO0HTzvl+QiTnC+mL4KcObl31K9LdlAbKA53l6G68py0u7plUervFN8H7rIFbv4ndUTAo2rODty/QoYH8jBu460LLgLyaRfaDFrGitnAdSiCS/8dEr8iAVxj5X5sgcTQlsJYyvMv61YCzItmlipDpbf+M/mJyJuxyuYWqudItOIWchFktstcpp3dx1iM/pr0o4HitpeQ== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file +sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file diff --git a/roles/get-ocp/files/install-config.yaml b/roles/get-ocp/files/install-config.yaml index b761fc72..d016fa9c 100644 --- a/roles/get-ocp/files/install-config.yaml +++ b/roles/get-ocp/files/install-config.yaml @@ -23,4 +23,4 @@ platform: none: {} fips: false pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDudjhbUkA7ekvuzZe7WpkkF2KOoLGwWU05Ch6Ty0OWDW0vdbtrG7TIgy8FYOvBnArnIJcy1AP6tpH6BcbmHRIosVFbZ3GrLkKEOroBD+8+6qGzN+fIXBzss20iRvMvDdlDaVJ1BqmTvNnkbeWdgCg1MbICQ1OYOExm+xgvGWHOjoJkUzkw3oBnAnpZHHTkvM2dY0SNtzV9i5FcSvdBM2kindvBpvDZ9PjTGkcFMnbVRpBXUV+axNg4yuX0YWFDWOFlQ4DjvasdN6jMQ2MrWBYtYF1RAklsOhSXTPKAYaxZZsfeEBnMVUFshaDYy7PlbCXcUEVEFEkMlTAbe//bBXx5l+2fVFtOUyhfws28bLg+rlJLivINoLW4EB+ERViYBTCEx5njl9mK9EcPpCJlLTR82kr8vEwE/sJ9ro3dH/aEUQcTkVYqlOCw2lsFZpkZtthArknZiHk91OtWgXj4GsAjihehfBvEL/GbEuO0HTzvl+QiTnC+mL4KcObl31K9LdlAbKA53l6G68py0u7plUervFN8H7rIFbv4ndUTAo2rODty/QoYH8jBu460LLgLyaRfaDFrGitnAdSiCS/8dEr8iAVxj5X5sgcTQlsJYyvMv61YCzItmlipDpbf+M/mJyJuxyuYWqudItOIWchFktstcpp3dx1iM/pr0o4HitpeQ== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file +sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file From aa5e3f7486fdd8726ab9da6bf7428352a84690d9 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 12 Aug 2021 22:51:17 -0500 Subject: [PATCH 256/885] modified install-config.yaml in files and roles/get-ocp to reflect new rootssh key to access bootstrap and nodes --- files/install-config.yaml | 2 +- roles/get-ocp/files/install-config.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/files/install-config.yaml b/files/install-config.yaml index b761fc72..d016fa9c 100644 --- a/files/install-config.yaml +++ b/files/install-config.yaml @@ -23,4 +23,4 @@ platform: none: {} fips: false pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDudjhbUkA7ekvuzZe7WpkkF2KOoLGwWU05Ch6Ty0OWDW0vdbtrG7TIgy8FYOvBnArnIJcy1AP6tpH6BcbmHRIosVFbZ3GrLkKEOroBD+8+6qGzN+fIXBzss20iRvMvDdlDaVJ1BqmTvNnkbeWdgCg1MbICQ1OYOExm+xgvGWHOjoJkUzkw3oBnAnpZHHTkvM2dY0SNtzV9i5FcSvdBM2kindvBpvDZ9PjTGkcFMnbVRpBXUV+axNg4yuX0YWFDWOFlQ4DjvasdN6jMQ2MrWBYtYF1RAklsOhSXTPKAYaxZZsfeEBnMVUFshaDYy7PlbCXcUEVEFEkMlTAbe//bBXx5l+2fVFtOUyhfws28bLg+rlJLivINoLW4EB+ERViYBTCEx5njl9mK9EcPpCJlLTR82kr8vEwE/sJ9ro3dH/aEUQcTkVYqlOCw2lsFZpkZtthArknZiHk91OtWgXj4GsAjihehfBvEL/GbEuO0HTzvl+QiTnC+mL4KcObl31K9LdlAbKA53l6G68py0u7plUervFN8H7rIFbv4ndUTAo2rODty/QoYH8jBu460LLgLyaRfaDFrGitnAdSiCS/8dEr8iAVxj5X5sgcTQlsJYyvMv61YCzItmlipDpbf+M/mJyJuxyuYWqudItOIWchFktstcpp3dx1iM/pr0o4HitpeQ== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file +sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file diff --git a/roles/get-ocp/files/install-config.yaml b/roles/get-ocp/files/install-config.yaml index b761fc72..d016fa9c 100644 --- a/roles/get-ocp/files/install-config.yaml +++ b/roles/get-ocp/files/install-config.yaml @@ -23,4 +23,4 @@ platform: none: {} fips: false pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDudjhbUkA7ekvuzZe7WpkkF2KOoLGwWU05Ch6Ty0OWDW0vdbtrG7TIgy8FYOvBnArnIJcy1AP6tpH6BcbmHRIosVFbZ3GrLkKEOroBD+8+6qGzN+fIXBzss20iRvMvDdlDaVJ1BqmTvNnkbeWdgCg1MbICQ1OYOExm+xgvGWHOjoJkUzkw3oBnAnpZHHTkvM2dY0SNtzV9i5FcSvdBM2kindvBpvDZ9PjTGkcFMnbVRpBXUV+axNg4yuX0YWFDWOFlQ4DjvasdN6jMQ2MrWBYtYF1RAklsOhSXTPKAYaxZZsfeEBnMVUFshaDYy7PlbCXcUEVEFEkMlTAbe//bBXx5l+2fVFtOUyhfws28bLg+rlJLivINoLW4EB+ERViYBTCEx5njl9mK9EcPpCJlLTR82kr8vEwE/sJ9ro3dH/aEUQcTkVYqlOCw2lsFZpkZtthArknZiHk91OtWgXj4GsAjihehfBvEL/GbEuO0HTzvl+QiTnC+mL4KcObl31K9LdlAbKA53l6G68py0u7plUervFN8H7rIFbv4ndUTAo2rODty/QoYH8jBu460LLgLyaRfaDFrGitnAdSiCS/8dEr8iAVxj5X5sgcTQlsJYyvMv61YCzItmlipDpbf+M/mJyJuxyuYWqudItOIWchFktstcpp3dx1iM/pr0o4HitpeQ== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file +sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file From 90c15dbbc241eaf6fe50f33a73a9420278d6e253 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Fri, 13 Aug 2021 17:03:58 -0500 Subject: [PATCH 257/885] Small updates for the implementation of roles. Also added an unfinished playbook for waiting for bootkube to finish before continuing. 
--- main.yaml | 29 +++++----- roles/create_bootstrap/tasks/main.yaml | 18 ++++--- roles/create_compute_nodes/tasks/main.yaml | 36 ++++++++----- roles/create_control_nodes/tasks/main.yaml | 62 ++++++++++++---------- roles/get-ocp/tasks/main.yaml | 24 +++++++++ roles/httpd/tasks/main.yaml | 38 ++----------- roles/prep_kvm_guests/tasks/main.yaml | 9 +++- roles/wait_for_bootkube/tasks/main.yaml | 20 +++++++ 8 files changed, 140 insertions(+), 96 deletions(-) create mode 100644 roles/wait_for_bootkube/tasks/main.yaml diff --git a/main.yaml b/main.yaml index f6ac74fd..2c7b7cd3 100644 --- a/main.yaml +++ b/main.yaml @@ -23,24 +23,29 @@ # vars: "reminder to add vars for remote host to copy ssh key to, bastion in this case" # - copy_ssh_key -- hosts: bastion_server - become: true - roles: +#- hosts: bastion_server + #become: true + #roles: #- install_packages (haproxy, httpd, mod_ssl, bind, bind_utils) don't forget to enable, reminder that bind is called named when being called after install #- firewall (whatever that may be) #- set_selinux_permissive - - dns - - haproxy - - httpd - - get-ocp + #- dns + #- haproxy + #- httpd + #- get-ocp -- hosts: kvm_host +#- hosts: kvm_host + #become: true + #roles: + #- prep_kvm_guests + #- create_bootstrap + #- create_control_nodes + #- create_compute_nodes + +- hosts: bastion_server become: true roles: - - prep_kvm_guests - - create_bootstrap - - create_control_nodes - - create_compute_nodes + - wait_for_bootkube #- hosts: bootstrap_server # become: true diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 200fc052..ed6a6dc4 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -1,9 +1,15 @@ --- -- name: virtualize bootstrap - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - - name: boot bootstrap - command: virt-install --boot 
kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images + command: | + virt-install --name bootstrap + --disk size=100 --ram 16000 --cpu host --vcpus 4 + --os-type linux --os-variant rhel8.0 + --network network=macvtap-net + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.133::9.60.86.1:255.255.254.0:bootstrap:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign" + --noautoconsole + +- name: pause 15 minutes + pause: + minutes: 15 \ No newline at end of file diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index ec726a66..8d9af69f 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -1,21 +1,29 @@ --- -- name: virtualize compute-0 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G - -- name: virtualize compute-1 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 
/var/lib/libvirt/images/compute-1.qcow2 100G - - name: install CoreOS on compute-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images + command: | + virt-install --name compute-0 + --disk size=100 --ram 16000 --cpu host --vcpus 4 + --os-type linux --os-variant rhel8.0 + --network network=macvtap-net + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.135::9.60.86.1:255.255.254.0:compute-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" + --noautoconsole -- name: pause 8 minutes +- name: pause 15 minutes pause: - minutes: 8 + minutes: 15 - name: install CoreOS on compute-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 
--memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images + command: | + virt-install --name compute-1 + --disk size=100 --ram 16000 --cpu host --vcpus 4 + --os-type linux --os-variant rhel8.0 + --network network=macvtap-net + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.134::9.60.86.1:255.255.254.0:compute-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" + --noautoconsole + +- name: pause 15 minutes + pause: + minutes: 15 \ No newline at end of file diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index 190c3c05..f92cfe4f 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -1,37 +1,43 @@ --- -- name: virtualize control-0 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G - -- name: virtualize control-1 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G - -- name: virtualize control-2 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G - - name: install CoreOS on control-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda 
coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - -- name: pause 8 minutes + command: | + virt-install --name control-0 + --disk size=100 --ram 16000 --cpu host --vcpus 4 + --os-type linux --os-variant rhel8.0 + --network network=macvtap-net + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.138::9.60.86.1:255.255.254.0:control-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" + --noautoconsole + +- name: pause 15 minutes pause: - minutes: 8 - + minutes: 15 + - name: install CoreOS on control-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device 
virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - -- name: pause 8 minutes + command: | + virt-install --name control-1 + --disk size=100 --ram 16000 --cpu host --vcpus 4 + --os-type linux --os-variant rhel8.0 + --network network=macvtap-net + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.137::9.60.86.1:255.255.254.0:control-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" + --noautoconsole + +- name: pause 15 minutes pause: - minutes: 8 + minutes: 15 - name: install CoreOS on control-2 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - -- name: pause 8 minutes + command: | + virt-install --name control-2 + --disk size=100 --ram 16000 --cpu host --vcpus 4 + --os-type linux --os-variant rhel8.0 + --network network=macvtap-net + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda 
coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.136::9.60.86.1:255.255.254.0:control-2:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" + --noautoconsole + +- name: pause 15 minutes pause: - minutes: 8 + minutes: 15 diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index 77c029be..4e6c5a26 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -1,5 +1,29 @@ --- +- name: create directory bin for mirrors + file: + path: /var/www/html/bin + state: directory + mode: '0755' + +- name: get ocp kernel + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x + dest: /var/www/html/bin + mode: '0755' + +- name: get ocp initramfs + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + dest: /var/www/html/bin + mode: '0755' + +- name: get ocp rootfs + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + dest: /var/www/html/bin + mode: '0755' + - name: create OCP download landing directory file: path: /ocpinst/ diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index 38bafe5f..0a276a9d 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -48,37 +48,7 @@ # port: '4443' # proto: tcp -- name: create directory bin for mirrors - file: - path: /var/www/html/bin - state: directory - mode: '0755' - -- name: create directory bootstrap for mirrors - file: - path: /var/www/html/bootstrap - state: directory - mode: '0755' - -- name: get mirrors 1 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x - dest: /var/www/html/bin - mode: '0755' - -- name: get mirrors 2 - get_url: - url: 
https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img - dest: /var/www/html/bin - mode: '0755' - -- name: get mirrors 3 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/www/html/bin - mode: '0755' - -- name: check to make sure httpd is started - service: - name: httpd - state: restarted +#- name: check to make sure httpd is started +# service: +# name: httpd +# state: restarted diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index e8e371ff..da0d795d 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -4,24 +4,29 @@ get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images/ + force: yes + mode: '0755' - name: Unzip rhcos qcow2 files - command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz + command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz - name: get rhcos initramfs image get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/lib/libvirt/images/ mode: '0755' + force: yes - name: get rhcos kernel get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/lib/libvirt/images/ mode: '0755' + force: yes - name: get rhcos rootfs image get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/lib/libvirt/images/ - mode: '0755' \ No newline at end of file + mode: '0755' + force: yes \ No newline at end of file diff --git a/roles/wait_for_bootkube/tasks/main.yaml b/roles/wait_for_bootkube/tasks/main.yaml new file mode 100644 index 00000000..25e47d5a --- /dev/null +++ 
b/roles/wait_for_bootkube/tasks/main.yaml @@ -0,0 +1,20 @@ +--- + +- name: ssh to bootstrap from bastion + command: ssh core@9.60.87.133 + +#- name: Wait 30 minutes for bootkube to connect nodes. May take up to 45 minutes. + #pause: + #minutes: 1 + +#1800 + +- name: Start checking for bootkube to complete connecting nodes. Checks every 2 minutes. + command: journalctl -u bootkube.service + register: bootkube_status + until: bootkube_status.stdout.find("bootkube.service complete") != -1 + retries: 15 + delay: 1 + +#retries: 15 +#delay: 120 \ No newline at end of file From 12255f27926ee3564e521c6d1d78c1239f56bf56 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Fri, 13 Aug 2021 17:03:58 -0500 Subject: [PATCH 258/885] Small updates for the implementation of roles. Also added an unfinished playbook for waiting for bootkube to finish before continuing. --- main.yaml | 29 +++++----- roles/create_bootstrap/tasks/main.yaml | 18 ++++--- roles/create_compute_nodes/tasks/main.yaml | 36 ++++++++----- roles/create_control_nodes/tasks/main.yaml | 62 ++++++++++++---------- roles/get-ocp/tasks/main.yaml | 24 +++++++++ roles/httpd/tasks/main.yaml | 38 ++----------- roles/prep_kvm_guests/tasks/main.yaml | 9 +++- roles/wait_for_bootkube/tasks/main.yaml | 20 +++++++ 8 files changed, 140 insertions(+), 96 deletions(-) create mode 100644 roles/wait_for_bootkube/tasks/main.yaml diff --git a/main.yaml b/main.yaml index f6ac74fd..2c7b7cd3 100644 --- a/main.yaml +++ b/main.yaml @@ -23,24 +23,29 @@ # vars: "reminder to add vars for remote host to copy ssh key to, bastion in this case" # - copy_ssh_key -- hosts: bastion_server - become: true - roles: +#- hosts: bastion_server + #become: true + #roles: #- install_packages (haproxy, httpd, mod_ssl, bind, bind_utils) don't forget to enable, reminder that bind is called named when being called after install #- firewall (whatever that may be) #- set_selinux_permissive - - dns - - haproxy - - httpd - - get-ocp + #- dns + #- haproxy + #- httpd + #- 
get-ocp -- hosts: kvm_host +#- hosts: kvm_host + #become: true + #roles: + #- prep_kvm_guests + #- create_bootstrap + #- create_control_nodes + #- create_compute_nodes + +- hosts: bastion_server become: true roles: - - prep_kvm_guests - - create_bootstrap - - create_control_nodes - - create_compute_nodes + - wait_for_bootkube #- hosts: bootstrap_server # become: true diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 200fc052..ed6a6dc4 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -1,9 +1,15 @@ --- -- name: virtualize bootstrap - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - - name: boot bootstrap - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images + command: | + virt-install --name bootstrap + --disk size=100 --ram 16000 --cpu host --vcpus 4 + --os-type linux --os-variant rhel8.0 + --network network=macvtap-net + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img 
ip=9.60.87.133::9.60.86.1:255.255.254.0:bootstrap:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign" + --noautoconsole + +- name: pause 15 minutes + pause: + minutes: 15 \ No newline at end of file diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index ec726a66..8d9af69f 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -1,21 +1,29 @@ --- -- name: virtualize compute-0 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G - -- name: virtualize compute-1 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G - - name: install CoreOS on compute-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images + command: | + virt-install --name compute-0 + --disk size=100 --ram 16000 --cpu host --vcpus 4 + --os-type linux --os-variant rhel8.0 + --network network=macvtap-net + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda 
coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.135::9.60.86.1:255.255.254.0:compute-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" + --noautoconsole -- name: pause 8 minutes +- name: pause 15 minutes pause: - minutes: 8 + minutes: 15 - name: install CoreOS on compute-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images + command: | + virt-install --name compute-1 + --disk size=100 --ram 16000 --cpu host --vcpus 4 + --os-type linux --os-variant rhel8.0 + --network network=macvtap-net + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.134::9.60.86.1:255.255.254.0:compute-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" + --noautoconsole + +- name: pause 15 minutes + pause: + minutes: 15 \ No newline at end of file diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index 190c3c05..f92cfe4f 100644 --- 
a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -1,37 +1,43 @@ --- -- name: virtualize control-0 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G - -- name: virtualize control-1 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G - -- name: virtualize control-2 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G - - name: install CoreOS on control-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - -- name: pause 8 minutes + command: | + virt-install --name control-0 + --disk size=100 --ram 16000 --cpu host --vcpus 4 + --os-type linux --os-variant rhel8.0 + --network network=macvtap-net + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.138::9.60.86.1:255.255.254.0:control-0:enc1:none:1500 nameserver=9.60.87.139 
coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" + --noautoconsole + +- name: pause 15 minutes pause: - minutes: 8 - + minutes: 15 + - name: install CoreOS on control-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - -- name: pause 8 minutes + command: | + virt-install --name control-1 + --disk size=100 --ram 16000 --cpu host --vcpus 4 + --os-type linux --os-variant rhel8.0 + --network network=macvtap-net + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.137::9.60.86.1:255.255.254.0:control-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" + --noautoconsole + +- name: pause 15 minutes pause: - minutes: 8 + minutes: 15 - name: install CoreOS on control-2 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign 
ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - -- name: pause 8 minutes + command: | + virt-install --name control-2 + --disk size=100 --ram 16000 --cpu host --vcpus 4 + --os-type linux --os-variant rhel8.0 + --network network=macvtap-net + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.136::9.60.86.1:255.255.254.0:control-2:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" + --noautoconsole + +- name: pause 15 minutes pause: - minutes: 8 + minutes: 15 diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index 77c029be..4e6c5a26 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -1,5 +1,29 @@ --- +- name: create directory bin for mirrors + file: + path: /var/www/html/bin + state: directory + mode: '0755' + +- name: get ocp kernel + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x + dest: /var/www/html/bin + mode: '0755' + +- name: get ocp initramfs + get_url: + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + dest: /var/www/html/bin + mode: '0755' + +- name: get ocp rootfs + get_url: + url: 
https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + dest: /var/www/html/bin + mode: '0755' + - name: create OCP download landing directory file: path: /ocpinst/ diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index 38bafe5f..0a276a9d 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -48,37 +48,7 @@ # port: '4443' # proto: tcp -- name: create directory bin for mirrors - file: - path: /var/www/html/bin - state: directory - mode: '0755' - -- name: create directory bootstrap for mirrors - file: - path: /var/www/html/bootstrap - state: directory - mode: '0755' - -- name: get mirrors 1 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x - dest: /var/www/html/bin - mode: '0755' - -- name: get mirrors 2 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img - dest: /var/www/html/bin - mode: '0755' - -- name: get mirrors 3 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/www/html/bin - mode: '0755' - -- name: check to make sure httpd is started - service: - name: httpd - state: restarted +#- name: check to make sure httpd is started +# service: +# name: httpd +# state: restarted diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index e8e371ff..da0d795d 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -4,24 +4,29 @@ get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images/ + force: yes + mode: '0755' - name: Unzip rhcos qcow2 files - command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz + command: gunzip 
/var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz - name: get rhcos initramfs image get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/lib/libvirt/images/ mode: '0755' + force: yes - name: get rhcos kernel get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/lib/libvirt/images/ mode: '0755' + force: yes - name: get rhcos rootfs image get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/lib/libvirt/images/ - mode: '0755' \ No newline at end of file + mode: '0755' + force: yes \ No newline at end of file diff --git a/roles/wait_for_bootkube/tasks/main.yaml b/roles/wait_for_bootkube/tasks/main.yaml new file mode 100644 index 00000000..25e47d5a --- /dev/null +++ b/roles/wait_for_bootkube/tasks/main.yaml @@ -0,0 +1,20 @@ +--- + +- name: ssh to bootstrap from bastion + command: ssh core@9.60.87.133 + +#- name: Wait 30 minutes for bootkube to connect nodes. May take up to 45 minutes. + #pause: + #minutes: 1 + +#1800 + +- name: Start checking for bootkube to complete connecting nodes. Checks every 2 minutes. + command: journalctl -u bootkube.service + register: bootkube_status + until: bootkube_status.stdout.find("bootkube.service complete") != -1 + retries: 15 + delay: 1 + +#retries: 15 +#delay: 120 \ No newline at end of file From a0adc54e319d37411511f296c5ea17d04c7387da Mon Sep 17 00:00:00 2001 From: jacobemery Date: Fri, 13 Aug 2021 18:23:35 -0500 Subject: [PATCH 259/885] Working on playbooks that wait for bootkube to finish before continuing. Not ready yet. 
--- main.yaml | 56 ++++++++++++------------- roles/ssh_config_jump/tasks/main.yaml | 30 +++++++++++++ roles/wait_for_bootkube/tasks/main.yaml | 4 ++ 3 files changed, 62 insertions(+), 28 deletions(-) create mode 100644 roles/ssh_config_jump/tasks/main.yaml diff --git a/main.yaml b/main.yaml index 2c7b7cd3..fec82573 100644 --- a/main.yaml +++ b/main.yaml @@ -9,52 +9,52 @@ # - ssh_key_gen # - copy_ssh_key -#- hosts: kvm_host - #become: true - #roles: +- hosts: kvm_host + become: true + roles: #- install_packages (libvirt, libvirt-devel, libvirt-daemon-kvm, qemu-kvm, virt-manager, libvirt-daemon-config-network, libvirt-client, qemu-img) #- enable libvirt - #- macvtap - #- create_bastion (reminder to eventually use boot instructions) + - macvtap + - create_bastion #(reminder to eventually use boot instructions) #- hosts: workstation # become: true +# gather_facts: no # roles: # vars: "reminder to add vars for remote host to copy ssh key to, bastion in this case" # - copy_ssh_key -#- hosts: bastion_server - #become: true - #roles: +- hosts: bastion_server + become: true + roles: #- install_packages (haproxy, httpd, mod_ssl, bind, bind_utils) don't forget to enable, reminder that bind is called named when being called after install #- firewall (whatever that may be) #- set_selinux_permissive - #- dns - #- haproxy - #- httpd - #- get-ocp + - dns + - haproxy + - httpd + - get-ocp -#- hosts: kvm_host - #become: true - #roles: - #- prep_kvm_guests - #- create_bootstrap - #- create_control_nodes - #- create_compute_nodes - -- hosts: bastion_server +- hosts: kvm_host become: true + gather_facts: no roles: - - wait_for_bootkube + - prep_kvm_guests + - create_bootstrap + - create_control_nodes + - create_compute_nodes -#- hosts: bootstrap_server -# become: true -# roles: -# - verify_bootstrap +#- hosts: workstation + #become: true + #gather_facts: yes + #roles: + #- ssh_config_jump #- hosts: bastion_server -# become: true -# roles: + #become: true + #gather_facts: no + #roles: + 
#- wait_for_bootkube # - remove_bootstrap # - approve_certs # - verify_installation diff --git a/roles/ssh_config_jump/tasks/main.yaml b/roles/ssh_config_jump/tasks/main.yaml new file mode 100644 index 00000000..a556b0af --- /dev/null +++ b/roles/ssh_config_jump/tasks/main.yaml @@ -0,0 +1,30 @@ +## will not work as is. Need to research using ProxyCommands or SSHuttle to run commands on bootstrap via the bastion jump host + +--- + +- name: Check that the ssh_config exists + stat: + path: ~/.ssh/config + register: ssh_config + +- name: Create ssh config file, if it doesnt exist already + file: + path: ~/.ssh/config + state: touch + when: not ssh_config.stat.exists + +- name: Insert ssh keys for jump host configuration in /ssh/config + blockinfile: + path: ~/.ssh/config + block: | + Host bastion_server + HostName 9.60.87.139 + IdentityFile ~/.ssh/ansible.pub + User root + + Host bootstrap_server + HostName 9.60.87.133 + IdentityFile ~/.ssh/ansible.pub + User core + ProxyJump bastion_server + \ No newline at end of file diff --git a/roles/wait_for_bootkube/tasks/main.yaml b/roles/wait_for_bootkube/tasks/main.yaml index 25e47d5a..0899e363 100644 --- a/roles/wait_for_bootkube/tasks/main.yaml +++ b/roles/wait_for_bootkube/tasks/main.yaml @@ -1,3 +1,5 @@ +#Will not work as is. Waiting to figure out sshuttle or ProxyCommands to run commands on the bootstrap from bastion jump host. + --- - name: ssh to bootstrap from bastion @@ -7,6 +9,7 @@ #pause: #minutes: 1 +#above is for testing. Use below when above works. #1800 - name: Start checking for bootkube to complete connecting nodes. Checks every 2 minutes. @@ -16,5 +19,6 @@ retries: 15 delay: 1 +#above is for testing. Use below when above works. #retries: 15 #delay: 120 \ No newline at end of file From eeec999484704a479afe8225c37d59a236164168 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Fri, 13 Aug 2021 18:23:35 -0500 Subject: [PATCH 260/885] Working on playbooks that wait for bootkube to finish before continuing. 
Not ready yet. --- main.yaml | 56 ++++++++++++------------- roles/ssh_config_jump/tasks/main.yaml | 30 +++++++++++++ roles/wait_for_bootkube/tasks/main.yaml | 4 ++ 3 files changed, 62 insertions(+), 28 deletions(-) create mode 100644 roles/ssh_config_jump/tasks/main.yaml diff --git a/main.yaml b/main.yaml index 2c7b7cd3..fec82573 100644 --- a/main.yaml +++ b/main.yaml @@ -9,52 +9,52 @@ # - ssh_key_gen # - copy_ssh_key -#- hosts: kvm_host - #become: true - #roles: +- hosts: kvm_host + become: true + roles: #- install_packages (libvirt, libvirt-devel, libvirt-daemon-kvm, qemu-kvm, virt-manager, libvirt-daemon-config-network, libvirt-client, qemu-img) #- enable libvirt - #- macvtap - #- create_bastion (reminder to eventually use boot instructions) + - macvtap + - create_bastion #(reminder to eventually use boot instructions) #- hosts: workstation # become: true +# gather_facts: no # roles: # vars: "reminder to add vars for remote host to copy ssh key to, bastion in this case" # - copy_ssh_key -#- hosts: bastion_server - #become: true - #roles: +- hosts: bastion_server + become: true + roles: #- install_packages (haproxy, httpd, mod_ssl, bind, bind_utils) don't forget to enable, reminder that bind is called named when being called after install #- firewall (whatever that may be) #- set_selinux_permissive - #- dns - #- haproxy - #- httpd - #- get-ocp + - dns + - haproxy + - httpd + - get-ocp -#- hosts: kvm_host - #become: true - #roles: - #- prep_kvm_guests - #- create_bootstrap - #- create_control_nodes - #- create_compute_nodes - -- hosts: bastion_server +- hosts: kvm_host become: true + gather_facts: no roles: - - wait_for_bootkube + - prep_kvm_guests + - create_bootstrap + - create_control_nodes + - create_compute_nodes -#- hosts: bootstrap_server -# become: true -# roles: -# - verify_bootstrap +#- hosts: workstation + #become: true + #gather_facts: yes + #roles: + #- ssh_config_jump #- hosts: bastion_server -# become: true -# roles: + #become: true + #gather_facts: 
no + #roles: + #- wait_for_bootkube # - remove_bootstrap # - approve_certs # - verify_installation diff --git a/roles/ssh_config_jump/tasks/main.yaml b/roles/ssh_config_jump/tasks/main.yaml new file mode 100644 index 00000000..a556b0af --- /dev/null +++ b/roles/ssh_config_jump/tasks/main.yaml @@ -0,0 +1,30 @@ +## will not work as is. Need to research using ProxyCommands or SSHuttle to run commands on bootstrap via the bastion jump host + +--- + +- name: Check that the ssh_config exists + stat: + path: ~/.ssh/config + register: ssh_config + +- name: Create ssh config file, if it doesnt exist already + file: + path: ~/.ssh/config + state: touch + when: not ssh_config.stat.exists + +- name: Insert ssh keys for jump host configuration in /ssh/config + blockinfile: + path: ~/.ssh/config + block: | + Host bastion_server + HostName 9.60.87.139 + IdentityFile ~/.ssh/ansible.pub + User root + + Host bootstrap_server + HostName 9.60.87.133 + IdentityFile ~/.ssh/ansible.pub + User core + ProxyJump bastion_server + \ No newline at end of file diff --git a/roles/wait_for_bootkube/tasks/main.yaml b/roles/wait_for_bootkube/tasks/main.yaml index 25e47d5a..0899e363 100644 --- a/roles/wait_for_bootkube/tasks/main.yaml +++ b/roles/wait_for_bootkube/tasks/main.yaml @@ -1,3 +1,5 @@ +#Will not work as is. Waiting to figure out sshuttle or ProxyCommands to run commands on the bootstrap from bastion jump host. + --- - name: ssh to bootstrap from bastion @@ -7,6 +9,7 @@ #pause: #minutes: 1 +#above is for testing. Use below when above works. #1800 - name: Start checking for bootkube to complete connecting nodes. Checks every 2 minutes. @@ -16,5 +19,6 @@ retries: 15 delay: 1 +#above is for testing. Use below when above works. #retries: 15 #delay: 120 \ No newline at end of file From bb82441fb7fd2e08a6131f0e36d627cf53c55ba8 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 16 Aug 2021 14:58:49 -0500 Subject: [PATCH 261/885] Created plays for ssh key generation and copying to remote host. 
Also created idempotent install packages taskbook. --- main.yaml | 58 +++++++++++++++++--------- roles/check_ssh/tasks/main.yaml | 11 +++++ roles/install_packages/tasks/main.yaml | 16 +++++-- roles/main.yaml | 0 roles/ssh_copy_id/tasks/main.yaml | 10 +++++ roles/ssh_key_gen/tasks/main.yaml | 58 ++++++++++++++++++++++++++ 6 files changed, 129 insertions(+), 24 deletions(-) create mode 100644 roles/check_ssh/tasks/main.yaml create mode 100644 roles/main.yaml create mode 100644 roles/ssh_copy_id/tasks/main.yaml create mode 100644 roles/ssh_key_gen/tasks/main.yaml diff --git a/main.yaml b/main.yaml index fec82573..1669ec54 100644 --- a/main.yaml +++ b/main.yaml @@ -1,33 +1,51 @@ --- -#- hosts: workstation -# become: true -# roles: -# vars: "reminder to add vars for remote host to copy ssh key to, kvm_host in this case" -# - install_packages (ansible_galaxy) -# - update_repo_index -# - ssh_key_gen -# - copy_ssh_key +- hosts: localhost + become: true + vars_prompt: + - name: Confirm start of OpenShift cluster automated provisioning process. + prompt: "Type 'YES' to confirm automated provisioning of OpenShift cluster." + default: 'NO' + private: no + when: confirmation != "YES" + pre_tasks: + - name: "Check Confirmation" + fail: msg="Exiting... You must type 'YES' to continue." 
+ when: confirmation != "YES" + vars: + - [ package_1: ansible_galaxy, package_2: sshpass, package_3: ssh-keygen, package_4: ssh-copy-id ] + - remote_host_ip: host + #- other vars needed: ssh_key_filename, remote_machine_password, remote_machine_username, remote_host_ip + roles: + #- ansible_setup (check vars file for undefined vars, run ansible_setup, and fill necessary env vars files) + - install_packages + #- ssh_key_gen + #- ssh_copy_id - hosts: kvm_host become: true + vars: + - [ package_1: libvirt, package_2: libvirt-devel, package_3: libvirt-daemon-kvm, package_4: qemu-kvm, package_5: virt-manager, package_6: libvirt-daemon-config-network, package_7: libvirt-client, package_8: qemu-img ] roles: - #- install_packages (libvirt, libvirt-devel, libvirt-daemon-kvm, qemu-kvm, virt-manager, libvirt-daemon-config-network, libvirt-client, qemu-img) - #- enable libvirt - - macvtap - - create_bastion #(reminder to eventually use boot instructions) + #- check_ssh + - install_packages + #- enable libvirt + - macvtap + - create_bastion #(reminder to eventually use boot instructions) -#- hosts: workstation -# become: true -# gather_facts: no -# roles: -# vars: "reminder to add vars for remote host to copy ssh key to, bastion in this case" -# - copy_ssh_key +- hosts: localhost + become: true + gather_facts: no + vars: + - target: bastion_server + roles: + - ssh_copy_id - hosts: bastion_server become: true + vars: + - [ package_1: haproxy, package_2: httpd, package_3: mod_ssl, package_4: bind, package_5: bind_utils ] #reminder that "bind" is called "named" after install roles: - #- install_packages (haproxy, httpd, mod_ssl, bind, bind_utils) don't forget to enable, reminder that bind is called named when being called after install #- firewall (whatever that may be) #- set_selinux_permissive - dns @@ -44,7 +62,7 @@ - create_control_nodes - create_compute_nodes -#- hosts: workstation +#- hosts: localhost #become: true #gather_facts: yes #roles: diff --git 
a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml new file mode 100644 index 00000000..35adcb55 --- /dev/null +++ b/roles/check_ssh/tasks/main.yaml @@ -0,0 +1,11 @@ +--- + + +- name: check ssh to remote hosts works + shell: "hostname; id" + register: ssh_connection_test + failed_when: ssh_connection_test.rc != 0 + +- name: print the connectivity test results + debug: + var: ssh_connection_test.stdout_lines \ No newline at end of file diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index 13a6dfc2..80394f93 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,6 +1,14 @@ -#Placeholder, incomplete --- -- name: {{ host_name }} - dnf: - name: [{{ package1 }}, {{ package2 }}, +- name: Installing required packages on {{ ansible_facts['host'] }} + ansible.builtin.package: + name: + - {{ package_1 }} + - {{ package_2 or Default(omit) }} + - {{ package_3 or Default(omit) }} + - {{ package_5 or Default(omit) }} + - {{ package_6 or Default(omit) }} + - {{ package_7 or Default(omit) }} + - {{ package_8 or Default(omit) }} + state: present + update_cache: yes diff --git a/roles/main.yaml b/roles/main.yaml new file mode 100644 index 00000000..e69de29b diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml new file mode 100644 index 00000000..623edc7e --- /dev/null +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -0,0 +1,10 @@ +--- + +- name: distribute the ssh key to the remote hosts + shell: "/usr/local/bin/sshpass -p \"{{remote_machine_password}}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ssh_key_filename}}.pub -p {{ remote_host_ip}}.22 \"{{remote_machine_username}}@{{ remote_host_ip }}\"" + register: ssh_copy_id_execution + failed_when: ssh_copy_id_execution.rc != 0 + +- name: Print results of copying ssh id to remote host. 
+ debug: + var: ssh_copy_id_execution diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml new file mode 100644 index 00000000..92d95d72 --- /dev/null +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -0,0 +1,58 @@ +--- + +- name: Check to see if local .ssh directory exists + stat: + path: "~/.ssh" + register: ssh_directory_exists_check + +- name: Print results of .ssh directory check + debug: + var: ssh_directory_exists_check + +- name: Create .ssh local directory if it doesn't already exist + file: + path: "~/.ssh" + state: directory + mode: "0700" + register: ssh_directory_creation + when: ssh_directory_exists_check is defined and ssh_directory_exists_check.stat.exists == false + +- name: Print results of ssh directory creation + debug: + var: ssh_directory_creation + +- name: check .ssh key pair files exist + stat: + path: "~/.ssh/{{item}}" + register: ssh_key_file_exists_check + with_items: + - "{{ssh_key_filename}}" + - "{{ssh_key_filename}}.pub" + +- name: Print results of ssh key pair files check + debug: + var: ssh_key_file_exists_check.results[1].stat.exists + +- name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key + community.crypto.openssh_keypair: + path: ~/.ssh/{{ ssh_key_filename }} + passphrase: "" + register: ssh_key_creation + failed_when: ssh_key_creation.rc != 0 + when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false + +- name: Print results of ssh key pair creation + debug: + var: ssh_key_creation + +- name: add the new ssh key to the ansible.cfg file + lineinfile: + path: ansible.cfg + line: "private_key_file = ~/.ssh/{{ssh_key_filename}}" + state: present + backup: yes + register: ssh_config_file_key_addition + +- name: Print results of adding ssh key to ansible.cfg file + debug: + var: ssh_config_file_key_addition From 9f2901d2e94820bce32c249b960bd502513e2b2d 
Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 16 Aug 2021 14:58:49 -0500 Subject: [PATCH 262/885] Created plays for ssh key generation and copying to remote host. Also created idempotent install packages taskbook. --- main.yaml | 58 +++++++++++++++++--------- roles/check_ssh/tasks/main.yaml | 11 +++++ roles/install_packages/tasks/main.yaml | 16 +++++-- roles/main.yaml | 0 roles/ssh_copy_id/tasks/main.yaml | 10 +++++ roles/ssh_key_gen/tasks/main.yaml | 58 ++++++++++++++++++++++++++ 6 files changed, 129 insertions(+), 24 deletions(-) create mode 100644 roles/check_ssh/tasks/main.yaml create mode 100644 roles/main.yaml create mode 100644 roles/ssh_copy_id/tasks/main.yaml create mode 100644 roles/ssh_key_gen/tasks/main.yaml diff --git a/main.yaml b/main.yaml index fec82573..1669ec54 100644 --- a/main.yaml +++ b/main.yaml @@ -1,33 +1,51 @@ --- -#- hosts: workstation -# become: true -# roles: -# vars: "reminder to add vars for remote host to copy ssh key to, kvm_host in this case" -# - install_packages (ansible_galaxy) -# - update_repo_index -# - ssh_key_gen -# - copy_ssh_key +- hosts: localhost + become: true + vars_prompt: + - name: Confirm start of OpenShift cluster automated provisioning process. + prompt: "Type 'YES' to confirm automated provisioning of OpenShift cluster." + default: 'NO' + private: no + when: confirmation != "YES" + pre_tasks: + - name: "Check Confirmation" + fail: msg="Exiting... You must type 'YES' to continue." 
+ when: confirmation != "YES" + vars: + - [ package_1: ansible_galaxy, package_2: sshpass, package_3: ssh-keygen, package_4: ssh-copy-id ] + - remote_host_ip: host + #- other vars needed: ssh_key_filename, remote_machine_password, remote_machine_username, remote_host_ip + roles: + #- ansible_setup (check vars file for undefined vars, run ansible_setup, and fill necessary env vars files) + - install_packages + #- ssh_key_gen + #- ssh_copy_id - hosts: kvm_host become: true + vars: + - [ package_1: libvirt, package_2: libvirt-devel, package_3: libvirt-daemon-kvm, package_4: qemu-kvm, package_5: virt-manager, package_6: libvirt-daemon-config-network, package_7: libvirt-client, package_8: qemu-img ] roles: - #- install_packages (libvirt, libvirt-devel, libvirt-daemon-kvm, qemu-kvm, virt-manager, libvirt-daemon-config-network, libvirt-client, qemu-img) - #- enable libvirt - - macvtap - - create_bastion #(reminder to eventually use boot instructions) + #- check_ssh + - install_packages + #- enable libvirt + - macvtap + - create_bastion #(reminder to eventually use boot instructions) -#- hosts: workstation -# become: true -# gather_facts: no -# roles: -# vars: "reminder to add vars for remote host to copy ssh key to, bastion in this case" -# - copy_ssh_key +- hosts: localhost + become: true + gather_facts: no + vars: + - target: bastion_server + roles: + - ssh_copy_id - hosts: bastion_server become: true + vars: + - [ package_1: haproxy, package_2: httpd, package_3: mod_ssl, package_4: bind, package_5: bind_utils ] #reminder that "bind" is called "named" after install roles: - #- install_packages (haproxy, httpd, mod_ssl, bind, bind_utils) don't forget to enable, reminder that bind is called named when being called after install #- firewall (whatever that may be) #- set_selinux_permissive - dns @@ -44,7 +62,7 @@ - create_control_nodes - create_compute_nodes -#- hosts: workstation +#- hosts: localhost #become: true #gather_facts: yes #roles: diff --git 
a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml new file mode 100644 index 00000000..35adcb55 --- /dev/null +++ b/roles/check_ssh/tasks/main.yaml @@ -0,0 +1,11 @@ +--- + + +- name: check ssh to remote hosts works + shell: "hostname; id" + register: ssh_connection_test + failed_when: ssh_connection_test.rc != 0 + +- name: print the connectivity test results + debug: + var: ssh_connection_test.stdout_lines \ No newline at end of file diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index 13a6dfc2..80394f93 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,6 +1,14 @@ -#Placeholder, incomplete --- -- name: {{ host_name }} - dnf: - name: [{{ package1 }}, {{ package2 }}, +- name: Installing required packages on {{ ansible_facts['host'] }} + ansible.builtin.package: + name: + - {{ package_1 }} + - {{ package_2 or Default(omit) }} + - {{ package_3 or Default(omit) }} + - {{ package_5 or Default(omit) }} + - {{ package_6 or Default(omit) }} + - {{ package_7 or Default(omit) }} + - {{ package_8 or Default(omit) }} + state: present + update_cache: yes diff --git a/roles/main.yaml b/roles/main.yaml new file mode 100644 index 00000000..e69de29b diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml new file mode 100644 index 00000000..623edc7e --- /dev/null +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -0,0 +1,10 @@ +--- + +- name: distribute the ssh key to the remote hosts + shell: "/usr/local/bin/sshpass -p \"{{remote_machine_password}}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ssh_key_filename}}.pub -p {{ remote_host_ip}}.22 \"{{remote_machine_username}}@{{ remote_host_ip }}\"" + register: ssh_copy_id_execution + failed_when: ssh_copy_id_execution.rc != 0 + +- name: Print results of copying ssh id to remote host. 
+ debug: + var: ssh_copy_id_execution diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml new file mode 100644 index 00000000..92d95d72 --- /dev/null +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -0,0 +1,58 @@ +--- + +- name: Check to see if local .ssh directory exists + stat: + path: "~/.ssh" + register: ssh_directory_exists_check + +- name: Print results of .ssh directory check + debug: + var: ssh_directory_exists_check + +- name: Create .ssh local directory if it doesn't already exist + file: + path: "~/.ssh" + state: directory + mode: "0700" + register: ssh_directory_creation + when: ssh_directory_exists_check is defined and ssh_directory_exists_check.stat.exists == false + +- name: Print results of ssh directory creation + debug: + var: ssh_directory_creation + +- name: check .ssh key pair files exist + stat: + path: "~/.ssh/{{item}}" + register: ssh_key_file_exists_check + with_items: + - "{{ssh_key_filename}}" + - "{{ssh_key_filename}}.pub" + +- name: Print results of ssh key pair files check + debug: + var: ssh_key_file_exists_check.results[1].stat.exists + +- name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key + community.crypto.openssh_keypair: + path: ~/.ssh/{{ ssh_key_filename }} + passphrase: "" + register: ssh_key_creation + failed_when: ssh_key_creation.rc != 0 + when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false + +- name: Print results of ssh key pair creation + debug: + var: ssh_key_creation + +- name: add the new ssh key to the ansible.cfg file + lineinfile: + path: ansible.cfg + line: "private_key_file = ~/.ssh/{{ssh_key_filename}}" + state: present + backup: yes + register: ssh_config_file_key_addition + +- name: Print results of adding ssh key to ansible.cfg file + debug: + var: ssh_config_file_key_addition From 23144a56408c47237ba914dbefecefc9e39cfc6b 
Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 16 Aug 2021 14:59:56 -0500 Subject: [PATCH 263/885] Deleted unnecessary main.yaml file in /roles directory. --- roles/main.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 roles/main.yaml diff --git a/roles/main.yaml b/roles/main.yaml deleted file mode 100644 index e69de29b..00000000 From e4d3b5f31e8c33faa82b83afd46822ec5f400246 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 16 Aug 2021 14:59:56 -0500 Subject: [PATCH 264/885] Deleted unnecessary main.yaml file in /roles directory. --- roles/main.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 roles/main.yaml diff --git a/roles/main.yaml b/roles/main.yaml deleted file mode 100644 index e69de29b..00000000 From d896a4079eabd26edfe565c58b7d8006aea38c8b Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 16 Aug 2021 20:15:01 -0500 Subject: [PATCH 265/885] Playbook tweaks for the implementation of automating ssh key gen and copying to remote servers. --- inventory | 4 ++-- main.yaml | 30 ++++++++++---------------- roles/check_ssh/tasks/main.yaml | 1 - roles/install_packages/tasks/main.yaml | 17 ++++++++------- roles/ssh_copy_id/tasks/main.yaml | 2 +- 5 files changed, 23 insertions(+), 31 deletions(-) diff --git a/inventory b/inventory index cd608040..74d02d28 100644 --- a/inventory +++ b/inventory @@ -1,5 +1,5 @@ -#[workstation] -#{{ workstation_ip }} +[localhost] +127.0.0.1 [kvm_host] 9.60.87.132 diff --git a/main.yaml b/main.yaml index 1669ec54..90e80942 100644 --- a/main.yaml +++ b/main.yaml @@ -1,33 +1,24 @@ --- - hosts: localhost - become: true - vars_prompt: - - name: Confirm start of OpenShift cluster automated provisioning process. - prompt: "Type 'YES' to confirm automated provisioning of OpenShift cluster." - default: 'NO' - private: no - when: confirmation != "YES" - pre_tasks: - - name: "Check Confirmation" - fail: msg="Exiting... You must type 'YES' to continue." 
- when: confirmation != "YES" + connection: local + become: false vars: - - [ package_1: ansible_galaxy, package_2: sshpass, package_3: ssh-keygen, package_4: ssh-copy-id ] - - remote_host_ip: host - #- other vars needed: ssh_key_filename, remote_machine_password, remote_machine_username, remote_host_ip + - packages: [ 'ansible_galaxy', 'sshpass', 'ssh-keygen', 'ssh-copy-id' ] + #- ssh_target_ip: host_vars[kvm_host][ip] #this is not right + #- other vars needed: ssh_key_filename, ssh_target_password, ssh_target_username, ssh_target_ip roles: #- ansible_setup (check vars file for undefined vars, run ansible_setup, and fill necessary env vars files) - install_packages - #- ssh_key_gen + - ssh_key_gen #- ssh_copy_id - hosts: kvm_host become: true vars: - - [ package_1: libvirt, package_2: libvirt-devel, package_3: libvirt-daemon-kvm, package_4: qemu-kvm, package_5: virt-manager, package_6: libvirt-daemon-config-network, package_7: libvirt-client, package_8: qemu-img ] + - packages: [ 'libvirt', 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img' ] roles: - #- check_ssh + - check_ssh - install_packages #- enable libvirt - macvtap @@ -37,15 +28,16 @@ become: true gather_facts: no vars: - - target: bastion_server + #- ssh_target_ip: host_vars[kvm_host][ip] #this is not right roles: - ssh_copy_id - hosts: bastion_server become: true vars: - - [ package_1: haproxy, package_2: httpd, package_3: mod_ssl, package_4: bind, package_5: bind_utils ] #reminder that "bind" is called "named" after install + - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] #reminder that "bind" is called "named" after install roles: + - check_ssh #- firewall (whatever that may be) #- set_selinux_permissive - dns diff --git a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml index 35adcb55..bf632676 100644 --- a/roles/check_ssh/tasks/main.yaml +++ b/roles/check_ssh/tasks/main.yaml @@ -1,6 
+1,5 @@ --- - - name: check ssh to remote hosts works shell: "hostname; id" register: ssh_connection_test diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index 80394f93..e0c03d19 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,14 +1,15 @@ --- -- name: Installing required packages on {{ ansible_facts['host'] }} +- name: Installing required packages on "{{ ansible_facts['host'] }}" ansible.builtin.package: name: - - {{ package_1 }} - - {{ package_2 or Default(omit) }} - - {{ package_3 or Default(omit) }} - - {{ package_5 or Default(omit) }} - - {{ package_6 or Default(omit) }} - - {{ package_7 or Default(omit) }} - - {{ package_8 or Default(omit) }} + - "{{ packages[0] }}" + - "{{ packages[1] | default(omit) }}" + - "{{ packages[2] | default(omit) }}" + - "{{ packages[3] | default(omit) }}" + - "{{ packages[4] | default(omit) }}" + - "{{ packages[5] | default(omit) }}" + - "{{ packages[6] | default(omit) }}" + - "{{ packages[7] | default(omit) }}" state: present update_cache: yes diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index 623edc7e..a951d33f 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: distribute the ssh key to the remote hosts - shell: "/usr/local/bin/sshpass -p \"{{remote_machine_password}}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ssh_key_filename}}.pub -p {{ remote_host_ip}}.22 \"{{remote_machine_username}}@{{ remote_host_ip }}\"" + shell: "/usr/local/bin/sshpass -p \"{{ssh_target_password}}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ssh_key_filename}}.pub -p 22 \"{{ssh_target_username}}@{{ ssh_target_ip }}\"" register: ssh_copy_id_execution failed_when: ssh_copy_id_execution.rc != 0 From 7b1e34bc3908e31988635cbc07b46885ede88174 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 16 Aug 2021 20:15:01 -0500
Subject: [PATCH 266/885] Playbook tweaks for the implementation of automating ssh key gen and copying to remote servers. --- inventory | 4 ++-- main.yaml | 30 ++++++++++---------------- roles/check_ssh/tasks/main.yaml | 1 - roles/install_packages/tasks/main.yaml | 17 ++++++++------- roles/ssh_copy_id/tasks/main.yaml | 2 +- 5 files changed, 23 insertions(+), 31 deletions(-) diff --git a/inventory b/inventory index cd608040..74d02d28 100644 --- a/inventory +++ b/inventory @@ -1,5 +1,5 @@ -#[workstation] -#{{ workstation_ip }} +[localhost] +127.0.0.1 [kvm_host] 9.60.87.132 diff --git a/main.yaml b/main.yaml index 1669ec54..90e80942 100644 --- a/main.yaml +++ b/main.yaml @@ -1,33 +1,24 @@ --- - hosts: localhost - become: true - vars_prompt: - - name: Confirm start of OpenShift cluster automated provisioning process. - prompt: "Type 'YES' to confirm automated provisioning of OpenShift cluster." - default: 'NO' - private: no - when: confirmation != "YES" - pre_tasks: - - name: "Check Confirmation" - fail: msg="Exiting... You must type 'YES' to continue." 
- when: confirmation != "YES" + connection: local + become: false vars: - - [ package_1: ansible_galaxy, package_2: sshpass, package_3: ssh-keygen, package_4: ssh-copy-id ] - - remote_host_ip: host - #- other vars needed: ssh_key_filename, remote_machine_password, remote_machine_username, remote_host_ip + - packages: [ 'ansible_galaxy', 'sshpass', 'ssh-keygen', 'ssh-copy-id' ] + #- ssh_target_ip: host_vars[kvm_host][ip] #this is not right + #- other vars needed: ssh_key_filename, ssh_target_password, ssh_target_username, ssh_target_ip roles: #- ansible_setup (check vars file for undefined vars, run ansible_setup, and fill necessary env vars files) - install_packages - #- ssh_key_gen + - ssh_key_gen #- ssh_copy_id - hosts: kvm_host become: true vars: - - [ package_1: libvirt, package_2: libvirt-devel, package_3: libvirt-daemon-kvm, package_4: qemu-kvm, package_5: virt-manager, package_6: libvirt-daemon-config-network, package_7: libvirt-client, package_8: qemu-img ] + - packages: [ 'libvirt', 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img' ] roles: - #- check_ssh + - check_ssh - install_packages #- enable libvirt - macvtap @@ -37,15 +28,16 @@ become: true gather_facts: no vars: - - target: bastion_server + #- ssh_target_ip: host_vars[kvm_host][ip] #this is not right roles: - ssh_copy_id - hosts: bastion_server become: true vars: - - [ package_1: haproxy, package_2: httpd, package_3: mod_ssl, package_4: bind, package_5: bind_utils ] #reminder that "bind" is called "named" after install + - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] #reminder that "bind" is called "named" after install roles: + - check_ssh #- firewall (whatever that may be) #- set_selinux_permissive - dns diff --git a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml index 35adcb55..bf632676 100644 --- a/roles/check_ssh/tasks/main.yaml +++ b/roles/check_ssh/tasks/main.yaml @@ -1,6 
+1,5 @@ --- - - name: check ssh to remote hosts works shell: "hostname; id" register: ssh_connection_test diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index 80394f93..e0c03d19 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,14 +1,15 @@ --- -- name: Installing required packages on {{ ansible_facts['host'] }} +- name: Installing required packages on "{{ ansible_facts['host'] }}" ansible.builtin.package: name: - - {{ package_1 }} - - {{ package_2 or Default(omit) }} - - {{ package_3 or Default(omit) }} - - {{ package_5 or Default(omit) }} - - {{ package_6 or Default(omit) }} - - {{ package_7 or Default(omit) }} - - {{ package_8 or Default(omit) }} + - "{{ packages[0] }}" + - "{{ packages[1] | default(omit) }}" + - "{{ packages[2] | default(omit) }}" + - "{{ packages[3] | default(omit) }}" + - "{{ packages[4] | default(omit) }}" + - "{{ packages[5] | default(omit) }}" + - "{{ packages[6] | default(omit) }}" + - "{{ packages[7] | default(omit) }}" state: present update_cache: yes diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index 623edc7e..a951d33f 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: distribute the ssh key to the remote hosts - shell: "/usr/local/bin/sshpass -p \"{{remote_machine_password}}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ssh_key_filename}}.pub -p {{ remote_host_ip}}.22 \"{{remote_machine_username}}@{{ remote_host_ip }}\"" + shell: "/usr/local/bin/sshpass -p \"{{ssh_target_password}}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ssh_key_filename}}.pub -p 22 \"{{ssh_target_username}}@{{ ssh_target_ip }}\"" register: ssh_copy_id_execution failed_when: ssh_copy_id_execution.rc != 0 From 8ffa189e37a6717e860f1464fbbe37725d36efd4 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 17 Aug 2021 09:29:15 -0500
Subject: [PATCH 267/885] Updating roles with tags for more granularity --- roles/bastion_server/tasks/main.yaml | 31 ++++++++++++++++++++++ roles/check_ssh/tasks/main.yaml | 2 ++ roles/create_bastion/tasks/main.yaml | 2 ++ roles/create_bootstrap/tasks/main.yaml | 1 + roles/create_compute_nodes/tasks/main.yaml | 4 +++ roles/create_control_nodes/tasks/main.yaml | 6 +++++ roles/dns/tasks/main.yaml | 6 +++++ roles/get-ocp/tasks/main.yaml | 19 ++++++++++++- roles/haproxy/tasks/main.yaml | 3 +++ roles/httpd/tasks/main.yaml | 4 +++ roles/kvm_host/tasks/main.yaml | 3 +++ roles/macvtap/tasks/main.yaml | 3 +++ roles/prep_kvm_guests/tasks/main.yaml | 5 ++++ roles/ssh_config_jump/tasks/main.yaml | 3 +++ roles/ssh_copy_id/tasks/main.yaml | 2 ++ roles/ssh_key_gen/tasks/main.yaml | 10 +++++++ 16 files changed, 103 insertions(+), 1 deletion(-) diff --git a/roles/bastion_server/tasks/main.yaml b/roles/bastion_server/tasks/main.yaml index cb328f74..0d6d8024 100644 --- a/roles/bastion_server/tasks/main.yaml +++ b/roles/bastion_server/tasks/main.yaml @@ -7,6 +7,7 @@ # - haproxy - name: move haproxy config file to bastion + tags: bastion copy: src: haproxy.cfg dest: /etc/haproxy/haproxy.cfg @@ -14,11 +15,13 @@ backup: yes - name: enable haproxy + tags: bastion systemd: state: enabled named: haproxy - name: Start haproxy + tags: bastion systemd: state: restarted name: haproxy @@ -33,6 +36,7 @@ ## state: latest - name: Ensure the default Apache port is 8080 + tags: bastion replace: path: /etc/httpd/conf/httpd.conf regexp: '^Listen 80' @@ -40,6 +44,7 @@ backup: yes - name: Ensure the SSL default port is 4443 + tags: bastion replace: path: /etc/httpd/conf.d/ssl.conf regexp: '^Listen 443 https' @@ -47,47 +52,55 @@ backup: yes - name: restart httpd to reflect changes to port + tags: bastion service: name: httpd state: restarted # - name: Allow all access to tcp port 8080 +# tags: bastion # community.general.ufw: # rule: allow # port: '8080' # proto: tcp # # - name: Allow all access to tcp 
port 80 +# tags: bastion # community.general.ufw: # rule: allow # port: '80' # proto: tcp # # - name: Allow all access to tcp port 443 +# tags: bastion # community.general.ufw: # rule: allow # port: '443' # proto: tcp # # - name: Allow all access to tcp port 4443 +# tags: bastion # community.general.ufw: # rule: allow # port: '4443' # proto: tcp - name: create directory bin for mirrors + tags: bastion file: path: /var/www/html/bin state: directory mode: '0755' - name: create directory bootstrap for mirrors + tags: bastion file: path: /var/www/html/bootstrap state: directory mode: '0755' - name: get mirrors 1 + tags: bastion get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/www/html/bin @@ -95,6 +108,7 @@ mode: '0755' - name: get mirrors 2 + tags: bastion get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/www/html/bin @@ -102,6 +116,7 @@ mode: '0755' - name: get mirrors 3 + tags: bastion get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin @@ -109,33 +124,39 @@ mode: '0755' - name: check to make sure httpd is started + tags: bastion service: name: httpd state: started - name: check httpd status + tags: bastion service: state: started name: httpd - name: create OCP download landing directory + tags: bastion file: path: /ocpinst/ state: directory - name: Unzip OCP Client + tags: bastion ansible.builtin.unarchive: src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz dest: /ocpinst/ remote_src: yes - name: Unzip OCP Installer + tags: bastion ansible.builtin.unarchive: src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz dest: /ocpinst/ remote_src: yes - name: Copy kubectl file + tags: bastion 
ansible.builtin.copy: src: /ocpinst/kubectl dest: /usr/local/bin/kubectl @@ -145,6 +166,7 @@ mode: '0755' - name: Copy oc file + tags: bastion ansible.builtin.copy: src: /ocpinst/oc dest: /usr/local/bin/oc @@ -154,6 +176,7 @@ mode: '0755' - name: Copy openshift-install file + tags: bastion ansible.builtin.copy: src: /ocpinst/openshift-install dest: /usr/local/bin/openshift-install @@ -163,42 +186,50 @@ mode: '0755' - name: Copy install-config.yaml to ocp install directory + tags: bastion copy: src: install-config.yaml dest: /ocpinst/install-config.yaml - name: Create Manifests + tags: bastion command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes - name: Set mastersSchedulable parameter to False + tags: bastion replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml regexp: ': true' replace: ': false' - name: Create Ignition files + tags: bastion command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ become: yes - name: create Ignition directory on webserver + tags: bastion file: path: /var/www/html/ignition state: directory - name: Copy bootstrap Ignition file to web server + tags: bastion copy: src: /ocpinst/bootstrap.ign dest: /var/www/html/ignition remote_src: yes - name: Copy control plane Ignition file to web server + tags: bastion copy: src: /ocpinst/master.ign dest: /var/www/html/ignition remote_src: yes - name: Copy worker Ignition file to web server + tags: bastion copy: src: /ocpinst/worker.ign dest: /var/www/html/ignition diff --git a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml index 35adcb55..64a8ac70 100644 --- a/roles/check_ssh/tasks/main.yaml +++ b/roles/check_ssh/tasks/main.yaml @@ -2,10 +2,12 @@ - name: check ssh to remote hosts works + tags: keymastr shell: "hostname; id" register: ssh_connection_test failed_when: ssh_connection_test.rc != 0 - name: print the connectivity test results + tags: keymastr debug: var: ssh_connection_test.stdout_lines \ No newline at end of 
file diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index e748fe5b..6cbf9875 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -23,8 +23,10 @@ # command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - name: start bastion install + tags: bastionvm command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - name: wait for bastion to install + tags: bastionvm pause: minutes: 60 \ No newline at end of file diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index ed6a6dc4..5a3833ff 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: boot bootstrap + tags: bootstrap command: | virt-install --name bootstrap --disk size=100 --ram 16000 --cpu host --vcpus 4 diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index 8d9af69f..6fe941f8 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: install CoreOS on compute-0 node + tags: compute command: | virt-install --name compute-0 --disk size=100 --ram 16000 --cpu host --vcpus 4 @@ -11,10 +12,12 @@ --noautoconsole - name: pause 15 minutes + tags: compute pause: minutes: 15 - name: install CoreOS on compute-1 node + tags: compute command: | 
virt-install --name compute-1 --disk size=100 --ram 16000 --cpu host --vcpus 4 @@ -25,5 +28,6 @@ --noautoconsole - name: pause 15 minutes + tags: compute pause: minutes: 15 \ No newline at end of file diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index f92cfe4f..6f94ce6c 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: install CoreOS on control-0 node + tags: control command: | virt-install --name control-0 --disk size=100 --ram 16000 --cpu host --vcpus 4 @@ -11,10 +12,12 @@ --noautoconsole - name: pause 15 minutes + tags: control pause: minutes: 15 - name: install CoreOS on control-1 node + tags: control command: | virt-install --name control-1 --disk size=100 --ram 16000 --cpu host --vcpus 4 @@ -25,10 +28,12 @@ --noautoconsole - name: pause 15 minutes + tags: control pause: minutes: 15 - name: install CoreOS on control-2 node + tags: control command: | virt-install --name control-2 --disk size=100 --ram 16000 --cpu host --vcpus 4 @@ -39,5 +44,6 @@ --noautoconsole - name: pause 15 minutes + tags: control pause: minutes: 15 diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml index 88adf33c..ed9807cf 100644 --- a/roles/dns/tasks/main.yaml +++ b/roles/dns/tasks/main.yaml @@ -1,16 +1,19 @@ --- - name: enable named + tags: dns ansible.builtin.systemd: name: named enabled: yes - name: start named + tags: dns ansible.builtin.systemd: name: named state: started - name: Copy named.conf file to bastion + tags: dns ansible.builtin.copy: src: named.conf dest: /etc/ @@ -20,6 +23,7 @@ backup: yes - name: Copy distribution.db file to bastion + tags: dns ansible.builtin.copy: src: distribution.db dest: /var/named @@ -29,6 +33,7 @@ backup: yes - name: Copy distribution.rev file to bastion + tags: dns ansible.builtin.copy: src: distribution.rev dest: /var/named @@ -38,6 +43,7 @@ backup: yes - name: restart named to update changes 
made to DNS + tags: dns ansible.builtin.systemd: name: named state: restarted \ No newline at end of file diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index 4e6c5a26..20816eb5 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -1,35 +1,41 @@ --- - name: create directory bin for mirrors + tags: setocp file: path: /var/www/html/bin state: directory mode: '0755' - name: get ocp kernel + tags: setocp get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/www/html/bin mode: '0755' - name: get ocp initramfs + tags: setocp get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/www/html/bin mode: '0755' - name: get ocp rootfs + tags: setocp get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin mode: '0755' - name: create OCP download landing directory + tags: setocp file: path: /ocpinst/ state: directory - name: Unzip OCP Client + tags: setocp ansible.builtin.unarchive: src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz dest: /ocpinst/ @@ -41,7 +47,8 @@ dest: /ocpinst/ remote_src: yes -- name: Copy kubectl file +- name: Copy kubectl file + tags: setocp ansible.builtin.copy: src: /ocpinst/kubectl dest: /usr/local/bin/kubectl @@ -51,6 +58,7 @@ mode: '0755' - name: Copy oc file + tags: setocp ansible.builtin.copy: src: /ocpinst/oc dest: /usr/local/bin/oc @@ -60,6 +68,7 @@ mode: '0755' - name: Copy openshift-install file + tags: setocp ansible.builtin.copy: src: /ocpinst/openshift-install dest: /usr/local/bin/openshift-install @@ -69,42 +78,50 @@ mode: '0755' - name: Copy install-config.yaml to ocp install directory + tags: setocp copy: src: install-config.yaml dest: /ocpinst/install-config.yaml - name: 
Create Manifests + tags: setocp command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes - name: Set mastersSchedulable parameter to False + tags: setocp replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml regexp: ': true' replace: ': false' - name: Create Ignition files + tags: setocp command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ become: yes - name: create Ignition directory on webserver + tags: setocp file: path: /var/www/html/ignition state: directory - name: Copy bootstrap Ignition file to web server + tags: setocp copy: src: /ocpinst/bootstrap.ign dest: /var/www/html/ignition remote_src: yes - name: Copy control plane Ignition file to web server + tags: setocp copy: src: /ocpinst/master.ign dest: /var/www/html/ignition remote_src: yes - name: Copy worker Ignition file to web server + tags: setocp copy: src: /ocpinst/worker.ign dest: /var/www/html/ignition diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index a3099c51..0a1903d3 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: move haproxy config file to bastion + tags: haproxy copy: src: haproxy.cfg dest: /etc/haproxy/haproxy.cfg @@ -8,11 +9,13 @@ backup: yes - name: enable haproxy + tags: haproxy systemd: enabled: yes name: haproxy - name: Restart haproxy + tags: haproxy systemd: state: restarted name: haproxy diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index 0a276a9d..0ac1d7fc 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -1,11 +1,13 @@ --- - name: enable httpd + tags: httpconf systemd: name: httpd enabled: yes - name: Ensure the default Apache port is 8080 + tags: httpconf replace: path: /etc/httpd/conf/httpd.conf regexp: '^Listen 80' @@ -13,6 +15,7 @@ backup: yes - name: Ensure the SSL default port is 4443 + tags: httpconf replace: path: /etc/httpd/conf.d/ssl.conf regexp: '^Listen 443 https' @@ 
-20,6 +23,7 @@ backup: yes - name: restart httpd to reflect changes to port + tags: httpconf service: name: httpd state: restarted diff --git a/roles/kvm_host/tasks/main.yaml b/roles/kvm_host/tasks/main.yaml index 5ba167c8..8c3c8296 100644 --- a/roles/kvm_host/tasks/main.yaml +++ b/roles/kvm_host/tasks/main.yaml @@ -1,4 +1,5 @@ - name: Set up macvtap bridge + tags: kvmhost community.libvirt.virt_net: command: define name: macvtap-net @@ -6,12 +7,14 @@ xml: "{{ lookup ('template', 'macvtap.xml.j2') }}" - name: Start macvtap-net + tags: kvmhost community.libvirt.virt_net: autostart: yes command: start name: macvtap-net - name: Set autostart for macvtap-net + tags: kvmhost community.libvirt.virt_net: autostart: yes name: macvtap-net diff --git a/roles/macvtap/tasks/main.yaml b/roles/macvtap/tasks/main.yaml index af7daa8b..1ae4d245 100644 --- a/roles/macvtap/tasks/main.yaml +++ b/roles/macvtap/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: Set up macvtap bridge + tags: kvmhost community.libvirt.virt_net: command: define name: macvtap-net @@ -8,12 +9,14 @@ xml: "{{ lookup ('template', 'macvtap.xml.j2') }}" - name: Start macvtap-net + tags: kvmhost community.libvirt.virt_net: autostart: yes command: start name: macvtap-net - name: Set autostart for macvtap-net + tags: kvmhost community.libvirt.virt_net: autostart: yes name: macvtap-net diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index da0d795d..f37119d7 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: get rhcos qcow2 file + tags: kvmhost get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images/ @@ -8,9 +9,11 @@ mode: '0755' - name: Unzip rhcos qcow2 files + tags: kvmhost command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz - name: get rhcos initramfs image + tags: kvmhost get_url: url: 
https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/lib/libvirt/images/ @@ -18,6 +21,7 @@ force: yes - name: get rhcos kernel + tags: kvmhost get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/lib/libvirt/images/ @@ -25,6 +29,7 @@ force: yes - name: get rhcos rootfs image + tags: kvmhost get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/lib/libvirt/images/ diff --git a/roles/ssh_config_jump/tasks/main.yaml b/roles/ssh_config_jump/tasks/main.yaml index a556b0af..16a59fa9 100644 --- a/roles/ssh_config_jump/tasks/main.yaml +++ b/roles/ssh_config_jump/tasks/main.yaml @@ -3,17 +3,20 @@ --- - name: Check that the ssh_config exists + tags: keymastr stat: path: ~/.ssh/config register: ssh_config - name: Create ssh config file, if it doesnt exist already + tags: keymastr file: path: ~/.ssh/config state: touch when: not ssh_config.stat.exists - name: Insert ssh keys for jump host configuration in /ssh/config + tags: keymastr blockinfile: path: ~/.ssh/config block: | diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index 623edc7e..accbe2d4 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,10 +1,12 @@ --- - name: distribute the ssh key to the remote hosts + tags: keymastr shell: "/usr/local/bin/sshpass -p \"{{remote_machine_password}}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ssh_key_filename}}.pub -p {{ remote_host_ip}}.22 \"{{remote_machine_username}}@{{ remote_host_ip }}\"" register: ssh_copy_id_execution failed_when: ssh_copy_id_execution.rc != 0 - name: Print results of copying ssh id to remote host. 
+ tags: keymastr debug: var: ssh_copy_id_execution diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index 92d95d72..ee593633 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -1,15 +1,18 @@ --- - name: Check to see if local .ssh directory exists + tags: keymastr stat: path: "~/.ssh" register: ssh_directory_exists_check - name: Print results of .ssh directory check + tags: keymastr debug: var: ssh_directory_exists_check - name: Create .ssh local directory if it doesn't already exist + tags: keymastr file: path: "~/.ssh" state: directory @@ -18,10 +21,12 @@ when: ssh_directory_exists_check is defined and ssh_directory_exists_check.stat.exists == false - name: Print results of ssh directory creation + tags: keymastr debug: var: ssh_directory_creation - name: check .ssh key pair files exist + tags: keymastr stat: path: "~/.ssh/{{item}}" register: ssh_key_file_exists_check @@ -30,10 +35,12 @@ - "{{ssh_key_filename}}.pub" - name: Print results of ssh key pair files check + tags: keymastr debug: var: ssh_key_file_exists_check.results[1].stat.exists - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key + tags: keymastr community.crypto.openssh_keypair: path: ~/.ssh/{{ ssh_key_filename }} passphrase: "" @@ -42,10 +49,12 @@ when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false - name: Print results of ssh key pair creation + tags: keymastr debug: var: ssh_key_creation - name: add the new ssh key to the ansible.cfg file + tags: keymastr lineinfile: path: ansible.cfg line: "private_key_file = ~/.ssh/{{ssh_key_filename}}" @@ -54,5 +63,6 @@ register: ssh_config_file_key_addition - name: Print results of adding ssh key to ansible.cfg file + tags: keymastr debug: var: ssh_config_file_key_addition From e981db5bf8af194ab7a874476bef5671e7d24485 Mon Sep 
17 00:00:00 2001 From: pswilso2017 Date: Tue, 17 Aug 2021 09:29:15 -0500 Subject: [PATCH 268/885] Updating roles with tags for more granularity --- roles/bastion_server/tasks/main.yaml | 31 ++++++++++++++++++++++ roles/check_ssh/tasks/main.yaml | 2 ++ roles/create_bastion/tasks/main.yaml | 2 ++ roles/create_bootstrap/tasks/main.yaml | 1 + roles/create_compute_nodes/tasks/main.yaml | 4 +++ roles/create_control_nodes/tasks/main.yaml | 6 +++++ roles/dns/tasks/main.yaml | 6 +++++ roles/get-ocp/tasks/main.yaml | 19 ++++++++++++- roles/haproxy/tasks/main.yaml | 3 +++ roles/httpd/tasks/main.yaml | 4 +++ roles/kvm_host/tasks/main.yaml | 3 +++ roles/macvtap/tasks/main.yaml | 3 +++ roles/prep_kvm_guests/tasks/main.yaml | 5 ++++ roles/ssh_config_jump/tasks/main.yaml | 3 +++ roles/ssh_copy_id/tasks/main.yaml | 2 ++ roles/ssh_key_gen/tasks/main.yaml | 10 +++++++ 16 files changed, 103 insertions(+), 1 deletion(-) diff --git a/roles/bastion_server/tasks/main.yaml b/roles/bastion_server/tasks/main.yaml index cb328f74..0d6d8024 100644 --- a/roles/bastion_server/tasks/main.yaml +++ b/roles/bastion_server/tasks/main.yaml @@ -7,6 +7,7 @@ # - haproxy - name: move haproxy config file to bastion + tags: bastion copy: src: haproxy.cfg dest: /etc/haproxy/haproxy.cfg @@ -14,11 +15,13 @@ backup: yes - name: enable haproxy + tags: bastion systemd: state: enabled named: haproxy - name: Start haproxy + tags: bastion systemd: state: restarted name: haproxy @@ -33,6 +36,7 @@ ## state: latest - name: Ensure the default Apache port is 8080 + tags: bastion replace: path: /etc/httpd/conf/httpd.conf regexp: '^Listen 80' @@ -40,6 +44,7 @@ backup: yes - name: Ensure the SSL default port is 4443 + tags: bastion replace: path: /etc/httpd/conf.d/ssl.conf regexp: '^Listen 443 https' @@ -47,47 +52,55 @@ backup: yes - name: restart httpd to reflect changes to port + tags: bastion service: name: httpd state: restarted # - name: Allow all access to tcp port 8080 +# tags: bastion # community.general.ufw: # rule: 
allow # port: '8080' # proto: tcp # # - name: Allow all access to tcp port 80 +# tags: bastion # community.general.ufw: # rule: allow # port: '80' # proto: tcp # # - name: Allow all access to tcp port 443 +# tags: bastion # community.general.ufw: # rule: allow # port: '443' # proto: tcp # # - name: Allow all access to tcp port 4443 +# tags: bastion # community.general.ufw: # rule: allow # port: '4443' # proto: tcp - name: create directory bin for mirrors + tags: bastion file: path: /var/www/html/bin state: directory mode: '0755' - name: create directory bootstrap for mirrors + tags: bastion file: path: /var/www/html/bootstrap state: directory mode: '0755' - name: get mirrors 1 + tags: bastion get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/www/html/bin @@ -95,6 +108,7 @@ mode: '0755' - name: get mirrors 2 + tags: bastion get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/www/html/bin @@ -102,6 +116,7 @@ mode: '0755' - name: get mirrors 3 + tags: bastion get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin @@ -109,33 +124,39 @@ mode: '0755' - name: check to make sure httpd is started + tags: bastion service: name: httpd state: started - name: check httpd status + tags: bastion service: state: started name: httpd - name: create OCP download landing directory + tags: bastion file: path: /ocpinst/ state: directory - name: Unzip OCP Client + tags: bastion ansible.builtin.unarchive: src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz dest: /ocpinst/ remote_src: yes - name: Unzip OCP Installer + tags: bastion ansible.builtin.unarchive: src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz dest: 
/ocpinst/ remote_src: yes - name: Copy kubectl file + tags: bastion ansible.builtin.copy: src: /ocpinst/kubectl dest: /usr/local/bin/kubectl @@ -145,6 +166,7 @@ mode: '0755' - name: Copy oc file + tags: bastion ansible.builtin.copy: src: /ocpinst/oc dest: /usr/local/bin/oc @@ -154,6 +176,7 @@ mode: '0755' - name: Copy openshift-install file + tags: bastion ansible.builtin.copy: src: /ocpinst/openshift-install dest: /usr/local/bin/openshift-install @@ -163,42 +186,50 @@ mode: '0755' - name: Copy install-config.yaml to ocp install directory + tags: bastion copy: src: install-config.yaml dest: /ocpinst/install-config.yaml - name: Create Manifests + tags: bastion command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes - name: Set mastersSchedulable parameter to False + tags: bastion replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml regexp: ': true' replace: ': false' - name: Create Ignition files + tags: bastion command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ become: yes - name: create Ignition directory on webserver + tags: bastion file: path: /var/www/html/ignition state: directory - name: Copy bootstrap Ignition file to web server + tags: bastion copy: src: /ocpinst/bootstrap.ign dest: /var/www/html/ignition remote_src: yes - name: Copy control plane Ignition file to web server + tags: bastion copy: src: /ocpinst/master.ign dest: /var/www/html/ignition remote_src: yes - name: Copy worker Ignition file to web server + tags: bastion copy: src: /ocpinst/worker.ign dest: /var/www/html/ignition diff --git a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml index 35adcb55..64a8ac70 100644 --- a/roles/check_ssh/tasks/main.yaml +++ b/roles/check_ssh/tasks/main.yaml @@ -2,10 +2,12 @@ - name: check ssh to remote hosts works + tags: keymastr shell: "hostname; id" register: ssh_connection_test failed_when: ssh_connection_test.rc != 0 - name: print the connectivity test results + tags: keymastr 
debug: var: ssh_connection_test.stdout_lines \ No newline at end of file diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index e748fe5b..6cbf9875 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -23,8 +23,10 @@ # command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - name: start bastion install + tags: bastionvm command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - name: wait for bastion to install + tags: bastionvm pause: minutes: 60 \ No newline at end of file diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index ed6a6dc4..5a3833ff 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: boot bootstrap + tags: bootstrap command: | virt-install --name bootstrap --disk size=100 --ram 16000 --cpu host --vcpus 4 diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index 8d9af69f..6fe941f8 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: install CoreOS on compute-0 node + tags: compute command: | virt-install --name compute-0 --disk size=100 --ram 16000 --cpu host --vcpus 4 @@ -11,10 +12,12 @@ --noautoconsole - name: pause 15 minutes + tags: compute pause: minutes: 15 - 
name: install CoreOS on compute-1 node + tags: compute command: | virt-install --name compute-1 --disk size=100 --ram 16000 --cpu host --vcpus 4 @@ -25,5 +28,6 @@ --noautoconsole - name: pause 15 minutes + tags: compute pause: minutes: 15 \ No newline at end of file diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index f92cfe4f..6f94ce6c 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: install CoreOS on control-0 node + tags: control command: | virt-install --name control-0 --disk size=100 --ram 16000 --cpu host --vcpus 4 @@ -11,10 +12,12 @@ --noautoconsole - name: pause 15 minutes + tags: control pause: minutes: 15 - name: install CoreOS on control-1 node + tags: control command: | virt-install --name control-1 --disk size=100 --ram 16000 --cpu host --vcpus 4 @@ -25,10 +28,12 @@ --noautoconsole - name: pause 15 minutes + tags: control pause: minutes: 15 - name: install CoreOS on control-2 node + tags: control command: | virt-install --name control-2 --disk size=100 --ram 16000 --cpu host --vcpus 4 @@ -39,5 +44,6 @@ --noautoconsole - name: pause 15 minutes + tags: control pause: minutes: 15 diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml index 88adf33c..ed9807cf 100644 --- a/roles/dns/tasks/main.yaml +++ b/roles/dns/tasks/main.yaml @@ -1,16 +1,19 @@ --- - name: enable named + tags: dns ansible.builtin.systemd: name: named enabled: yes - name: start named + tags: dns ansible.builtin.systemd: name: named state: started - name: Copy named.conf file to bastion + tags: dns ansible.builtin.copy: src: named.conf dest: /etc/ @@ -20,6 +23,7 @@ backup: yes - name: Copy distribution.db file to bastion + tags: dns ansible.builtin.copy: src: distribution.db dest: /var/named @@ -29,6 +33,7 @@ backup: yes - name: Copy distribution.rev file to bastion + tags: dns ansible.builtin.copy: src: distribution.rev dest: /var/named @@ 
-38,6 +43,7 @@ backup: yes - name: restart named to update changes made to DNS + tags: dns ansible.builtin.systemd: name: named state: restarted \ No newline at end of file diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index 4e6c5a26..20816eb5 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -1,35 +1,41 @@ --- - name: create directory bin for mirrors + tags: setocp file: path: /var/www/html/bin state: directory mode: '0755' - name: get ocp kernel + tags: setocp get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/www/html/bin mode: '0755' - name: get ocp initramfs + tags: setocp get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/www/html/bin mode: '0755' - name: get ocp rootfs + tags: setocp get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin mode: '0755' - name: create OCP download landing directory + tags: setocp file: path: /ocpinst/ state: directory - name: Unzip OCP Client + tags: setocp ansible.builtin.unarchive: src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz dest: /ocpinst/ @@ -41,7 +47,8 @@ dest: /ocpinst/ remote_src: yes -- name: Copy kubectl file +- name: Copy kubectl file + tags: setocp ansible.builtin.copy: src: /ocpinst/kubectl dest: /usr/local/bin/kubectl @@ -51,6 +58,7 @@ mode: '0755' - name: Copy oc file + tags: setocp ansible.builtin.copy: src: /ocpinst/oc dest: /usr/local/bin/oc @@ -60,6 +68,7 @@ mode: '0755' - name: Copy openshift-install file + tags: setocp ansible.builtin.copy: src: /ocpinst/openshift-install dest: /usr/local/bin/openshift-install @@ -69,42 +78,50 @@ mode: '0755' - name: Copy install-config.yaml to ocp install directory + tags: setocp copy: 
src: install-config.yaml dest: /ocpinst/install-config.yaml - name: Create Manifests + tags: setocp command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes - name: Set mastersSchedulable parameter to False + tags: setocp replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml regexp: ': true' replace: ': false' - name: Create Ignition files + tags: setocp command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ become: yes - name: create Ignition directory on webserver + tags: setocp file: path: /var/www/html/ignition state: directory - name: Copy bootstrap Ignition file to web server + tags: setocp copy: src: /ocpinst/bootstrap.ign dest: /var/www/html/ignition remote_src: yes - name: Copy control plane Ignition file to web server + tags: setocp copy: src: /ocpinst/master.ign dest: /var/www/html/ignition remote_src: yes - name: Copy worker Ignition file to web server + tags: setocp copy: src: /ocpinst/worker.ign dest: /var/www/html/ignition diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index a3099c51..0a1903d3 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: move haproxy config file to bastion + tags: haproxy copy: src: haproxy.cfg dest: /etc/haproxy/haproxy.cfg @@ -8,11 +9,13 @@ backup: yes - name: enable haproxy + tags: haproxy systemd: enabled: yes name: haproxy - name: Restart haproxy + tags: haproxy systemd: state: restarted name: haproxy diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index 0a276a9d..0ac1d7fc 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -1,11 +1,13 @@ --- - name: enable httpd + tags: httpconf systemd: name: httpd enabled: yes - name: Ensure the default Apache port is 8080 + tags: httpconf replace: path: /etc/httpd/conf/httpd.conf regexp: '^Listen 80' @@ -13,6 +15,7 @@ backup: yes - name: Ensure the SSL default port is 4443 + tags: httpconf replace: 
path: /etc/httpd/conf.d/ssl.conf regexp: '^Listen 443 https' @@ -20,6 +23,7 @@ backup: yes - name: restart httpd to reflect changes to port + tags: httpconf service: name: httpd state: restarted diff --git a/roles/kvm_host/tasks/main.yaml b/roles/kvm_host/tasks/main.yaml index 5ba167c8..8c3c8296 100644 --- a/roles/kvm_host/tasks/main.yaml +++ b/roles/kvm_host/tasks/main.yaml @@ -1,4 +1,5 @@ - name: Set up macvtap bridge + tags: kvmhost community.libvirt.virt_net: command: define name: macvtap-net @@ -6,12 +7,14 @@ xml: "{{ lookup ('template', 'macvtap.xml.j2') }}" - name: Start macvtap-net + tags: kvmhost community.libvirt.virt_net: autostart: yes command: start name: macvtap-net - name: Set autostart for macvtap-net + tags: kvmhost community.libvirt.virt_net: autostart: yes name: macvtap-net diff --git a/roles/macvtap/tasks/main.yaml b/roles/macvtap/tasks/main.yaml index af7daa8b..1ae4d245 100644 --- a/roles/macvtap/tasks/main.yaml +++ b/roles/macvtap/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: Set up macvtap bridge + tags: kvmhost community.libvirt.virt_net: command: define name: macvtap-net @@ -8,12 +9,14 @@ xml: "{{ lookup ('template', 'macvtap.xml.j2') }}" - name: Start macvtap-net + tags: kvmhost community.libvirt.virt_net: autostart: yes command: start name: macvtap-net - name: Set autostart for macvtap-net + tags: kvmhost community.libvirt.virt_net: autostart: yes name: macvtap-net diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index da0d795d..f37119d7 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: get rhcos qcow2 file + tags: kvmhost get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images/ @@ -8,9 +9,11 @@ mode: '0755' - name: Unzip rhcos qcow2 files + tags: kvmhost command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz - name: 
get rhcos initramfs image + tags: kvmhost get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/lib/libvirt/images/ @@ -18,6 +21,7 @@ force: yes - name: get rhcos kernel + tags: kvmhost get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/lib/libvirt/images/ @@ -25,6 +29,7 @@ force: yes - name: get rhcos rootfs image + tags: kvmhost get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/lib/libvirt/images/ diff --git a/roles/ssh_config_jump/tasks/main.yaml b/roles/ssh_config_jump/tasks/main.yaml index a556b0af..16a59fa9 100644 --- a/roles/ssh_config_jump/tasks/main.yaml +++ b/roles/ssh_config_jump/tasks/main.yaml @@ -3,17 +3,20 @@ --- - name: Check that the ssh_config exists + tags: keymastr stat: path: ~/.ssh/config register: ssh_config - name: Create ssh config file, if it doesnt exist already + tags: keymastr file: path: ~/.ssh/config state: touch when: not ssh_config.stat.exists - name: Insert ssh keys for jump host configuration in /ssh/config + tags: keymastr blockinfile: path: ~/.ssh/config block: | diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index 623edc7e..accbe2d4 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,10 +1,12 @@ --- - name: distribute the ssh key to the remote hosts + tags: keymastr shell: "/usr/local/bin/sshpass -p \"{{remote_machine_password}}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ssh_key_filename}}.pub -p {{ remote_host_ip}}.22 \"{{remote_machine_username}}@{{ remote_host_ip }}\"" register: ssh_copy_id_execution failed_when: ssh_copy_id_execution.rc != 0 - name: Print results of copying ssh id to remote host. 
+ tags: keymastr debug: var: ssh_copy_id_execution diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index 92d95d72..ee593633 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -1,15 +1,18 @@ --- - name: Check to see if local .ssh directory exists + tags: keymastr stat: path: "~/.ssh" register: ssh_directory_exists_check - name: Print results of .ssh directory check + tags: keymastr debug: var: ssh_directory_exists_check - name: Create .ssh local directory if it doesn't already exist + tags: keymastr file: path: "~/.ssh" state: directory @@ -18,10 +21,12 @@ when: ssh_directory_exists_check is defined and ssh_directory_exists_check.stat.exists == false - name: Print results of ssh directory creation + tags: keymastr debug: var: ssh_directory_creation - name: check .ssh key pair files exist + tags: keymastr stat: path: "~/.ssh/{{item}}" register: ssh_key_file_exists_check @@ -30,10 +35,12 @@ - "{{ssh_key_filename}}.pub" - name: Print results of ssh key pair files check + tags: keymastr debug: var: ssh_key_file_exists_check.results[1].stat.exists - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key + tags: keymastr community.crypto.openssh_keypair: path: ~/.ssh/{{ ssh_key_filename }} passphrase: "" @@ -42,10 +49,12 @@ when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false - name: Print results of ssh key pair creation + tags: keymastr debug: var: ssh_key_creation - name: add the new ssh key to the ansible.cfg file + tags: keymastr lineinfile: path: ansible.cfg line: "private_key_file = ~/.ssh/{{ssh_key_filename}}" @@ -54,5 +63,6 @@ register: ssh_config_file_key_addition - name: Print results of adding ssh key to ansible.cfg file + tags: keymastr debug: var: ssh_config_file_key_addition From 7ed3f4f6139d090709a87f1639a7c47e9aa9a680 Mon Sep 
17 00:00:00 2001 From: pswilso2017 Date: Tue, 17 Aug 2021 11:26:30 -0500 Subject: [PATCH 269/885] Updating Readme as to the purpose of the project --- README.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 10d0a3ae..d1962aeb 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,19 @@ # Ansible-OpenShift-Provisioning -Phillip adding comment via lesson 3 +The goal of this playbook is to setup and deploy an OpenShift cluster utilizing KVM as the virtualization method + +Tags: +bastion = configuration of bastion for OCP +keymastr = ssh key configuration and testing +bastionvm = creation of Bastion KVM guest +boostrap = creation of Boostrap KVM guest +compute = creation of the Compute nodes KVM guests (2) +control = creation of the Control nodes KVM guests (3 min) +dns = configuration of dns server on bastion +setocp = download of OCP installer and http server configuration +haproxy = configuration of haproxy on bastion kvm guest +httpconf = configuration of httpd server on bastion kvm guest +kvmhost = tasks to apply to KVM host for OCP cluster +setocp = get ocp playbook + From 9cca497453f09237606e6245e610dd8d5da465a8 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 17 Aug 2021 11:26:30 -0500 Subject: [PATCH 270/885] Updating Readme as to the purpose of the project --- README.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 10d0a3ae..d1962aeb 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,19 @@ # Ansible-OpenShift-Provisioning -Phillip adding comment via lesson 3 +The goal of this playbook is to setup and deploy an OpenShift cluster utilizing KVM as the virtualization method + +Tags: +bastion = configuration of bastion for OCP +keymastr = ssh key configuration and testing +bastionvm = creation of Bastion KVM guest +boostrap = creation of Boostrap KVM guest +compute = creation of the Compute nodes KVM guests (2) +control = creation of 
the Control nodes KVM guests (3 min) +dns = configuration of dns server on bastion +setocp = download of OCP installer and http server configuration +haproxy = configuration of haproxy on bastion kvm guest +httpconf = configuration of httpd server on bastion kvm guest +kvmhost = tasks to apply to KVM host for OCP cluster +setocp = get ocp playbook + From f8ddfa644dc9cf079d17ded0062d76b22c93cd97 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 17 Aug 2021 11:30:15 -0500 Subject: [PATCH 271/885] Delete pull-secret.txt Remove duplicate pull-secret.txt from the main directory. Copy located in files directory --- pull-secret.txt | 1 - 1 file changed, 1 deletion(-) delete mode 100644 pull-secret.txt diff --git a/pull-secret.txt b/pull-secret.txt deleted file mode 100644 index ee0cd16d..00000000 --- a/pull-secret.txt +++ /dev/null @@ -1 +0,0 @@ -{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaa
GotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}} From 4256c23a50a81775292d86d2ae5204300ed775e7 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 17 Aug 2021 11:30:15 -0500 Subject: [PATCH 272/885] Delete pull-secret.txt Remove duplicate pull-secret.txt from the main directory. 
Copy located in files directory --- pull-secret.txt | 1 - 1 file changed, 1 deletion(-) delete mode 100644 pull-secret.txt diff --git a/pull-secret.txt b/pull-secret.txt deleted file mode 100644 index ee0cd16d..00000000 --- a/pull-secret.txt +++ /dev/null @@ -1 +0,0 @@ -{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlq
Z3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}} From f34d202a1f2d3b913153d41f9a2c7f4ec9ba1d7d Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 17 Aug 2021 13:04:02 -0500 Subject: [PATCH 273/885] Starting to make updates for the implementation of variables. 
--- README.md | 2 +- ansible.cfg | 2 +- inventory | 20 -------------- inventory.yaml | 21 +++++++++++++++ main.yaml | 10 ++++--- roles/ansible_setup/tasks/main.yaml | 2 ++ roles/httpd/handlers/main.yaml | 6 +++++ roles/httpd/tasks/main.yaml | 36 ++------------------------ roles/install_packages/tasks/main.yaml | 23 ++++++++++++++-- roles/set_firewall/tasks/main.yaml | 25 ++++++++++++++++++ roles/ssh_key_gen/tasks/main.yaml | 2 +- 11 files changed, 86 insertions(+), 63 deletions(-) delete mode 100644 inventory create mode 100644 inventory.yaml create mode 100644 roles/ansible_setup/tasks/main.yaml create mode 100644 roles/httpd/handlers/main.yaml create mode 100644 roles/set_firewall/tasks/main.yaml diff --git a/README.md b/README.md index 10d0a3ae..d3268050 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ # Ansible-OpenShift-Provisioning -Phillip adding comment via lesson 3 +Localhost supported operating systems: Linux, Unix and Unix-like (i.e. MacOS X) diff --git a/ansible.cfg b/ansible.cfg index 5d2ee146..70f980f1 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,5 +1,5 @@ [defaults] -inventory = inventory +inventory = inventory.yaml private_key_file = ~/.ssh/ansible diff --git a/inventory b/inventory deleted file mode 100644 index 74d02d28..00000000 --- a/inventory +++ /dev/null @@ -1,20 +0,0 @@ -[localhost] -127.0.0.1 - -[kvm_host] -9.60.87.132 - -[bootstrap_server] -9.60.87.133 - -[bastion_server] -9.60.87.139 - -[control_nodes] -9.60.87.138 -9.60.87.137 -9.60.87.136 - -[worker_nodes] -9.60.87.135 -9.60.87.134 diff --git a/inventory.yaml b/inventory.yaml new file mode 100644 index 00000000..d51cfe6f --- /dev/null +++ b/inventory.yaml @@ -0,0 +1,21 @@ +all: + children: + kvm_host: + hosts: + 9.60.87.132 + bastion: + hosts: + 9.60.87.139 + bootstrap: + hosts: + 9.60.87.133 + control_nodes: + hosts: + 9.60.87.136 + 9.60.87.137 + 9.60.87.138 + compute_nodes: + hosts: + 9.60.87.134 + 9.60.87.135 + diff --git a/main.yaml b/main.yaml index 90e80942..28dd3b40 
100644 --- a/main.yaml +++ b/main.yaml @@ -25,14 +25,15 @@ - create_bastion #(reminder to eventually use boot instructions) - hosts: localhost - become: true + connection: local + become: false gather_facts: no vars: #- ssh_target_ip: host_vars[kvm_host][ip] #this is not right roles: - ssh_copy_id -- hosts: bastion_server +- hosts: bastion become: true vars: - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] #reminder that "bind" is called "named" after install @@ -55,12 +56,13 @@ - create_compute_nodes #- hosts: localhost - #become: true + #connection: local + #become: false #gather_facts: yes #roles: #- ssh_config_jump -#- hosts: bastion_server +#- hosts: bastion #become: true #gather_facts: no #roles: diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml new file mode 100644 index 00000000..cd21505a --- /dev/null +++ b/roles/ansible_setup/tasks/main.yaml @@ -0,0 +1,2 @@ +--- + diff --git a/roles/httpd/handlers/main.yaml b/roles/httpd/handlers/main.yaml new file mode 100644 index 00000000..f86d6901 --- /dev/null +++ b/roles/httpd/handlers/main.yaml @@ -0,0 +1,6 @@ +--- + +- name: restart httpd + service: + name: httpd + state: restarted \ No newline at end of file diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index 0a276a9d..c0801bd7 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -11,6 +11,7 @@ regexp: '^Listen 80' replace: 'Listen 8080' backup: yes + notify: restart httpd - name: Ensure the SSL default port is 4443 replace: @@ -18,37 +19,4 @@ regexp: '^Listen 443 https' replace: 'Listen 4443 https' backup: yes - -- name: restart httpd to reflect changes to port - service: - name: httpd - state: restarted - -#- name: Allow all access to tcp port 8080 -# community.general.ufw: -# rule: allow -# port: '8080' -# proto: tcp -# -#- name: Allow all access to tcp port 80 -# community.general.ufw: -# rule: allow -# port: '80' -# proto: tcp -# -#- name: Allow all 
access to tcp port 443 -# community.general.ufw: -# rule: allow -# port: '443' -# proto: tcp -# -#- name: Allow all access to tcp port 4443 -# community.general.ufw: -# rule: allow -# port: '4443' -# proto: tcp - -#- name: check to make sure httpd is started -# service: -# name: httpd -# state: restarted + notify: restart httpd diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index e0c03d19..cddd772b 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,6 +1,9 @@ --- +- name: get host os family + vars: + host_os_family: "{{ ansible_facts['os_family'] }}" -- name: Installing required packages on "{{ ansible_facts['host'] }}" +- name: Installing required packages on Linux machine ansible.builtin.package: name: - "{{ packages[0] }}" @@ -10,6 +13,22 @@ - "{{ packages[4] | default(omit) }}" - "{{ packages[5] | default(omit) }}" - "{{ packages[6] | default(omit) }}" - - "{{ packages[6] | default(omit) }}" + - "{{ packages[7] | default(omit) }}" state: present update_cache: yes + when: host_os_family == "RedHat" or host_os_family == "Debian" + +- name: Installing required packages on Mac machine + community.general.homebrew: + name: + - "{{ packages[0] }}" + - "{{ packages[1] | default(omit) }}" + - "{{ packages[2] | default(omit) }}" + - "{{ packages[3] | default(omit) }}" + - "{{ packages[4] | default(omit) }}" + - "{{ packages[5] | default(omit) }}" + - "{{ packages[6] | default(omit) }}" + - "{{ packages[7] | default(omit) }}" + state: present + update_homebrew: yes + when: host_os_family == "Darwin" \ No newline at end of file diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml new file mode 100644 index 00000000..9e2ec953 --- /dev/null +++ b/roles/set_firewall/tasks/main.yaml @@ -0,0 +1,25 @@ +--- + +- name: Allow all access to tcp port 8080 + community.general.ufw: + rule: allow + port: '8080' + proto: tcp + +- name: Allow all access to tcp 
port 80 + community.general.ufw: + rule: allow + port: '80' + proto: tcp + +- name: Allow all access to tcp port 443 + community.general.ufw: + rule: allow + port: '443' + proto: tcp + +- name: Allow all access to tcp port 4443 + community.general.ufw: + rule: allow + port: '4443' + proto: tcp \ No newline at end of file diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index 92d95d72..ff8ffe4c 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -21,7 +21,7 @@ debug: var: ssh_directory_creation -- name: check .ssh key pair files exist +- name: Check .ssh key pair files exist stat: path: "~/.ssh/{{item}}" register: ssh_key_file_exists_check From 253dbbba90d7acb5ade6e98ee1351943c27dbdeb Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 17 Aug 2021 13:04:02 -0500 Subject: [PATCH 274/885] Starting to make updates for the implementation of variables. --- README.md | 2 +- ansible.cfg | 2 +- inventory | 20 -------------- inventory.yaml | 21 +++++++++++++++ main.yaml | 10 ++++--- roles/ansible_setup/tasks/main.yaml | 2 ++ roles/httpd/handlers/main.yaml | 6 +++++ roles/httpd/tasks/main.yaml | 36 ++------------------------ roles/install_packages/tasks/main.yaml | 23 ++++++++++++++-- roles/set_firewall/tasks/main.yaml | 25 ++++++++++++++++++ roles/ssh_key_gen/tasks/main.yaml | 2 +- 11 files changed, 86 insertions(+), 63 deletions(-) delete mode 100644 inventory create mode 100644 inventory.yaml create mode 100644 roles/ansible_setup/tasks/main.yaml create mode 100644 roles/httpd/handlers/main.yaml create mode 100644 roles/set_firewall/tasks/main.yaml diff --git a/README.md b/README.md index 10d0a3ae..d3268050 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ # Ansible-OpenShift-Provisioning -Phillip adding comment via lesson 3 +Localhost supported operating systems: Linux, Unix and Unix-like (i.e. 
MacOS X) diff --git a/ansible.cfg b/ansible.cfg index 5d2ee146..70f980f1 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,5 +1,5 @@ [defaults] -inventory = inventory +inventory = inventory.yaml private_key_file = ~/.ssh/ansible diff --git a/inventory b/inventory deleted file mode 100644 index 74d02d28..00000000 --- a/inventory +++ /dev/null @@ -1,20 +0,0 @@ -[localhost] -127.0.0.1 - -[kvm_host] -9.60.87.132 - -[bootstrap_server] -9.60.87.133 - -[bastion_server] -9.60.87.139 - -[control_nodes] -9.60.87.138 -9.60.87.137 -9.60.87.136 - -[worker_nodes] -9.60.87.135 -9.60.87.134 diff --git a/inventory.yaml b/inventory.yaml new file mode 100644 index 00000000..d51cfe6f --- /dev/null +++ b/inventory.yaml @@ -0,0 +1,21 @@ +all: + children: + kvm_host: + hosts: + 9.60.87.132 + bastion: + hosts: + 9.60.87.139 + bootstrap: + hosts: + 9.60.87.133 + control_nodes: + hosts: + 9.60.87.136 + 9.60.87.137 + 9.60.87.138 + compute_nodes: + hosts: + 9.60.87.134 + 9.60.87.135 + diff --git a/main.yaml b/main.yaml index 90e80942..28dd3b40 100644 --- a/main.yaml +++ b/main.yaml @@ -25,14 +25,15 @@ - create_bastion #(reminder to eventually use boot instructions) - hosts: localhost - become: true + connection: local + become: false gather_facts: no vars: #- ssh_target_ip: host_vars[kvm_host][ip] #this is not right roles: - ssh_copy_id -- hosts: bastion_server +- hosts: bastion become: true vars: - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] #reminder that "bind" is called "named" after install @@ -55,12 +56,13 @@ - create_compute_nodes #- hosts: localhost - #become: true + #connection: local + #become: false #gather_facts: yes #roles: #- ssh_config_jump -#- hosts: bastion_server +#- hosts: bastion #become: true #gather_facts: no #roles: diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml new file mode 100644 index 00000000..cd21505a --- /dev/null +++ b/roles/ansible_setup/tasks/main.yaml @@ -0,0 +1,2 @@ +--- + diff --git 
a/roles/httpd/handlers/main.yaml b/roles/httpd/handlers/main.yaml new file mode 100644 index 00000000..f86d6901 --- /dev/null +++ b/roles/httpd/handlers/main.yaml @@ -0,0 +1,6 @@ +--- + +- name: restart httpd + service: + name: httpd + state: restarted \ No newline at end of file diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index 0a276a9d..c0801bd7 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -11,6 +11,7 @@ regexp: '^Listen 80' replace: 'Listen 8080' backup: yes + notify: restart httpd - name: Ensure the SSL default port is 4443 replace: @@ -18,37 +19,4 @@ regexp: '^Listen 443 https' replace: 'Listen 4443 https' backup: yes - -- name: restart httpd to reflect changes to port - service: - name: httpd - state: restarted - -#- name: Allow all access to tcp port 8080 -# community.general.ufw: -# rule: allow -# port: '8080' -# proto: tcp -# -#- name: Allow all access to tcp port 80 -# community.general.ufw: -# rule: allow -# port: '80' -# proto: tcp -# -#- name: Allow all access to tcp port 443 -# community.general.ufw: -# rule: allow -# port: '443' -# proto: tcp -# -#- name: Allow all access to tcp port 4443 -# community.general.ufw: -# rule: allow -# port: '4443' -# proto: tcp - -#- name: check to make sure httpd is started -# service: -# name: httpd -# state: restarted + notify: restart httpd diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index e0c03d19..cddd772b 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,6 +1,9 @@ --- +- name: get host os family + vars: + host_os_family: "{{ ansible_facts['os_family'] }}" -- name: Installing required packages on "{{ ansible_facts['host'] }}" +- name: Installing required packages on Linux machine ansible.builtin.package: name: - "{{ packages[0] }}" @@ -10,6 +13,22 @@ - "{{ packages[4] | default(omit) }}" - "{{ packages[5] | default(omit) }}" - "{{ packages[6] | 
default(omit) }}" - - "{{ packages[6] | default(omit) }}" + - "{{ packages[7] | default(omit) }}" state: present update_cache: yes + when: host_os_family == "RedHat" or host_os_family == "Debian" + +- name: Installing required packages on Mac machine + community.general.homebrew: + name: + - "{{ packages[0] }}" + - "{{ packages[1] | default(omit) }}" + - "{{ packages[2] | default(omit) }}" + - "{{ packages[3] | default(omit) }}" + - "{{ packages[4] | default(omit) }}" + - "{{ packages[5] | default(omit) }}" + - "{{ packages[6] | default(omit) }}" + - "{{ packages[7] | default(omit) }}" + state: present + update_homebrew: yes + when: host_os_family == "Darwin" \ No newline at end of file diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml new file mode 100644 index 00000000..9e2ec953 --- /dev/null +++ b/roles/set_firewall/tasks/main.yaml @@ -0,0 +1,25 @@ +--- + +- name: Allow all access to tcp port 8080 + community.general.ufw: + rule: allow + port: '8080' + proto: tcp + +- name: Allow all access to tcp port 80 + community.general.ufw: + rule: allow + port: '80' + proto: tcp + +- name: Allow all access to tcp port 443 + community.general.ufw: + rule: allow + port: '443' + proto: tcp + +- name: Allow all access to tcp port 4443 + community.general.ufw: + rule: allow + port: '4443' + proto: tcp \ No newline at end of file diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index 92d95d72..ff8ffe4c 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -21,7 +21,7 @@ debug: var: ssh_directory_creation -- name: check .ssh key pair files exist +- name: Check .ssh key pair files exist stat: path: "~/.ssh/{{item}}" register: ssh_key_file_exists_check From 7424396c7daf9310608ceb416d04e4db8cde2bcb Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 17 Aug 2021 13:17:08 -0500 Subject: [PATCH 275/885] Merged conflicting changes to README file. 
--- README.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index b786df72..a9eda851 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,7 @@ # Ansible-OpenShift-Provisioning -<<<<<<< HEAD -Localhost supported operating systems: Linux, Unix and Unix-like (i.e. MacOS X) -======= -The goal of this playbook is to setup and deploy an OpenShift cluster utilizing KVM as the virtualization method +The goal of this playbook is to setup and deploy an OpenShift cluster utilizing KVM as the virtualization method. +To start this process, the supported operating systems for the localhost (the starting workstation) are: Linux (RedHat and Debian families) and Unix/Unix-like (i.e. MacOS X). Tags: bastion = configuration of bastion for OCP @@ -19,5 +17,4 @@ httpconf = configuration of httpd server on bastion kvm guest kvmhost = tasks to apply to KVM host for OCP cluster setocp = get ocp playbook ->>>>>>> f8ddfa644dc9cf079d17ded0062d76b22c93cd97 From 557aad763dbe630c7f99cb12b6ed409bcc0f0295 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 17 Aug 2021 13:17:08 -0500 Subject: [PATCH 276/885] Merged conflicting changes to README file. --- README.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index b786df72..a9eda851 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,7 @@ # Ansible-OpenShift-Provisioning -<<<<<<< HEAD -Localhost supported operating systems: Linux, Unix and Unix-like (i.e. MacOS X) -======= -The goal of this playbook is to setup and deploy an OpenShift cluster utilizing KVM as the virtualization method +The goal of this playbook is to setup and deploy an OpenShift cluster utilizing KVM as the virtualization method. +To start this process, the supported operating systems for the localhost (the starting workstation) are: Linux (RedHat and Debian families) and Unix/Unix-like (i.e. MacOS X). 
Tags: bastion = configuration of bastion for OCP @@ -19,5 +17,4 @@ httpconf = configuration of httpd server on bastion kvm guest kvmhost = tasks to apply to KVM host for OCP cluster setocp = get ocp playbook ->>>>>>> f8ddfa644dc9cf079d17ded0062d76b22c93cd97 From 6fe85fa16b8d7ecd4bad9ddd0d9e2f9c0e323685 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 17 Aug 2021 17:10:20 -0500 Subject: [PATCH 277/885] Updated README, created env.yaml file for user-input of variables. Added commented-out variables to install-config and inventory. --- README.md | 33 ++++++++++++++++++++--- env.yaml | 32 ++++++++++++++++++++++ inventory.yaml | 16 +++++------ main.yaml | 27 ++++++++++--------- roles/ansible_setup/tasks/main.yaml | 9 +++++++ roles/get-ocp/files/install-config.yaml | 36 +++++++++++++------------ 6 files changed, 111 insertions(+), 42 deletions(-) create mode 100644 env.yaml diff --git a/README.md b/README.md index a9eda851..b119bc47 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,34 @@ # Ansible-OpenShift-Provisioning The goal of this playbook is to setup and deploy an OpenShift cluster utilizing KVM as the virtualization method. -To start this process, the supported operating systems for the localhost (the starting workstation) are: Linux (RedHat and Debian families) and Unix/Unix-like (i.e. MacOS X). +Supported operating systems for the localhost (the starting workstation) are: +- Linux (RedHat and Debian families) +- Unix and Unix-like (i.e. 
MacOS X) + +Pre-requisites: +- Python3 intalled on your local computer (how-to: https://realpython.com/installing-python/) +- Ansible installed on your local computer (how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) +- A logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: + - 6 Integrated Facilities for Linux (IFLs) + - 75 GB of RAM + - 1 TB of disk space +- On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed +- On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses +- Fully Qualified Domain Names (FQDN) names for all IPv4 addresses + +When you are ready: +Step 1: Download this Git repository to a folder on your local computer +Step 2: Fill out the required variables for your specific installation in the env.yaml file +Step 3: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: + "ansible-playbook main.yaml --ask-become-pass" +Step 4: Watch Ansible as it completes the installation, correcting errors if they arise. +Step 5: When create_bastion playbook runs, open cockpit at < URL > and complete installation with these options: + - list options here + - list options here + - list options here +Step 6: When the playbooks for creating nodes run, watch them on the cockpit. When you see "< node-name > login" press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to abort. 
+Step 7: Verify installation by running: + "./openshift-install --dir=/ocpinst wait-for install-complete" Tags: bastion = configuration of bastion for OCP @@ -15,6 +42,4 @@ setocp = download of OCP installer and http server configuration haproxy = configuration of haproxy on bastion kvm guest httpconf = configuration of httpd server on bastion kvm guest kvmhost = tasks to apply to KVM host for OCP cluster -setocp = get ocp playbook - - +localhost = for playbooks that apply to the local machine running ansible \ No newline at end of file diff --git a/env.yaml b/env.yaml new file mode 100644 index 00000000..adceb97a --- /dev/null +++ b/env.yaml @@ -0,0 +1,32 @@ + +#to populate install-config +env-api-version: v1 #set default 1 +env-baseDomain: ocpz.wsclab.endicott.ibm.com +env-compute-hyperthreading: Enabled #set default enabled +env-compute-name: worker #set default compute +env-compute-count: 0 #is this supposed to be 2? +env-compute-arch: s390x #set default to s390x +env-control-hyperthreading: Enabled #set default enabled +env-control-name: master #set default control +env-control-count: 3 +env-control-arch: s390x #set default s390x +env-metadata-name: distribution +env-cidr: 10.128.0.0/14 #what is this? 
+env-host-prefix: 23 +env-network-type: OpenShiftSDN #set default OpenShiftSDN +env-service-network: 172.30.0.0/16 +env-fips: false #true or false, set default false +env-pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2F
nVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' +env-sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' + +# to fill inventory +env-ip-kvm-host: 9.60.87.132 +env-ip-bastion: 9.60.87.139 +env-ip-bootstrap: 9.60.87.133 +env-ip-control-0: 9.60.87.136 +env-ip-control-1: 9.60.87.137 +env-ip-control-2: 9.60.87.138 +env-ip-compute-0: 9.60.87.134 +env-ip-compute-1: 9.60.87.135 + +# to populate DNS 
configuration files diff --git a/inventory.yaml b/inventory.yaml index d51cfe6f..631091bd 100644 --- a/inventory.yaml +++ b/inventory.yaml @@ -2,20 +2,20 @@ all: children: kvm_host: hosts: - 9.60.87.132 + 9.60.87.132 # "{{ env-ip-kvm-host }}" bastion: hosts: - 9.60.87.139 + 9.60.87.139 # "{{ env-ip-bastion }}" bootstrap: hosts: - 9.60.87.133 + 9.60.87.133 # "{{ env-ip-bootstrap }}" control_nodes: hosts: - 9.60.87.136 - 9.60.87.137 - 9.60.87.138 + 9.60.87.136 # "{{ env-ip-control-0 }}" + 9.60.87.137 # "{{ env-ip-control-1 }}" + 9.60.87.138 # "{{ env-ip-control-2 }}" compute_nodes: hosts: - 9.60.87.134 - 9.60.87.135 + 9.60.87.134 # "{{ env-ip-compute-0 }}" + 9.60.87.135 # "{{ env-ip-compute-1 }}" diff --git a/main.yaml b/main.yaml index 28dd3b40..fc914d29 100644 --- a/main.yaml +++ b/main.yaml @@ -7,8 +7,10 @@ - packages: [ 'ansible_galaxy', 'sshpass', 'ssh-keygen', 'ssh-copy-id' ] #- ssh_target_ip: host_vars[kvm_host][ip] #this is not right #- other vars needed: ssh_key_filename, ssh_target_password, ssh_target_username, ssh_target_ip + vars_files: + - ./env.yml roles: - #- ansible_setup (check vars file for undefined vars, run ansible_setup, and fill necessary env vars files) + #- ansible_setup (check vars file for undefined vars, run ansible_setup, and fill necessary env vars files, run "ansible-inventory --list" and print to terminal (failed_when rc != 0) to check inventory is set up properly) - install_packages - ssh_key_gen #- ssh_copy_id @@ -22,7 +24,7 @@ - install_packages #- enable libvirt - macvtap - - create_bastion #(reminder to eventually use boot instructions) + - create_bastion #(reminder to eventually use kickstart installation files) - hosts: localhost connection: local @@ -38,22 +40,22 @@ vars: - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] #reminder that "bind" is called "named" after install roles: - - check_ssh + - check_ssh #- firewall (whatever that may be) #- set_selinux_permissive - - dns - - haproxy - - httpd - - get-ocp 
+ - dns + - haproxy + - httpd + - get-ocp - hosts: kvm_host become: true gather_facts: no roles: - - prep_kvm_guests - - create_bootstrap - - create_control_nodes - - create_compute_nodes + - prep_kvm_guests + - create_bootstrap + - create_control_nodes + - create_compute_nodes #- hosts: localhost #connection: local @@ -69,5 +71,4 @@ #- wait_for_bootkube # - remove_bootstrap # - approve_certs -# - verify_installation - +# - verify_installation \ No newline at end of file diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index cd21505a..1e1b1a71 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -1,2 +1,11 @@ --- +- name: Check user-input variables in env.yaml for undefined variables + +- name: fill inventory with user-input vars + +- name: fill install config with user-input vars +- name: Gather ansible facts about local host +- name: create group_vars files with user-input group names + +- name: fill dns files with user-input dns names \ No newline at end of file diff --git a/roles/get-ocp/files/install-config.yaml b/roles/get-ocp/files/install-config.yaml index d016fa9c..1d0fa8c7 100644 --- a/roles/get-ocp/files/install-config.yaml +++ b/roles/get-ocp/files/install-config.yaml @@ -1,26 +1,28 @@ -apiVersion: v1 -baseDomain: ocpz.wsclab.endicott.ibm.com +apiVersion: v1 # "{{ env-api-version | default(v1) }}" +baseDomain: ocpz.wsclab.endicott.ibm.com # "{{ env-baseDomain }}" compute: -- hyperthreading: Enabled - name: worker - replicas: 0 - architecture : s390x +- hyperthreading: Enabled # "{{ env-compute-hyperthreading | default(Enabled) }}" + name: worker # "{{ env-compute-name | default(worker) }}" + replicas: 0 # "{{ env-compute-count | default(0) }}" + architecture : s390x # "{{ env-compute-arch | default(s390x) }}" controlPlane: - hyperthreading: Enabled - name: master - replicas: 3 - architecture : s390x + hyperthreading: Enabled # "{{ env-control-hyperthreading | default(Enabled) 
}}" + name: master # "{{ env-control-name | default(compute) }}" + replicas: 3 # "{{ env-control-count | default(3) }}" + architecture : s390x # "{{ env-control-arch | default(s390x) }}" metadata: - name: distribution + name: distribution # "{{ env-metadata-name }}" networking: clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - networkType: OpenShiftSDN + - cidr: 10.128.0.0/14 # "{{ env-cidr }}" + hostPrefix: 23 # "{{ env-host-prefix | default(23) }}" + networkType: OpenShiftSDN # "{{ env-network-type | default(OpenShiftSDN) }}" serviceNetwork: - - 172.30.0.0/16 + - 172.30.0.0/16 # "{{ env-service-network }}" platform: none: {} -fips: false +fips: false # "{{ env-fips | default(false) }}" pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUU
tOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file +# "{{ env-pullSecret }}" +sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' +# "{{ env-sshKey }}" \ No newline at end of file From 54555322d9f719b02183d805e1c83f96df9f3f54 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 17 Aug 2021 17:10:20 -0500 Subject: [PATCH 278/885] Updated README, created env.yaml file for user-input of variables. Added commented-out variables to install-config and inventory. 
--- README.md | 33 ++++++++++++++++++++--- inventory.yaml | 16 +++++------ main.yaml | 27 ++++++++++--------- roles/ansible_setup/tasks/main.yaml | 9 +++++++ roles/get-ocp/files/install-config.yaml | 36 +++++++++++++------------ 5 files changed, 79 insertions(+), 42 deletions(-) diff --git a/README.md b/README.md index a9eda851..b119bc47 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,34 @@ # Ansible-OpenShift-Provisioning The goal of this playbook is to setup and deploy an OpenShift cluster utilizing KVM as the virtualization method. -To start this process, the supported operating systems for the localhost (the starting workstation) are: Linux (RedHat and Debian families) and Unix/Unix-like (i.e. MacOS X). +Supported operating systems for the localhost (the starting workstation) are: +- Linux (RedHat and Debian families) +- Unix and Unix-like (i.e. MacOS X) + +Pre-requisites: +- Python3 intalled on your local computer (how-to: https://realpython.com/installing-python/) +- Ansible installed on your local computer (how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) +- A logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: + - 6 Integrated Facilities for Linux (IFLs) + - 75 GB of RAM + - 1 TB of disk space +- On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed +- On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses +- Fully Qualified Domain Names (FQDN) names for all IPv4 addresses + +When you are ready: +Step 1: Download this Git repository to a folder on your local computer +Step 2: Fill out the required variables for your specific installation in the env.yaml file +Step 3: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: + "ansible-playbook main.yaml --ask-become-pass" +Step 4: Watch Ansible as it completes the installation, correcting errors if 
they arise. +Step 5: When create_bastion playbook runs, open cockpit at < URL > and complete installation with these options: + - list options here + - list options here + - list options here +Step 6: When the playbooks for creating nodes run, watch them on the cockpit. When you see "< node-name > login" press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to abort. +Step 7: Verify installation by running: + "./openshift-install --dir=/ocpinst wait-for install-complete" Tags: bastion = configuration of bastion for OCP @@ -15,6 +42,4 @@ setocp = download of OCP installer and http server configuration haproxy = configuration of haproxy on bastion kvm guest httpconf = configuration of httpd server on bastion kvm guest kvmhost = tasks to apply to KVM host for OCP cluster -setocp = get ocp playbook - - +localhost = for playbooks that apply to the local machine running ansible \ No newline at end of file diff --git a/inventory.yaml b/inventory.yaml index d51cfe6f..631091bd 100644 --- a/inventory.yaml +++ b/inventory.yaml @@ -2,20 +2,20 @@ all: children: kvm_host: hosts: - 9.60.87.132 + 9.60.87.132 # "{{ env-ip-kvm-host }}" bastion: hosts: - 9.60.87.139 + 9.60.87.139 # "{{ env-ip-bastion }}" bootstrap: hosts: - 9.60.87.133 + 9.60.87.133 # "{{ env-ip-bootstrap }}" control_nodes: hosts: - 9.60.87.136 - 9.60.87.137 - 9.60.87.138 + 9.60.87.136 # "{{ env-ip-control-0 }}" + 9.60.87.137 # "{{ env-ip-control-1 }}" + 9.60.87.138 # "{{ env-ip-control-2 }}" compute_nodes: hosts: - 9.60.87.134 - 9.60.87.135 + 9.60.87.134 # "{{ env-ip-compute-0 }}" + 9.60.87.135 # "{{ env-ip-compute-1 }}" diff --git a/main.yaml b/main.yaml index 28dd3b40..fc914d29 100644 --- a/main.yaml +++ b/main.yaml @@ -7,8 +7,10 @@ - packages: [ 'ansible_galaxy', 'sshpass', 'ssh-keygen', 'ssh-copy-id' ] #- ssh_target_ip: host_vars[kvm_host][ip] #this is not right #- other vars needed: ssh_key_filename, ssh_target_password, ssh_target_username, ssh_target_ip + 
vars_files: + - ./env.yml roles: - #- ansible_setup (check vars file for undefined vars, run ansible_setup, and fill necessary env vars files) + #- ansible_setup (check vars file for undefined vars, run ansible_setup, and fill necessary env vars files, run "ansible-inventory --list" and print to terminal (failed_when rc != 0) to check inventory is set up properly) - install_packages - ssh_key_gen #- ssh_copy_id @@ -22,7 +24,7 @@ - install_packages #- enable libvirt - macvtap - - create_bastion #(reminder to eventually use boot instructions) + - create_bastion #(reminder to eventually use kickstart installation files) - hosts: localhost connection: local @@ -38,22 +40,22 @@ vars: - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] #reminder that "bind" is called "named" after install roles: - - check_ssh + - check_ssh #- firewall (whatever that may be) #- set_selinux_permissive - - dns - - haproxy - - httpd - - get-ocp + - dns + - haproxy + - httpd + - get-ocp - hosts: kvm_host become: true gather_facts: no roles: - - prep_kvm_guests - - create_bootstrap - - create_control_nodes - - create_compute_nodes + - prep_kvm_guests + - create_bootstrap + - create_control_nodes + - create_compute_nodes #- hosts: localhost #connection: local @@ -69,5 +71,4 @@ #- wait_for_bootkube # - remove_bootstrap # - approve_certs -# - verify_installation - +# - verify_installation \ No newline at end of file diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index cd21505a..1e1b1a71 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -1,2 +1,11 @@ --- +- name: Check user-input variables in env.yaml for undefined variables + +- name: fill inventory with user-input vars + +- name: fill install config with user-input vars +- name: Gather ansible facts about local host +- name: create group_vars files with user-input group names + +- name: fill dns files with user-input dns names \ No newline at end 
of file diff --git a/roles/get-ocp/files/install-config.yaml b/roles/get-ocp/files/install-config.yaml index d016fa9c..1d0fa8c7 100644 --- a/roles/get-ocp/files/install-config.yaml +++ b/roles/get-ocp/files/install-config.yaml @@ -1,26 +1,28 @@ -apiVersion: v1 -baseDomain: ocpz.wsclab.endicott.ibm.com +apiVersion: v1 # "{{ env-api-version | default(v1) }}" +baseDomain: ocpz.wsclab.endicott.ibm.com # "{{ env-baseDomain }}" compute: -- hyperthreading: Enabled - name: worker - replicas: 0 - architecture : s390x +- hyperthreading: Enabled # "{{ env-compute-hyperthreading | default(Enabled) }}" + name: worker # "{{ env-compute-name | default(worker) }}" + replicas: 0 # "{{ env-compute-count | default(0) }}" + architecture : s390x # "{{ env-compute-arch | default(s390x) }}" controlPlane: - hyperthreading: Enabled - name: master - replicas: 3 - architecture : s390x + hyperthreading: Enabled # "{{ env-control-hyperthreading | default(Enabled) }}" + name: master # "{{ env-control-name | default(compute) }}" + replicas: 3 # "{{ env-control-count | default(3) }}" + architecture : s390x # "{{ env-control-arch | default(s390x) }}" metadata: - name: distribution + name: distribution # "{{ env-metadata-name }}" networking: clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - networkType: OpenShiftSDN + - cidr: 10.128.0.0/14 # "{{ env-cidr }}" + hostPrefix: 23 # "{{ env-host-prefix | default(23) }}" + networkType: OpenShiftSDN # "{{ env-network-type | default(OpenShiftSDN) }}" serviceNetwork: - - 172.30.0.0/16 + - 172.30.0.0/16 # "{{ env-service-network }}" platform: none: {} -fips: false +fips: false # "{{ env-fips | default(false) }}" pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file +# "{{ env-pullSecret }}" +sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' +# "{{ env-sshKey }}" \ No newline at end of file From 4fb50ce43e078943bd8f5f225f0492ba07c79380 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 18 Aug 2021 08:44:57 -0500 Subject: [PATCH 279/885] Updated firewall and SELinux playbooks for httpd and haproxy operation --- roles/set_firewall/tasks/main.yaml | 17 ++++++++++++++++- roles/set_selinux_permissive/tasks/main.yaml | 13 +++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index 9e2ec953..99bba5c9 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -22,4 +22,19 @@ community.general.ufw: rule: allow port: '4443' - proto: tcp \ No newline at end of file + proto: tcp + +- name: Permit traffic in default zone for http + ansible.posix.firewalld: + service: http + permanent: yes + state: enabled + +- name: Permit traffic in default zone for https + ansible.posix.firewalld: + service: https + permanent: yes + state: enabled + + + \ No newline at end of file diff --git a/roles/set_selinux_permissive/tasks/main.yaml b/roles/set_selinux_permissive/tasks/main.yaml index a30e361c..97cb1cc4 100644 --- 
a/roles/set_selinux_permissive/tasks/main.yaml +++ b/roles/set_selinux_permissive/tasks/main.yaml @@ -4,3 +4,16 @@ ansible.posix.selinux: policy: targeted state: permissive + +# Below requires community.general ansible package to be installed + +- name: Change permissive domain for httpd + selinux_permissive: + name: httpd_t + permissive: true + +- name: Change permissive domain for haproxy + selinux_permissive: + name: haproxy_t + permissive: true + \ No newline at end of file From 153be5ef02beb608068b8a41649b8648a041aaa0 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 18 Aug 2021 08:44:57 -0500 Subject: [PATCH 280/885] Updated firewall and SELinux playbooks for httpd and haproxy operation --- roles/set_firewall/tasks/main.yaml | 17 ++++++++++++++++- roles/set_selinux_permissive/tasks/main.yaml | 13 +++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index 9e2ec953..99bba5c9 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -22,4 +22,19 @@ community.general.ufw: rule: allow port: '4443' - proto: tcp \ No newline at end of file + proto: tcp + +- name: Permit traffic in default zone for http + ansible.posix.firewalld: + service: http + permanent: yes + state: enabled + +- name: Permit traffic in default zone for https + ansible.posix.firewalld: + service: https + permanent: yes + state: enabled + + + \ No newline at end of file diff --git a/roles/set_selinux_permissive/tasks/main.yaml b/roles/set_selinux_permissive/tasks/main.yaml index a30e361c..97cb1cc4 100644 --- a/roles/set_selinux_permissive/tasks/main.yaml +++ b/roles/set_selinux_permissive/tasks/main.yaml @@ -4,3 +4,16 @@ ansible.posix.selinux: policy: targeted state: permissive + +# Below requires community.general ansible package to be installed + +- name: Change permissive domain for httpd + selinux_permissive: + name: httpd_t + permissive: true + +- name: Change 
permissive domain for haproxy + selinux_permissive: + name: haproxy_t + permissive: true + \ No newline at end of file From ec624d7b2e2ff57e6145d47768d052aece40b793 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 18 Aug 2021 10:59:19 -0500 Subject: [PATCH 281/885] Updates from this morning's conversation with Phillip for the implementation of variables. --- README.md | 22 ++++++---- env.yaml | 5 +-- inventory.yaml | 3 +- main.yaml | 12 +++--- roles/ansible_setup/tasks/main.yaml | 6 +-- roles/get-ocp/files/tmp-install-config.yaml | 28 +++++++++++++ roles/haproxy/tasks/main.yaml | 6 +++ roles/httpd/tasks/main.yaml | 44 +++----------------- roles/set_firewall/tasks/main.yaml | 6 +++ roles/set_selinux_permissive/tasks/main.yaml | 16 +------ 10 files changed, 73 insertions(+), 75 deletions(-) create mode 100644 roles/get-ocp/files/tmp-install-config.yaml diff --git a/README.md b/README.md index b119bc47..b36b4264 100644 --- a/README.md +++ b/README.md @@ -19,27 +19,33 @@ Pre-requisites: When you are ready: Step 1: Download this Git repository to a folder on your local computer Step 2: Fill out the required variables for your specific installation in the env.yaml file -Step 3: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: +Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in roles/dns/files folder. +Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" -Step 4: Watch Ansible as it completes the installation, correcting errors if they arise. -Step 5: When create_bastion playbook runs, open cockpit at < URL > and complete installation with these options: +Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. 
+Step 6: When create_bastion playbook runs, open cockpit at < URL > and complete installation with these options: - list options here - list options here - list options here -Step 6: When the playbooks for creating nodes run, watch them on the cockpit. When you see "< node-name > login" press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to abort. -Step 7: Verify installation by running: +Step 7: When the playbooks for creating nodes run, watch them on the cockpit. When you see "< node-name > login" press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to abort. +Step 8: approve certs... need more detail +Step 9: Shutdown and destroy bootstrap (or optionally convert bootstrap to worker node) +Step 8: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" +Step 9: Tags: bastion = configuration of bastion for OCP keymastr = ssh key configuration and testing bastionvm = creation of Bastion KVM guest boostrap = creation of Boostrap KVM guest -compute = creation of the Compute nodes KVM guests (2) -control = creation of the Control nodes KVM guests (3 min) +compute = creation of the Compute nodes KVM guests (minimum 2) +control = creation of the Control nodes KVM guests (minimum 3) dns = configuration of dns server on bastion setocp = download of OCP installer and http server configuration haproxy = configuration of haproxy on bastion kvm guest httpconf = configuration of httpd server on bastion kvm guest kvmhost = tasks to apply to KVM host for OCP cluster -localhost = for playbooks that apply to the local machine running ansible \ No newline at end of file +localhost = for tasks that apply to the local machine running Ansible +firewall = for tasks related to firewall settings +selinux = for tasks related to SELinux settings \ No newline at end of file diff --git a/env.yaml b/env.yaml index adceb97a..2c9f2b32 100644 --- 
a/env.yaml +++ b/env.yaml @@ -1,8 +1,7 @@ #to populate install-config -env-api-version: v1 #set default 1 env-baseDomain: ocpz.wsclab.endicott.ibm.com -env-compute-hyperthreading: Enabled #set default enabled +env-compute-hyperthreading: Enabled env-compute-name: worker #set default compute env-compute-count: 0 #is this supposed to be 2? env-compute-arch: s390x #set default to s390x @@ -29,4 +28,4 @@ env-ip-control-2: 9.60.87.138 env-ip-compute-0: 9.60.87.134 env-ip-compute-1: 9.60.87.135 -# to populate DNS configuration files +# ssh diff --git a/inventory.yaml b/inventory.yaml index 631091bd..ced2eb42 100644 --- a/inventory.yaml +++ b/inventory.yaml @@ -17,5 +17,4 @@ all: compute_nodes: hosts: 9.60.87.134 # "{{ env-ip-compute-0 }}" - 9.60.87.135 # "{{ env-ip-compute-1 }}" - + 9.60.87.135 # "{{ env-ip-compute-1 }}" \ No newline at end of file diff --git a/main.yaml b/main.yaml index fc914d29..efcb8c25 100644 --- a/main.yaml +++ b/main.yaml @@ -4,7 +4,7 @@ connection: local become: false vars: - - packages: [ 'ansible_galaxy', 'sshpass', 'ssh-keygen', 'ssh-copy-id' ] + - packages: [ 'ansible_galaxy', 'sshpass', 'ssh-keygen', 'ssh-copy-id', 'community.general' ] #- ssh_target_ip: host_vars[kvm_host][ip] #this is not right #- other vars needed: ssh_key_filename, ssh_target_password, ssh_target_username, ssh_target_ip vars_files: @@ -22,6 +22,7 @@ roles: - check_ssh - install_packages + - set_selinux_permissive #- enable libvirt - macvtap - create_bastion #(reminder to eventually use kickstart installation files) @@ -41,8 +42,8 @@ - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] #reminder that "bind" is called "named" after install roles: - check_ssh - #- firewall (whatever that may be) - #- set_selinux_permissive + - firewall + - set_selinux_permissive - dns - haproxy - httpd @@ -68,7 +69,4 @@ #become: true #gather_facts: no #roles: - #- wait_for_bootkube -# - remove_bootstrap -# - approve_certs -# - verify_installation \ No newline at end of file + 
#- wait_for_bootkube \ No newline at end of file diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 1e1b1a71..4bb15888 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -5,7 +5,7 @@ - name: fill inventory with user-input vars - name: fill install config with user-input vars + - name: Gather ansible facts about local host -- name: create group_vars files with user-input group names - -- name: fill dns files with user-input dns names \ No newline at end of file + +- name: create group_vars files with user-input group names \ No newline at end of file diff --git a/roles/get-ocp/files/tmp-install-config.yaml b/roles/get-ocp/files/tmp-install-config.yaml new file mode 100644 index 00000000..3df6c54e --- /dev/null +++ b/roles/get-ocp/files/tmp-install-config.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +baseDomain: ocpz.wsclab.endicott.ibm.com # "{{ env-baseDomain }}" +compute: +- hyperthreading: Enabled + name: worker + replicas: 0 + architecture : s390x # "{{ env-compute-arch | default(s390x) }}" +controlPlane: + hyperthreading: Enabled + name: master + replicas: 3 # "{{ env-control-count | default(3) }}" + architecture : s390x # "{{ env-control-arch | default(s390x) }}" +metadata: + name: distribution # "{{ env-metadata-name }}" +networking: + clusterNetwork: + - cidr: 10.128.0.0/14 # "{{ env-cidr | default(10.128.0.0/14)}}" + hostPrefix: 23 # "{{ env-host-prefix | default(23) }}" + networkType: OpenShiftSDN # "{{ env-network-type | default(OpenShiftSDN) }}" + serviceNetwork: + - 172.30.0.0/16 # "{{ env-service-network | default(172.30.0.0/16) }}" +platform: + none: {} +fips: false # "{{ env-fips | default(false) }}" +pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' +# "{{ env-pullSecret }}" +sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' +# "{{ env-sshKey }}" diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index 0a1903d3..32f3322e 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -1,5 +1,11 @@ --- +- name: Change permissive domain for haproxy + tags: selinux,haproxy + selinux_permissive: + name: haproxy_t + permissive: true + - name: move haproxy config file to bastion tags: haproxy copy: diff --git 
a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index 15122531..ed01ef53 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -1,5 +1,11 @@ --- +- name: Change permissive domain for httpd + tags: selinux,httpconf + selinux_permissive: + name: httpd_t + permissive: true + - name: enable httpd tags: httpconf systemd: @@ -22,42 +28,4 @@ regexp: '^Listen 443 https' replace: 'Listen 4443 https' backup: yes -<<<<<<< HEAD notify: restart httpd -======= - -- name: restart httpd to reflect changes to port - tags: httpconf - service: - name: httpd - state: restarted - -#- name: Allow all access to tcp port 8080 -# community.general.ufw: -# rule: allow -# port: '8080' -# proto: tcp -# -#- name: Allow all access to tcp port 80 -# community.general.ufw: -# rule: allow -# port: '80' -# proto: tcp -# -#- name: Allow all access to tcp port 443 -# community.general.ufw: -# rule: allow -# port: '443' -# proto: tcp -# -#- name: Allow all access to tcp port 4443 -# community.general.ufw: -# rule: allow -# port: '4443' -# proto: tcp - -#- name: check to make sure httpd is started -# service: -# name: httpd -# state: restarted ->>>>>>> f8ddfa644dc9cf079d17ded0062d76b22c93cd97 diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index 99bba5c9..be02d15d 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -1,36 +1,42 @@ --- - name: Allow all access to tcp port 8080 + tags: firewall community.general.ufw: rule: allow port: '8080' proto: tcp - name: Allow all access to tcp port 80 + tags: firewall community.general.ufw: rule: allow port: '80' proto: tcp - name: Allow all access to tcp port 443 + tags: firewall community.general.ufw: rule: allow port: '443' proto: tcp - name: Allow all access to tcp port 4443 + tags: firewall community.general.ufw: rule: allow port: '4443' proto: tcp - name: Permit traffic in default zone for http + tags: firewall ansible.posix.firewalld: service: 
http permanent: yes state: enabled - name: Permit traffic in default zone for https + tags: firewall ansible.posix.firewalld: service: https permanent: yes diff --git a/roles/set_selinux_permissive/tasks/main.yaml b/roles/set_selinux_permissive/tasks/main.yaml index 97cb1cc4..71bd211a 100644 --- a/roles/set_selinux_permissive/tasks/main.yaml +++ b/roles/set_selinux_permissive/tasks/main.yaml @@ -1,19 +1,7 @@ --- - name: Put SELinux in permissive mode, logging actions that would be blocked. + tags: selinux,kvmhost,bastion ansible.posix.selinux: policy: targeted - state: permissive - -# Below requires community.general ansible package to be installed - -- name: Change permissive domain for httpd - selinux_permissive: - name: httpd_t - permissive: true - -- name: Change permissive domain for haproxy - selinux_permissive: - name: haproxy_t - permissive: true - \ No newline at end of file + state: permissive \ No newline at end of file From 5fc1e1ca3db5e8e8ef344b4ba53aee809f2254fd Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 18 Aug 2021 10:59:19 -0500 Subject: [PATCH 282/885] Updates from this morning's conversation with Phillip for the implementation of variables. 
--- README.md | 22 ++++++---- inventory.yaml | 3 +- main.yaml | 12 +++--- roles/ansible_setup/tasks/main.yaml | 6 +-- roles/get-ocp/files/tmp-install-config.yaml | 28 +++++++++++++ roles/haproxy/tasks/main.yaml | 6 +++ roles/httpd/tasks/main.yaml | 44 +++----------------- roles/set_firewall/tasks/main.yaml | 6 +++ roles/set_selinux_permissive/tasks/main.yaml | 16 +------ 9 files changed, 71 insertions(+), 72 deletions(-) create mode 100644 roles/get-ocp/files/tmp-install-config.yaml diff --git a/README.md b/README.md index b119bc47..b36b4264 100644 --- a/README.md +++ b/README.md @@ -19,27 +19,33 @@ Pre-requisites: When you are ready: Step 1: Download this Git repository to a folder on your local computer Step 2: Fill out the required variables for your specific installation in the env.yaml file -Step 3: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: +Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in roles/dns/files folder. +Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" -Step 4: Watch Ansible as it completes the installation, correcting errors if they arise. -Step 5: When create_bastion playbook runs, open cockpit at < URL > and complete installation with these options: +Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. +Step 6: When create_bastion playbook runs, open cockpit at < URL > and complete installation with these options: - list options here - list options here - list options here -Step 6: When the playbooks for creating nodes run, watch them on the cockpit. When you see "< node-name > login" press "ctrl-C" and then "C" to continue. 
If you do not see the login prompt, press "ctrl+C" and then "A" to abort. -Step 7: Verify installation by running: +Step 7: When the playbooks for creating nodes run, watch them on the cockpit. When you see "< node-name > login" press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to abort. +Step 8: approve certs... need more detail +Step 9: Shutdown and destroy bootstrap (or optionally convert bootstrap to worker node) +Step 8: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" +Step 9: Tags: bastion = configuration of bastion for OCP keymastr = ssh key configuration and testing bastionvm = creation of Bastion KVM guest boostrap = creation of Boostrap KVM guest -compute = creation of the Compute nodes KVM guests (2) -control = creation of the Control nodes KVM guests (3 min) +compute = creation of the Compute nodes KVM guests (minimum 2) +control = creation of the Control nodes KVM guests (minimum 3) dns = configuration of dns server on bastion setocp = download of OCP installer and http server configuration haproxy = configuration of haproxy on bastion kvm guest httpconf = configuration of httpd server on bastion kvm guest kvmhost = tasks to apply to KVM host for OCP cluster -localhost = for playbooks that apply to the local machine running ansible \ No newline at end of file +localhost = for tasks that apply to the local machine running Ansible +firewall = for tasks related to firewall settings +selinux = for tasks related to SELinux settings \ No newline at end of file diff --git a/inventory.yaml b/inventory.yaml index 631091bd..ced2eb42 100644 --- a/inventory.yaml +++ b/inventory.yaml @@ -17,5 +17,4 @@ all: compute_nodes: hosts: 9.60.87.134 # "{{ env-ip-compute-0 }}" - 9.60.87.135 # "{{ env-ip-compute-1 }}" - + 9.60.87.135 # "{{ env-ip-compute-1 }}" \ No newline at end of file diff --git a/main.yaml b/main.yaml index fc914d29..efcb8c25 100644 --- a/main.yaml +++ 
b/main.yaml @@ -4,7 +4,7 @@ connection: local become: false vars: - - packages: [ 'ansible_galaxy', 'sshpass', 'ssh-keygen', 'ssh-copy-id' ] + - packages: [ 'ansible_galaxy', 'sshpass', 'ssh-keygen', 'ssh-copy-id', 'community.general' ] #- ssh_target_ip: host_vars[kvm_host][ip] #this is not right #- other vars needed: ssh_key_filename, ssh_target_password, ssh_target_username, ssh_target_ip vars_files: @@ -22,6 +22,7 @@ roles: - check_ssh - install_packages + - set_selinux_permissive #- enable libvirt - macvtap - create_bastion #(reminder to eventually use kickstart installation files) @@ -41,8 +42,8 @@ - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] #reminder that "bind" is called "named" after install roles: - check_ssh - #- firewall (whatever that may be) - #- set_selinux_permissive + - firewall + - set_selinux_permissive - dns - haproxy - httpd @@ -68,7 +69,4 @@ #become: true #gather_facts: no #roles: - #- wait_for_bootkube -# - remove_bootstrap -# - approve_certs -# - verify_installation \ No newline at end of file + #- wait_for_bootkube \ No newline at end of file diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 1e1b1a71..4bb15888 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -5,7 +5,7 @@ - name: fill inventory with user-input vars - name: fill install config with user-input vars + - name: Gather ansible facts about local host -- name: create group_vars files with user-input group names - -- name: fill dns files with user-input dns names \ No newline at end of file + +- name: create group_vars files with user-input group names \ No newline at end of file diff --git a/roles/get-ocp/files/tmp-install-config.yaml b/roles/get-ocp/files/tmp-install-config.yaml new file mode 100644 index 00000000..3df6c54e --- /dev/null +++ b/roles/get-ocp/files/tmp-install-config.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +baseDomain: ocpz.wsclab.endicott.ibm.com # "{{ 
env-baseDomain }}" +compute: +- hyperthreading: Enabled + name: worker + replicas: 0 + architecture : s390x # "{{ env-compute-arch | default(s390x) }}" +controlPlane: + hyperthreading: Enabled + name: master + replicas: 3 # "{{ env-control-count | default(3) }}" + architecture : s390x # "{{ env-control-arch | default(s390x) }}" +metadata: + name: distribution # "{{ env-metadata-name }}" +networking: + clusterNetwork: + - cidr: 10.128.0.0/14 # "{{ env-cidr | default(10.128.0.0/14)}}" + hostPrefix: 23 # "{{ env-host-prefix | default(23) }}" + networkType: OpenShiftSDN # "{{ env-network-type | default(OpenShiftSDN) }}" + serviceNetwork: + - 172.30.0.0/16 # "{{ env-service-network | default(172.30.0.0/16) }}" +platform: + none: {} +fips: false # "{{ env-fips | default(false) }}" +pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3Q
TBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' +# "{{ env-pullSecret }}" +sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' +# "{{ env-sshKey }}" diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index 0a1903d3..32f3322e 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -1,5 +1,11 @@ --- +- name: Change permissive domain for haproxy + tags: selinux,haproxy + selinux_permissive: + name: haproxy_t + permissive: true + - name: move haproxy config file to bastion tags: haproxy copy: diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index 15122531..ed01ef53 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -1,5 +1,11 @@ --- +- name: Change permissive domain for httpd + tags: selinux,httpconf + selinux_permissive: + name: httpd_t + permissive: true + - name: enable httpd tags: httpconf systemd: @@ -22,42 +28,4 @@ regexp: '^Listen 443 https' replace: 'Listen 4443 https' backup: yes -<<<<<<< HEAD notify: restart httpd -======= - -- name: restart httpd to reflect changes to port - tags: httpconf - service: - name: httpd - state: restarted - -#- name: Allow all access to tcp port 8080 -# community.general.ufw: -# rule: allow -# port: '8080' -# proto: tcp -# -#- name: Allow all access to tcp port 80 -# community.general.ufw: -# rule: 
allow -# port: '80' -# proto: tcp -# -#- name: Allow all access to tcp port 443 -# community.general.ufw: -# rule: allow -# port: '443' -# proto: tcp -# -#- name: Allow all access to tcp port 4443 -# community.general.ufw: -# rule: allow -# port: '4443' -# proto: tcp - -#- name: check to make sure httpd is started -# service: -# name: httpd -# state: restarted ->>>>>>> f8ddfa644dc9cf079d17ded0062d76b22c93cd97 diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index 99bba5c9..be02d15d 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -1,36 +1,42 @@ --- - name: Allow all access to tcp port 8080 + tags: firewall community.general.ufw: rule: allow port: '8080' proto: tcp - name: Allow all access to tcp port 80 + tags: firewall community.general.ufw: rule: allow port: '80' proto: tcp - name: Allow all access to tcp port 443 + tags: firewall community.general.ufw: rule: allow port: '443' proto: tcp - name: Allow all access to tcp port 4443 + tags: firewall community.general.ufw: rule: allow port: '4443' proto: tcp - name: Permit traffic in default zone for http + tags: firewall ansible.posix.firewalld: service: http permanent: yes state: enabled - name: Permit traffic in default zone for https + tags: firewall ansible.posix.firewalld: service: https permanent: yes diff --git a/roles/set_selinux_permissive/tasks/main.yaml b/roles/set_selinux_permissive/tasks/main.yaml index 97cb1cc4..71bd211a 100644 --- a/roles/set_selinux_permissive/tasks/main.yaml +++ b/roles/set_selinux_permissive/tasks/main.yaml @@ -1,19 +1,7 @@ --- - name: Put SELinux in permissive mode, logging actions that would be blocked. 
+ tags: selinux,kvmhost,bastion ansible.posix.selinux: policy: targeted - state: permissive - -# Below requires community.general ansible package to be installed - -- name: Change permissive domain for httpd - selinux_permissive: - name: httpd_t - permissive: true - -- name: Change permissive domain for haproxy - selinux_permissive: - name: haproxy_t - permissive: true - \ No newline at end of file + state: permissive \ No newline at end of file From 0f04edd0d44cb6a31d1965525b9739bfa5184db8 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 18 Aug 2021 15:51:49 -0500 Subject: [PATCH 283/885] Swapped out static parameters for variables ones. --- env.yaml | 58 ++++++----- host_vars/9.60.87.132.yaml | 1 - host_vars/host_vars_file | 12 --- inventory.yaml | 16 +-- main.yaml | 22 ++-- roles/ansible_setup/tasks/main.yaml | 107 ++++++++++++++++++-- roles/create_bastion/tasks/main.yaml | 2 +- roles/create_bootstrap/tasks/main.yaml | 2 +- roles/enable_packages/defaults/main.yaml | 1 + roles/enable_packages/tasks/main.yaml | 14 +++ roles/get-ocp/files/install-config.yaml | 38 ++++--- roles/get-ocp/files/tmp-install-config.yaml | 18 ++-- roles/install_packages/tasks/main.yaml | 27 +---- roles/ssh_copy_id/tasks/main.yaml | 10 ++ roles/ssh_key_gen/tasks/main.yaml | 13 +-- 15 files changed, 214 insertions(+), 127 deletions(-) delete mode 100644 host_vars/9.60.87.132.yaml delete mode 100644 host_vars/host_vars_file create mode 100644 roles/enable_packages/defaults/main.yaml create mode 100644 roles/enable_packages/tasks/main.yaml create mode 100644 roles/ssh_copy_id/tasks/main.yaml diff --git a/env.yaml b/env.yaml index 2c9f2b32..8bd2c3d7 100644 --- a/env.yaml +++ b/env.yaml @@ -1,31 +1,37 @@ -#to populate install-config -env-baseDomain: ocpz.wsclab.endicott.ibm.com -env-compute-hyperthreading: Enabled -env-compute-name: worker #set default compute -env-compute-count: 0 #is this supposed to be 2? 
-env-compute-arch: s390x #set default to s390x -env-control-hyperthreading: Enabled #set default enabled -env-control-name: master #set default control -env-control-count: 3 -env-control-arch: s390x #set default s390x -env-metadata-name: distribution -env-cidr: 10.128.0.0/14 #what is this? -env-host-prefix: 23 -env-network-type: OpenShiftSDN #set default OpenShiftSDN -env-service-network: 172.30.0.0/16 -env-fips: false #true or false, set default false -env-pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWl
ZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -env-sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== 
root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' +# to populate install_config +env_baseDomain: ocpz.wsclab.endicott.ibm.com +env_compute_arch: s390x #default to s390x +env_control_count: 3 #default 3 +env_control_arch: s390x #default s390x +env_metadata_name: distribution +env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now +env_host_prefix: 23 #default 23 for now +env_network_type: OpenShiftSDN #set default OpenShiftSDN +env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 +env_fips: false #true or false, set default false +env_pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZze
nRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' +env_sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' # to fill inventory -env-ip-kvm-host: 9.60.87.132 -env-ip-bastion: 9.60.87.139 -env-ip-bootstrap: 9.60.87.133 -env-ip-control-0: 9.60.87.136 -env-ip-control-1: 9.60.87.137 -env-ip-control-2: 9.60.87.138 -env-ip-compute-0: 9.60.87.134 -env-ip-compute-1: 9.60.87.135 +env_ip_kvm_host: 9.60.87.132 +env_ip_bastion: 9.60.87.139 +env_ip_bootstrap: 9.60.87.133 +env_ip_control_0: 9.60.87.136 +env_ip_control_1: 9.60.87.137 +env_ip_control_2: 9.60.87.138 +env_ip_compute_0: 9.60.87.134 +env_ip_compute_1: 9.60.87.135 # ssh +env_ssh_username: jacob #Username to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. +env_ssh_pass: ibmzrocks #Password to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. + +# Ansible passwordless ssh +env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible +env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. 
default=""(no password) + +# networking +dns_nameserver: 9.60.87.139 +default_gateway: 9.60.86.1 +netmask: 255.255.254.0 diff --git a/host_vars/9.60.87.132.yaml b/host_vars/9.60.87.132.yaml deleted file mode 100644 index 8b137891..00000000 --- a/host_vars/9.60.87.132.yaml +++ /dev/null @@ -1 +0,0 @@ - diff --git a/host_vars/host_vars_file b/host_vars/host_vars_file deleted file mode 100644 index 97570a25..00000000 --- a/host_vars/host_vars_file +++ /dev/null @@ -1,12 +0,0 @@ -##placeholder until ready to simplify playbooks - -##list of needed variables: - -##in bastion main.yaml: -##baseDomain -##cluster_name -##pullsecret -##ssh-public-key -##installation_directory - - diff --git a/inventory.yaml b/inventory.yaml index ced2eb42..017c9f58 100644 --- a/inventory.yaml +++ b/inventory.yaml @@ -2,19 +2,19 @@ all: children: kvm_host: hosts: - 9.60.87.132 # "{{ env-ip-kvm-host }}" + "{{ env_ip_kvm_host }}": bastion: hosts: - 9.60.87.139 # "{{ env-ip-bastion }}" + "{{ env_ip_bastion }}": bootstrap: hosts: - 9.60.87.133 # "{{ env-ip-bootstrap }}" + "{{ env_ip_bootstrap }}": control_nodes: hosts: - 9.60.87.136 # "{{ env-ip-control-0 }}" - 9.60.87.137 # "{{ env-ip-control-1 }}" - 9.60.87.138 # "{{ env-ip-control-2 }}" + "{{ env_ip_control_0 }}": + "{{ env_ip_control_1 }}": + "{{ env_ip_control_2 }}": compute_nodes: hosts: - 9.60.87.134 # "{{ env-ip-compute-0 }}" - 9.60.87.135 # "{{ env-ip-compute-1 }}" \ No newline at end of file + "{{ env_ip_compute_0 }}": + "{{ env_ip_compute_1 }}": \ No newline at end of file diff --git a/main.yaml b/main.yaml index efcb8c25..ef0ab037 100644 --- a/main.yaml +++ b/main.yaml @@ -3,17 +3,16 @@ - hosts: localhost connection: local become: false - vars: - - packages: [ 'ansible_galaxy', 'sshpass', 'ssh-keygen', 'ssh-copy-id', 'community.general' ] - #- ssh_target_ip: host_vars[kvm_host][ip] #this is not right - #- other vars needed: ssh_key_filename, ssh_target_password, ssh_target_username, ssh_target_ip vars_files: - - ./env.yml + - env.yaml 
+ vars: + - packages: [ 'ssh-keygen', 'ssh-copy-id', ] + - ssh_target_ip: "{{ env_ip_kvm_host }}" roles: - #- ansible_setup (check vars file for undefined vars, run ansible_setup, and fill necessary env vars files, run "ansible-inventory --list" and print to terminal (failed_when rc != 0) to check inventory is set up properly) + - ansible_setup - install_packages - ssh_key_gen - #- ssh_copy_id + - ssh_copy_id - hosts: kvm_host become: true @@ -23,27 +22,28 @@ - check_ssh - install_packages - set_selinux_permissive - #- enable libvirt + - enable_packages - macvtap - - create_bastion #(reminder to eventually use kickstart installation files) + - create_bastion - hosts: localhost connection: local become: false gather_facts: no vars: - #- ssh_target_ip: host_vars[kvm_host][ip] #this is not right + - ssh_target_ip: "{{ env_ip_bastion }}" roles: - ssh_copy_id - hosts: bastion become: true vars: - - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] #reminder that "bind" is called "named" after install + - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] roles: - check_ssh - firewall - set_selinux_permissive + - enable_packages - dns - haproxy - httpd diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 4bb15888..e7a96d40 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -1,11 +1,106 @@ --- -- name: Check user-input variables in env.yaml for undefined variables - -- name: fill inventory with user-input vars +- name: Collect only facts returned by facter + ansible.builtin.setup: + +- name: Load in variables from env.yaml + include_vars: env.yaml + +- name: Check for any undefined user-input variables in env.yaml. Fail if true. + fail: + msg: "Required variable {{item}} has not been provided in env.yaml file." 
+ when: vars[item] is undefined + loop: + - env_baseDomain + - env_compute_arch + - env_control_count + - env_control_arch + - env_metadata_name + - env_cidr + - env_host_prefix + - env_network_type + - env_service_network + - env_fips + - env_pullSecret + - env_sshKey + - env_ip_kvm_host + - env_ip_bastion + - env_ip_bootstrap + - env_ip_control_0 + - env_ip_control_1 + - env_ip_control_2 + - env_ip_compute_0 + - env_ip_compute_1 + - env_ssh_username + - env_ssh_pass + - env_ssh_ans_name + - env_ssh_ans_pass -- name: fill install config with user-input vars +- name: Set facts from env.yaml so they can be used in other playbooks + set_fact: + env_baseDomain: "{{ env_baseDomain }}" + env_compute_arch: "{{ env_compute_arch }}" + env_control_count: "{{ env_control_count }}" + env_control_arch: "{{ env_control_arch }}" + env_metadata_name: "{{ env_metadata_name }}" + env_cidr: "{{ env_cidr }}" + env_host_prefix: "{{ env_host_prefix }}" + env_network_type: "{{ env_network_type }}" + env_service_network: "{{ env_service_network }}" + env_fips: "{{ env_fips }}" + env_pullSecret: "{{ env_pullSecret }}" + env_sshKey: "{{ env_sshKey }}" + env_ip_kvm_host: "{{ env_ip_kvm_host }}" + env_ip_bastion: "{{ env_ip_bastion }}" + env_ip_bootstrap: "{{ env_ip_bootstrap }}" + env_ip_control_0: "{{ env_ip_control_0 }}" + env_ip_control_1: "{{ env_ip_control_1 }}" + env_ip_control_2: "{{ env_ip_control_2 }}" + env_ip_compute_0: "{{ env_ip_compute_0 }}" + env_ip_compute_1: "{{ env_ip_compute_1 }}" + env_ssh_username: "{{ env_ssh_username }}" + env_ssh_pass: "{{ env_ssh_pass }}" + env_ssh_ans_name: "{{ env_ssh_ans_name }}" + env_ssh_ans_pass: "{{ env_ssh_ans_pass }}" + dns_nameserver: "{{ dns_nameserver }}" + default_gateway: "{{ default_gateway }}" + netmask: "{{ netmask }}" + cacheable: yes + +- name: check inventory setup + command: ansible-inventory --list + register: inv_check + failed_when: "inv.check.rc != 0" + +- name: install homebrew package manager if localhost is running Mac OS 
X and doesn't already have it. + command: /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + when: ansible_os_family == 'Darwin' + +- name: Install sshpass via homebrew, using github workaround because the makers of homebrew block sshpass download + command: brew install http://git.io/sshpass.rb + when: ansible_os_family == 'Darwin' + +- name: Install ssh packages on Linux localhost machines + ansible.builtin.package: + name: + - sshpass + - openssh + - ssh-copy-id + state: latest + update_cache: yes + when: host_os_family == "RedHat" or host_os_family == "Debian" + +- name: Install ssh packages on Mac machine via homebrew + community.general.homebrew: + name: + - openssh + - ssh-copy-id + state: latest + update_homebrew: yes + when: host_os_family == "Darwin" -- name: Gather ansible facts about local host +- name: install ansible.community.general collection for use later + command: ansible-galaxy collection install community.general -- name: create group_vars files with user-input group names \ No newline at end of file +- name: install ansible.community.crypto collection for use later + command: ansible-galaxy collection install community.crypto \ No newline at end of file diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 6cbf9875..dd48d14e 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -26,7 +26,7 @@ tags: bastionvm command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device 
virtio-blk,serial=ignition,drive=ignition" --noautoconsole - - name: wait for bastion to install + - name: Pause 15 minutes for installation. Once you see the login prompt on the bastion's terminal. Press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. tags: bastionvm pause: minutes: 60 \ No newline at end of file diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 5a3833ff..495c3a54 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -11,6 +11,6 @@ --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.133::9.60.86.1:255.255.254.0:bootstrap:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign" --noautoconsole -- name: pause 15 minutes +- name: Pause 15 minutes for installation. Once you see the login prompt on the bootstrap's terminal. Press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. 
pause: minutes: 15 \ No newline at end of file diff --git a/roles/enable_packages/defaults/main.yaml b/roles/enable_packages/defaults/main.yaml new file mode 100644 index 00000000..ccfaa59c --- /dev/null +++ b/roles/enable_packages/defaults/main.yaml @@ -0,0 +1 @@ +packages[4]: "named" \ No newline at end of file diff --git a/roles/enable_packages/tasks/main.yaml b/roles/enable_packages/tasks/main.yaml new file mode 100644 index 00000000..d8a4eca2 --- /dev/null +++ b/roles/enable_packages/tasks/main.yaml @@ -0,0 +1,14 @@ +--- + +- name: enable packages + ansible.builtin.service: + name: + - "{{ packages[0] | default(omit) }}" + - "{{ packages[1] | default(omit) }}" + - "{{ packages[2] | default(omit) }}" + - "{{ packages[3] | default(omit) }}" + - "{{ packages[4] | default(omit) }}" + - "{{ packages[5] | default(omit) }}" + - "{{ packages[6] | default(omit) }}" + - "{{ packages[7] | default(omit) }}" + state: enabled \ No newline at end of file diff --git a/roles/get-ocp/files/install-config.yaml b/roles/get-ocp/files/install-config.yaml index 1d0fa8c7..f72ce8cd 100644 --- a/roles/get-ocp/files/install-config.yaml +++ b/roles/get-ocp/files/install-config.yaml @@ -1,28 +1,26 @@ -apiVersion: v1 # "{{ env-api-version | default(v1) }}" -baseDomain: ocpz.wsclab.endicott.ibm.com # "{{ env-baseDomain }}" +apiVersion: v1 +baseDomain: "{{ env_baseDomain }}" compute: -- hyperthreading: Enabled # "{{ env-compute-hyperthreading | default(Enabled) }}" - name: worker # "{{ env-compute-name | default(worker) }}" - replicas: 0 # "{{ env-compute-count | default(0) }}" - architecture : s390x # "{{ env-compute-arch | default(s390x) }}" +- hyperthreading: Enabled + name: worker + replicas: 0 + architecture : "{{ env_compute_arch | default(s390x) }}" controlPlane: - hyperthreading: Enabled # "{{ env-control-hyperthreading | default(Enabled) }}" - name: master # "{{ env-control-name | default(compute) }}" - replicas: 3 # "{{ env-control-count | default(3) }}" - architecture : s390x # 
"{{ env-control-arch | default(s390x) }}" + hyperthreading: Enabled + name: master + replicas: "{{ env_control_count | default(3) }}" + architecture: "{{ env_control_arch | default(s390x) }}" metadata: - name: distribution # "{{ env-metadata-name }}" + name: "{{ env_metadata_name }}" networking: clusterNetwork: - - cidr: 10.128.0.0/14 # "{{ env-cidr }}" - hostPrefix: 23 # "{{ env-host-prefix | default(23) }}" - networkType: OpenShiftSDN # "{{ env-network-type | default(OpenShiftSDN) }}" + - cidr: "{{ env_cidr | default(10.128.0.0/14)}}" + hostPrefix: "{{ env_host_prefix | default(23) }}" + networkType: "{{ env_network_type | default(OpenShiftSDN) }}" serviceNetwork: - - 172.30.0.0/16 # "{{ env-service-network }}" + - "{{ env_service_network | default(172.30.0.0/16) }}" platform: none: {} -fips: false # "{{ env-fips | default(false) }}" -pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E
4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -# "{{ env-pullSecret }}" -sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' -# "{{ env-sshKey }}" \ No newline at end of file +fips: false "{{ env_fips | default(false) }}" +pullSecret: "{{ env_pullSecret }}" +sshKey: "{{ env_sshKey }}" diff --git a/roles/get-ocp/files/tmp-install-config.yaml b/roles/get-ocp/files/tmp-install-config.yaml index 3df6c54e..1d0fa8c7 100644 --- a/roles/get-ocp/files/tmp-install-config.yaml +++ b/roles/get-ocp/files/tmp-install-config.yaml @@ -1,28 +1,28 @@ -apiVersion: v1 +apiVersion: v1 # "{{ env-api-version | default(v1) }}" baseDomain: ocpz.wsclab.endicott.ibm.com # "{{ env-baseDomain }}" compute: -- hyperthreading: Enabled - name: worker - replicas: 0 +- hyperthreading: Enabled # "{{ env-compute-hyperthreading | default(Enabled) }}" + name: worker # "{{ env-compute-name | default(worker) }}" + replicas: 0 # "{{ env-compute-count | default(0) }}" architecture : s390x # "{{ env-compute-arch | default(s390x) }}" controlPlane: - hyperthreading: Enabled - name: master + hyperthreading: Enabled # "{{ env-control-hyperthreading | default(Enabled) }}" + name: master # "{{ env-control-name | default(compute) }}" replicas: 3 # "{{ env-control-count | default(3) }}" architecture : s390x # "{{ env-control-arch | default(s390x) }}" metadata: name: distribution # 
"{{ env-metadata-name }}" networking: clusterNetwork: - - cidr: 10.128.0.0/14 # "{{ env-cidr | default(10.128.0.0/14)}}" + - cidr: 10.128.0.0/14 # "{{ env-cidr }}" hostPrefix: 23 # "{{ env-host-prefix | default(23) }}" networkType: OpenShiftSDN # "{{ env-network-type | default(OpenShiftSDN) }}" serviceNetwork: - - 172.30.0.0/16 # "{{ env-service-network | default(172.30.0.0/16) }}" + - 172.30.0.0/16 # "{{ env-service-network }}" platform: none: {} fips: false # "{{ env-fips | default(false) }}" pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1Bv
QTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' # "{{ env-pullSecret }}" sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' -# "{{ env-sshKey }}" +# "{{ env-sshKey }}" \ No newline at end of file diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index cddd772b..ce342bab 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,12 +1,9 @@ --- -- name: get host os family - vars: - host_os_family: "{{ ansible_facts['os_family'] }}" -- name: Installing required packages on Linux machine +- name: Installing required packages ansible.builtin.package: name: - - "{{ packages[0] }}" + - "{{ packages[0] | default(omit) }}" - "{{ packages[1] | default(omit) }}" - "{{ packages[2] | default(omit) }}" - "{{ packages[3] | default(omit) }}" @@ -14,21 +11,5 @@ - "{{ packages[5] | default(omit) }}" - "{{ packages[6] | default(omit) }}" - "{{ packages[7] | default(omit) }}" - state: present - update_cache: yes - when: host_os_family == "RedHat" or host_os_family == "Debian" - -- name: Installing required packages on Mac machine - community.general.homebrew: - name: - - "{{ packages[0] }}" - - "{{ packages[1] | default(omit) }}" - - "{{ packages[2] | default(omit) }}" - - "{{ packages[3] | default(omit) }}" - - "{{ packages[4] | default(omit) }}" - - "{{ 
packages[5] | default(omit) }}" - - "{{ packages[6] | default(omit) }}" - - "{{ packages[7] | default(omit) }}" - state: present - update_homebrew: yes - when: host_os_family == "Darwin" \ No newline at end of file + state: latest + update_cache: yes \ No newline at end of file diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml new file mode 100644 index 00000000..b79a29ab --- /dev/null +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -0,0 +1,10 @@ +--- + +- name: distribute the ssh key to a remote host + shell: "/usr/local/bin/sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" + register: ssh_copy_id_execution + failed_when: ssh_copy_id_execution.rc != 0 + +- name: Print results of copying ssh id to remote host. + debug: + var: ssh_copy_id_execution diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index fa432f45..45188412 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -25,18 +25,13 @@ debug: var: ssh_directory_creation -<<<<<<< HEAD - name: Check .ssh key pair files exist -======= -- name: check .ssh key pair files exist - tags: keymastr ->>>>>>> f8ddfa644dc9cf079d17ded0062d76b22c93cd97 stat: path: "~/.ssh/{{item}}" register: ssh_key_file_exists_check with_items: - - "{{ssh_key_filename}}" - - "{{ssh_key_filename}}.pub" + - "{{env_ssh_ans_name}}" + - "{{env_ssh_ans_name}}.pub" - name: Print results of ssh key pair files check tags: keymastr @@ -46,7 +41,7 @@ - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key tags: keymastr community.crypto.openssh_keypair: - path: ~/.ssh/{{ ssh_key_filename }} + path: ~/.ssh/{{ env_ssh_ans_name }} passphrase: "" register: ssh_key_creation failed_when: ssh_key_creation.rc != 0 @@ -61,7 +56,7 @@ tags: keymastr lineinfile: path: ansible.cfg - line: 
"private_key_file = ~/.ssh/{{ssh_key_filename}}" + line: "private_key_file = ~/.ssh/{{env_ssh_ans_name}}" state: present backup: yes register: ssh_config_file_key_addition From ac9ac0efca8569c3f0112f624526edf3811545d7 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 18 Aug 2021 15:51:49 -0500 Subject: [PATCH 284/885] Swapped out static parameters for variables ones. --- host_vars/9.60.87.132.yaml | 1 - host_vars/host_vars_file | 12 --- inventory.yaml | 16 +-- main.yaml | 22 ++-- roles/ansible_setup/tasks/main.yaml | 107 ++++++++++++++++++-- roles/create_bastion/tasks/main.yaml | 2 +- roles/create_bootstrap/tasks/main.yaml | 2 +- roles/enable_packages/defaults/main.yaml | 1 + roles/enable_packages/tasks/main.yaml | 14 +++ roles/get-ocp/files/install-config.yaml | 38 ++++--- roles/get-ocp/files/tmp-install-config.yaml | 18 ++-- roles/install_packages/tasks/main.yaml | 27 +---- roles/ssh_copy_id/tasks/main.yaml | 10 ++ roles/ssh_key_gen/tasks/main.yaml | 13 +-- 14 files changed, 182 insertions(+), 101 deletions(-) delete mode 100644 host_vars/9.60.87.132.yaml delete mode 100644 host_vars/host_vars_file create mode 100644 roles/enable_packages/defaults/main.yaml create mode 100644 roles/enable_packages/tasks/main.yaml create mode 100644 roles/ssh_copy_id/tasks/main.yaml diff --git a/host_vars/9.60.87.132.yaml b/host_vars/9.60.87.132.yaml deleted file mode 100644 index 8b137891..00000000 --- a/host_vars/9.60.87.132.yaml +++ /dev/null @@ -1 +0,0 @@ - diff --git a/host_vars/host_vars_file b/host_vars/host_vars_file deleted file mode 100644 index 97570a25..00000000 --- a/host_vars/host_vars_file +++ /dev/null @@ -1,12 +0,0 @@ -##placeholder until ready to simplify playbooks - -##list of needed variables: - -##in bastion main.yaml: -##baseDomain -##cluster_name -##pullsecret -##ssh-public-key -##installation_directory - - diff --git a/inventory.yaml b/inventory.yaml index ced2eb42..017c9f58 100644 --- a/inventory.yaml +++ b/inventory.yaml @@ -2,19 +2,19 @@ all: 
children: kvm_host: hosts: - 9.60.87.132 # "{{ env-ip-kvm-host }}" + "{{ env_ip_kvm_host }}": bastion: hosts: - 9.60.87.139 # "{{ env-ip-bastion }}" + "{{ env_ip_bastion }}": bootstrap: hosts: - 9.60.87.133 # "{{ env-ip-bootstrap }}" + "{{ env_ip_bootstrap }}": control_nodes: hosts: - 9.60.87.136 # "{{ env-ip-control-0 }}" - 9.60.87.137 # "{{ env-ip-control-1 }}" - 9.60.87.138 # "{{ env-ip-control-2 }}" + "{{ env_ip_control_0 }}": + "{{ env_ip_control_1 }}": + "{{ env_ip_control_2 }}": compute_nodes: hosts: - 9.60.87.134 # "{{ env-ip-compute-0 }}" - 9.60.87.135 # "{{ env-ip-compute-1 }}" \ No newline at end of file + "{{ env_ip_compute_0 }}": + "{{ env_ip_compute_1 }}": \ No newline at end of file diff --git a/main.yaml b/main.yaml index efcb8c25..ef0ab037 100644 --- a/main.yaml +++ b/main.yaml @@ -3,17 +3,16 @@ - hosts: localhost connection: local become: false - vars: - - packages: [ 'ansible_galaxy', 'sshpass', 'ssh-keygen', 'ssh-copy-id', 'community.general' ] - #- ssh_target_ip: host_vars[kvm_host][ip] #this is not right - #- other vars needed: ssh_key_filename, ssh_target_password, ssh_target_username, ssh_target_ip vars_files: - - ./env.yml + - env.yaml + vars: + - packages: [ 'ssh-keygen', 'ssh-copy-id', ] + - ssh_target_ip: "{{ env_ip_kvm_host }}" roles: - #- ansible_setup (check vars file for undefined vars, run ansible_setup, and fill necessary env vars files, run "ansible-inventory --list" and print to terminal (failed_when rc != 0) to check inventory is set up properly) + - ansible_setup - install_packages - ssh_key_gen - #- ssh_copy_id + - ssh_copy_id - hosts: kvm_host become: true @@ -23,27 +22,28 @@ - check_ssh - install_packages - set_selinux_permissive - #- enable libvirt + - enable_packages - macvtap - - create_bastion #(reminder to eventually use kickstart installation files) + - create_bastion - hosts: localhost connection: local become: false gather_facts: no vars: - #- ssh_target_ip: host_vars[kvm_host][ip] #this is not right + - 
ssh_target_ip: "{{ env_ip_bastion }}" roles: - ssh_copy_id - hosts: bastion become: true vars: - - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] #reminder that "bind" is called "named" after install + - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] roles: - check_ssh - firewall - set_selinux_permissive + - enable_packages - dns - haproxy - httpd diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 4bb15888..e7a96d40 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -1,11 +1,106 @@ --- -- name: Check user-input variables in env.yaml for undefined variables - -- name: fill inventory with user-input vars +- name: Collect only facts returned by facter + ansible.builtin.setup: + +- name: Load in variables from env.yaml + include_vars: env.yaml + +- name: Check for any undefined user-input variables in env.yaml. Fail if true. + fail: + msg: "Required variable {{item}} has not been provided in env.yaml file." 
+ when: vars[item] is undefined + loop: + - env_baseDomain + - env_compute_arch + - env_control_count + - env_control_arch + - env_metadata_name + - env_cidr + - env_host_prefix + - env_network_type + - env_service_network + - env_fips + - env_pullSecret + - env_sshKey + - env_ip_kvm_host + - env_ip_bastion + - env_ip_bootstrap + - env_ip_control_0 + - env_ip_control_1 + - env_ip_control_2 + - env_ip_compute_0 + - env_ip_compute_1 + - env_ssh_username + - env_ssh_pass + - env_ssh_ans_name + - env_ssh_ans_pass -- name: fill install config with user-input vars +- name: Set facts from env.yaml so they can be used in other playbooks + set_fact: + env_baseDomain: "{{ env_baseDomain }}" + env_compute_arch: "{{ env_compute_arch }}" + env_control_count: "{{ env_control_count }}" + env_control_arch: "{{ env_control_arch }}" + env_metadata_name: "{{ env_metadata_name }}" + env_cidr: "{{ env_cidr }}" + env_host_prefix: "{{ env_host_prefix }}" + env_network_type: "{{ env_network_type }}" + env_service_network: "{{ env_service_network }}" + env_fips: "{{ env_fips }}" + env_pullSecret: "{{ env_pullSecret }}" + env_sshKey: "{{ env_sshKey }}" + env_ip_kvm_host: "{{ env_ip_kvm_host }}" + env_ip_bastion: "{{ env_ip_bastion }}" + env_ip_bootstrap: "{{ env_ip_bootstrap }}" + env_ip_control_0: "{{ env_ip_control_0 }}" + env_ip_control_1: "{{ env_ip_control_1 }}" + env_ip_control_2: "{{ env_ip_control_2 }}" + env_ip_compute_0: "{{ env_ip_compute_0 }}" + env_ip_compute_1: "{{ env_ip_compute_1 }}" + env_ssh_username: "{{ env_ssh_username }}" + env_ssh_pass: "{{ env_ssh_pass }}" + env_ssh_ans_name: "{{ env_ssh_ans_name }}" + env_ssh_ans_pass: "{{ env_ssh_ans_pass }}" + dns_nameserver: "{{ dns_nameserver }}" + default_gateway: "{{ default_gateway }}" + netmask: "{{ netmask }}" + cacheable: yes + +- name: check inventory setup + command: ansible-inventory --list + register: inv_check + failed_when: "inv_check.rc != 0" + +- name: install homebrew package manager if localhost is running Mac OS 
X and doesn't already have it. + command: /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + when: ansible_os_family == 'Darwin' + +- name: Install sshpass via homebrew, using github workaround because the makers of homebrew block sshpass download + command: brew install http://git.io/sshpass.rb + when: ansible_os_family == 'Darwin' + +- name: Install ssh packages on Linux localhost machines + ansible.builtin.package: + name: + - sshpass + - openssh + - ssh-copy-id + state: latest + update_cache: yes + when: host_os_family == "RedHat" or host_os_family == "Debian" + +- name: Install ssh packages on Mac machine via homebrew + community.general.homebrew: + name: + - openssh + - ssh-copy-id + state: latest + update_homebrew: yes + when: host_os_family == "Darwin" -- name: Gather ansible facts about local host +- name: install ansible.community.general collection for use later + command: ansible-galaxy collection install community.general -- name: create group_vars files with user-input group names \ No newline at end of file +- name: install ansible.community.crypto collection for use later + command: ansible-galaxy collection install community.crypto \ No newline at end of file diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 6cbf9875..dd48d14e 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -26,7 +26,7 @@ tags: bastionvm command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device 
virtio-blk,serial=ignition,drive=ignition" --noautoconsole - - name: wait for bastion to install + - name: Pause 15 minutes for installation. Once you see the login prompt on the bastion's terminal. Press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. tags: bastionvm pause: minutes: 60 \ No newline at end of file diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 5a3833ff..495c3a54 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -11,6 +11,6 @@ --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.133::9.60.86.1:255.255.254.0:bootstrap:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign" --noautoconsole -- name: pause 15 minutes +- name: Pause 15 minutes for installation. Once you see the login prompt on the bootstrap's terminal. Press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. 
pause: minutes: 15 \ No newline at end of file diff --git a/roles/enable_packages/defaults/main.yaml b/roles/enable_packages/defaults/main.yaml new file mode 100644 index 00000000..ccfaa59c --- /dev/null +++ b/roles/enable_packages/defaults/main.yaml @@ -0,0 +1 @@ +packages[4]: "named" \ No newline at end of file diff --git a/roles/enable_packages/tasks/main.yaml b/roles/enable_packages/tasks/main.yaml new file mode 100644 index 00000000..d8a4eca2 --- /dev/null +++ b/roles/enable_packages/tasks/main.yaml @@ -0,0 +1,14 @@ +--- + +- name: enable packages + ansible.builtin.service: + name: + - "{{ packages[0] | default(omit) }}" + - "{{ packages[1] | default(omit) }}" + - "{{ packages[2] | default(omit) }}" + - "{{ packages[3] | default(omit) }}" + - "{{ packages[4] | default(omit) }}" + - "{{ packages[5] | default(omit) }}" + - "{{ packages[6] | default(omit) }}" + - "{{ packages[7] | default(omit) }}" + state: enabled \ No newline at end of file diff --git a/roles/get-ocp/files/install-config.yaml b/roles/get-ocp/files/install-config.yaml index 1d0fa8c7..f72ce8cd 100644 --- a/roles/get-ocp/files/install-config.yaml +++ b/roles/get-ocp/files/install-config.yaml @@ -1,28 +1,26 @@ -apiVersion: v1 # "{{ env-api-version | default(v1) }}" -baseDomain: ocpz.wsclab.endicott.ibm.com # "{{ env-baseDomain }}" +apiVersion: v1 +baseDomain: "{{ env_baseDomain }}" compute: -- hyperthreading: Enabled # "{{ env-compute-hyperthreading | default(Enabled) }}" - name: worker # "{{ env-compute-name | default(worker) }}" - replicas: 0 # "{{ env-compute-count | default(0) }}" - architecture : s390x # "{{ env-compute-arch | default(s390x) }}" +- hyperthreading: Enabled + name: worker + replicas: 0 + architecture : "{{ env_compute_arch | default(s390x) }}" controlPlane: - hyperthreading: Enabled # "{{ env-control-hyperthreading | default(Enabled) }}" - name: master # "{{ env-control-name | default(compute) }}" - replicas: 3 # "{{ env-control-count | default(3) }}" - architecture : s390x # 
"{{ env-control-arch | default(s390x) }}" + hyperthreading: Enabled + name: master + replicas: "{{ env_control_count | default(3) }}" + architecture: "{{ env_control_arch | default(s390x) }}" metadata: - name: distribution # "{{ env-metadata-name }}" + name: "{{ env_metadata_name }}" networking: clusterNetwork: - - cidr: 10.128.0.0/14 # "{{ env-cidr }}" - hostPrefix: 23 # "{{ env-host-prefix | default(23) }}" - networkType: OpenShiftSDN # "{{ env-network-type | default(OpenShiftSDN) }}" + - cidr: "{{ env_cidr | default(10.128.0.0/14)}}" + hostPrefix: "{{ env_host_prefix | default(23) }}" + networkType: "{{ env_network_type | default(OpenShiftSDN) }}" serviceNetwork: - - 172.30.0.0/16 # "{{ env-service-network }}" + - "{{ env_service_network | default(172.30.0.0/16) }}" platform: none: {} -fips: false # "{{ env-fips | default(false) }}" -pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E
4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -# "{{ env-pullSecret }}" -sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' -# "{{ env-sshKey }}" \ No newline at end of file +fips: false "{{ env_fips | default(false) }}" +pullSecret: "{{ env_pullSecret }}" +sshKey: "{{ env_sshKey }}" diff --git a/roles/get-ocp/files/tmp-install-config.yaml b/roles/get-ocp/files/tmp-install-config.yaml index 3df6c54e..1d0fa8c7 100644 --- a/roles/get-ocp/files/tmp-install-config.yaml +++ b/roles/get-ocp/files/tmp-install-config.yaml @@ -1,28 +1,28 @@ -apiVersion: v1 +apiVersion: v1 # "{{ env-api-version | default(v1) }}" baseDomain: ocpz.wsclab.endicott.ibm.com # "{{ env-baseDomain }}" compute: -- hyperthreading: Enabled - name: worker - replicas: 0 +- hyperthreading: Enabled # "{{ env-compute-hyperthreading | default(Enabled) }}" + name: worker # "{{ env-compute-name | default(worker) }}" + replicas: 0 # "{{ env-compute-count | default(0) }}" architecture : s390x # "{{ env-compute-arch | default(s390x) }}" controlPlane: - hyperthreading: Enabled - name: master + hyperthreading: Enabled # "{{ env-control-hyperthreading | default(Enabled) }}" + name: master # "{{ env-control-name | default(compute) }}" replicas: 3 # "{{ env-control-count | default(3) }}" architecture : s390x # "{{ env-control-arch | default(s390x) }}" metadata: name: distribution # 
"{{ env-metadata-name }}" networking: clusterNetwork: - - cidr: 10.128.0.0/14 # "{{ env-cidr | default(10.128.0.0/14)}}" + - cidr: 10.128.0.0/14 # "{{ env-cidr }}" hostPrefix: 23 # "{{ env-host-prefix | default(23) }}" networkType: OpenShiftSDN # "{{ env-network-type | default(OpenShiftSDN) }}" serviceNetwork: - - 172.30.0.0/16 # "{{ env-service-network | default(172.30.0.0/16) }}" + - 172.30.0.0/16 # "{{ env-service-network }}" platform: none: {} fips: false # "{{ env-fips | default(false) }}" pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1Bv
QTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' # "{{ env-pullSecret }}" sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' -# "{{ env-sshKey }}" +# "{{ env-sshKey }}" \ No newline at end of file diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index cddd772b..ce342bab 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,12 +1,9 @@ --- -- name: get host os family - vars: - host_os_family: "{{ ansible_facts['os_family'] }}" -- name: Installing required packages on Linux machine +- name: Installing required packages ansible.builtin.package: name: - - "{{ packages[0] }}" + - "{{ packages[0] | default(omit) }}" - "{{ packages[1] | default(omit) }}" - "{{ packages[2] | default(omit) }}" - "{{ packages[3] | default(omit) }}" @@ -14,21 +11,5 @@ - "{{ packages[5] | default(omit) }}" - "{{ packages[6] | default(omit) }}" - "{{ packages[7] | default(omit) }}" - state: present - update_cache: yes - when: host_os_family == "RedHat" or host_os_family == "Debian" - -- name: Installing required packages on Mac machine - community.general.homebrew: - name: - - "{{ packages[0] }}" - - "{{ packages[1] | default(omit) }}" - - "{{ packages[2] | default(omit) }}" - - "{{ packages[3] | default(omit) }}" - - "{{ packages[4] | default(omit) }}" - - "{{ 
packages[5] | default(omit) }}" - - "{{ packages[6] | default(omit) }}" - - "{{ packages[7] | default(omit) }}" - state: present - update_homebrew: yes - when: host_os_family == "Darwin" \ No newline at end of file + state: latest + update_cache: yes \ No newline at end of file diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml new file mode 100644 index 00000000..b79a29ab --- /dev/null +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -0,0 +1,10 @@ +--- + +- name: distribute the ssh key to a remote host + shell: "/usr/local/bin/sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p {{ ssh_target_ip }}.22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" + register: ssh_copy_id_execution + failed_when: ssh_copy_id_execution.rc != 0 + +- name: Print results of copying ssh id to remote host. + debug: + var: ssh_copy_id_execution diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index fa432f45..45188412 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -25,18 +25,13 @@ debug: var: ssh_directory_creation -<<<<<<< HEAD - name: Check .ssh key pair files exist -======= -- name: check .ssh key pair files exist - tags: keymastr ->>>>>>> f8ddfa644dc9cf079d17ded0062d76b22c93cd97 stat: path: "~/.ssh/{{item}}" register: ssh_key_file_exists_check with_items: - - "{{ssh_key_filename}}" - - "{{ssh_key_filename}}.pub" + - "{{env-ssh-ans-name}}" + - "{{env-ssh-ans-name}}.pub" - name: Print results of ssh key pair files check tags: keymastr @@ -46,7 +41,7 @@ - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key tags: keymastr community.crypto.openssh_keypair: - path: ~/.ssh/{{ ssh_key_filename }} + path: ~/.ssh/{{ env-ssh-ans-name }} passphrase: "" register: ssh_key_creation failed_when: ssh_key_creation.rc != 0 @@ -61,7 +56,7 @@ tags: keymastr lineinfile: path: ansible.cfg - line: 
"private_key_file = ~/.ssh/{{ssh_key_filename}}" + line: "private_key_file = ~/.ssh/{{env-ssh-ans-name}}" state: present backup: yes register: ssh_config_file_key_addition From eb6c4f830f6d40e37e1acf7b2564a6dd6c6cf48b Mon Sep 17 00:00:00 2001 From: jacobemery Date: Thu, 19 Aug 2021 07:36:24 -0500 Subject: [PATCH 285/885] Fixed bugs related to the implementation of variables. --- README.md | 8 ++++-- env.yaml | 2 +- main.yaml | 5 ++-- roles/ansible_setup/tasks/main.yaml | 35 ++++++++++++--------------- roles/enable_packages/tasks/main.yaml | 4 +-- roles/ssh_key_gen/tasks/main.yaml | 8 +++--- 6 files changed, 30 insertions(+), 32 deletions(-) diff --git a/README.md b/README.md index b36b4264..3496056f 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,12 @@ Supported operating systems for the localhost (the starting workstation) are: - Unix and Unix-like (i.e. MacOS X) Pre-requisites: -- Python3 intalled on your local computer (how-to: https://realpython.com/installing-python/) -- Ansible installed on your local computer (how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) +- Python3 intalled on your local computer ( how-to: https://realpython.com/installing-python/ ) +- Ansible installed on your local computer ( how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html ) +- If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: + - homebrew package manager installed ( how-to: https://brew.sh/ ) + - Updated software for command line tools ( run "softwareupdate --all --install" in your terminal ) + - sshpass package ( run "brew install esolitos/ipa/sshpass" in your terminal ) - A logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: - 6 Integrated Facilities for Linux (IFLs) - 75 GB of RAM diff --git a/env.yaml b/env.yaml index 8bd2c3d7..59b2b7e7 100644 --- a/env.yaml +++ b/env.yaml @@ -14,7 +14,7 @@ env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2 env_sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' # to fill inventory -env_ip_kvm_host: 9.60.87.132 +env_ip_kvm_host: 9.60.87.132 env_ip_bastion: 9.60.87.139 env_ip_bootstrap: 9.60.87.133 env_ip_control_0: 9.60.87.136 diff --git a/main.yaml b/main.yaml index ef0ab037..a4cf5cfb 100644 --- a/main.yaml +++ b/main.yaml @@ -6,11 +6,9 @@ vars_files: - env.yaml vars: - - packages: [ 'ssh-keygen', 'ssh-copy-id', ] - ssh_target_ip: "{{ env_ip_kvm_host }}" roles: - ansible_setup - - install_packages - ssh_key_gen - ssh_copy_id @@ -41,7 +39,8 @@ - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] roles: - check_ssh - - firewall + - install_packages + - set_firewall - set_selinux_permissive - enable_packages - dns diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index e7a96d40..59988ba3 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -1,8 +1,17 @@ --- -- name: Collect only facts returned by facter +- name: Ansible generic setup ansible.builtin.setup: +- name: install ansible.community.general collection for use later + command: ansible-galaxy collection install community.general + +- 
name: install ansible.community.crypto collection for use later + command: ansible-galaxy collection install community.crypto + +- name: install ansible.posix collection for use later + command: ansible-galaxy collection install ansible.posix + - name: Load in variables from env.yaml include_vars: env.yaml @@ -70,15 +79,7 @@ - name: check inventory setup command: ansible-inventory --list register: inv_check - failed_when: "inv.check.rc != 0" - -- name: install homebrew package manager if localhost is running Mac OS X and doesn't already have it. - command: /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" - when: ansible_os_family == 'Darwin' - -- name: Install sshpass via homebrew, using github workaround because the makers of homebrew block sshpass download - command: brew install http://git.io/sshpass.rb - when: ansible_os_family == 'Darwin' + failed_when: "inv_check.rc != 0" - name: Install ssh packages on Linux localhost machines ansible.builtin.package: @@ -88,19 +89,13 @@ - ssh-copy-id state: latest update_cache: yes - when: host_os_family == "RedHat" or host_os_family == "Debian" + when: ansible_os_family == "RedHat" or ansible_os_family == "Debian" - name: Install ssh packages on Mac machine via homebrew community.general.homebrew: name: - openssh - ssh-copy-id - state: latest - update_homebrew: yes - when: host_os_family == "Darwin" - -- name: install ansible.community.general collection for use later - command: ansible-galaxy collection install community.general - -- name: install ansible.community.crypto collection for use later - command: ansible-galaxy collection install community.crypto \ No newline at end of file + state: latest + update_homebrew: yes + when: ansible_os_family == "Darwin" \ No newline at end of file diff --git a/roles/enable_packages/tasks/main.yaml b/roles/enable_packages/tasks/main.yaml index d8a4eca2..e1caacbe 100644 --- a/roles/enable_packages/tasks/main.yaml +++ 
b/roles/enable_packages/tasks/main.yaml @@ -2,7 +2,7 @@ - name: enable packages ansible.builtin.service: - name: + name: - "{{ packages[0] | default(omit) }}" - "{{ packages[1] | default(omit) }}" - "{{ packages[2] | default(omit) }}" @@ -11,4 +11,4 @@ - "{{ packages[5] | default(omit) }}" - "{{ packages[6] | default(omit) }}" - "{{ packages[7] | default(omit) }}" - state: enabled \ No newline at end of file + state: enabled \ No newline at end of file diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index 45188412..562b7091 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -30,8 +30,8 @@ path: "~/.ssh/{{item}}" register: ssh_key_file_exists_check with_items: - - "{{env-ssh-ans-name}}" - - "{{env-ssh-ans-name}}.pub" + - "{{env_ssh_ans_name}}" + - "{{env_ssh_ans_name}}.pub" - name: Print results of ssh key pair files check tags: keymastr @@ -41,7 +41,7 @@ - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key tags: keymastr community.crypto.openssh_keypair: - path: ~/.ssh/{{ env-ssh-ans-name }} + path: ~/.ssh/{{ env_ssh_ans_name }} passphrase: "" register: ssh_key_creation failed_when: ssh_key_creation.rc != 0 @@ -56,7 +56,7 @@ tags: keymastr lineinfile: path: ansible.cfg - line: "private_key_file = ~/.ssh/{{env-ssh-ans-name}}" + line: "private_key_file = ~/.ssh/{{env_ssh_ans_name}}" state: present backup: yes register: ssh_config_file_key_addition From 535d86a74a63f3c4912a1c87e3d9a44b0f4837b6 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Thu, 19 Aug 2021 07:36:24 -0500 Subject: [PATCH 286/885] Fixed bugs related to the implementation of variables. 
--- README.md | 8 ++++-- main.yaml | 5 ++-- roles/ansible_setup/tasks/main.yaml | 35 ++++++++++++--------------- roles/enable_packages/tasks/main.yaml | 4 +-- roles/ssh_key_gen/tasks/main.yaml | 8 +++--- 5 files changed, 29 insertions(+), 31 deletions(-) diff --git a/README.md b/README.md index b36b4264..3496056f 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,12 @@ Supported operating systems for the localhost (the starting workstation) are: - Unix and Unix-like (i.e. MacOS X) Pre-requisites: -- Python3 intalled on your local computer (how-to: https://realpython.com/installing-python/) -- Ansible installed on your local computer (how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) +- Python3 intalled on your local computer ( how-to: https://realpython.com/installing-python/ ) +- Ansible installed on your local computer ( how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html ) +- If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: + - homebrew package manager installed ( how-to: https://brew.sh/ ) + - Updated software for command line tools ( run "softwareupdate --all --install" in your terminal ) + - sshpass package ( run "brew install esolitos/ipa/sshpass" in your terminal ) - A logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: - 6 Integrated Facilities for Linux (IFLs) - 75 GB of RAM diff --git a/main.yaml b/main.yaml index ef0ab037..a4cf5cfb 100644 --- a/main.yaml +++ b/main.yaml @@ -6,11 +6,9 @@ vars_files: - env.yaml vars: - - packages: [ 'ssh-keygen', 'ssh-copy-id', ] - ssh_target_ip: "{{ env_ip_kvm_host }}" roles: - ansible_setup - - install_packages - ssh_key_gen - ssh_copy_id @@ -41,7 +39,8 @@ - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] roles: - check_ssh - - firewall + - install_packages + - set_firewall - set_selinux_permissive - enable_packages - dns diff --git 
a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index e7a96d40..59988ba3 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -1,8 +1,17 @@ --- -- name: Collect only facts returned by facter +- name: Ansible generic setup ansible.builtin.setup: +- name: install ansible.community.general collection for use later + command: ansible-galaxy collection install community.general + +- name: install ansible.community.crypto collection for use later + command: ansible-galaxy collection install community.crypto + +- name: install ansible.posix collection for use later + command: ansible-galaxy collection install ansible.posix + - name: Load in variables from env.yaml include_vars: env.yaml @@ -70,15 +79,7 @@ - name: check inventory setup command: ansible-inventory --list register: inv_check - failed_when: "inv.check.rc != 0" - -- name: install homebrew package manager if localhost is running Mac OS X and doesn't already have it. 
- command: /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" - when: ansible_os_family == 'Darwin' - -- name: Install sshpass via homebrew, using github workaround because the makers of homebrew block sshpass download - command: brew install http://git.io/sshpass.rb - when: ansible_os_family == 'Darwin' + failed_when: "inv_check.rc != 0" - name: Install ssh packages on Linux localhost machines ansible.builtin.package: @@ -88,19 +89,13 @@ - ssh-copy-id state: latest update_cache: yes - when: host_os_family == "RedHat" or host_os_family == "Debian" + when: ansible_os_family == "RedHat" or ansible_os_family == "Debian" - name: Install ssh packages on Mac machine via homebrew community.general.homebrew: name: - openssh - ssh-copy-id - state: latest - update_homebrew: yes - when: host_os_family == "Darwin" - -- name: install ansible.community.general collection for use later - command: ansible-galaxy collection install community.general - -- name: install ansible.community.crypto collection for use later - command: ansible-galaxy collection install community.crypto \ No newline at end of file + state: latest + update_homebrew: yes + when: ansible_os_family == "Darwin" \ No newline at end of file diff --git a/roles/enable_packages/tasks/main.yaml b/roles/enable_packages/tasks/main.yaml index d8a4eca2..e1caacbe 100644 --- a/roles/enable_packages/tasks/main.yaml +++ b/roles/enable_packages/tasks/main.yaml @@ -2,7 +2,7 @@ - name: enable packages ansible.builtin.service: - name: + name: - "{{ packages[0] | default(omit) }}" - "{{ packages[1] | default(omit) }}" - "{{ packages[2] | default(omit) }}" @@ -11,4 +11,4 @@ - "{{ packages[5] | default(omit) }}" - "{{ packages[6] | default(omit) }}" - "{{ packages[7] | default(omit) }}" - state: enabled \ No newline at end of file + state: enabled \ No newline at end of file diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index 45188412..562b7091 100644 
--- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -30,8 +30,8 @@ path: "~/.ssh/{{item}}" register: ssh_key_file_exists_check with_items: - - "{{env-ssh-ans-name}}" - - "{{env-ssh-ans-name}}.pub" + - "{{env_ssh_ans_name}}" + - "{{env_ssh_ans_name}}.pub" - name: Print results of ssh key pair files check tags: keymastr @@ -41,7 +41,7 @@ - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key tags: keymastr community.crypto.openssh_keypair: - path: ~/.ssh/{{ env-ssh-ans-name }} + path: ~/.ssh/{{ env_ssh_ans_name }} passphrase: "" register: ssh_key_creation failed_when: ssh_key_creation.rc != 0 @@ -56,7 +56,7 @@ tags: keymastr lineinfile: path: ansible.cfg - line: "private_key_file = ~/.ssh/{{env-ssh-ans-name}}" + line: "private_key_file = ~/.ssh/{{env_ssh_ans_name}}" state: present backup: yes register: ssh_config_file_key_addition From 47a683103b565df739e78fc4f48406582c5bb09a Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 19 Aug 2021 11:37:51 -0500 Subject: [PATCH 287/885] Updating Readme for readability --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 3496056f..20bcd449 100644 --- a/README.md +++ b/README.md @@ -2,12 +2,12 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing KVM as the virtualization method. Supported operating systems for the localhost (the starting workstation) are: -- Linux (RedHat and Debian families) -- Unix and Unix-like (i.e. MacOS X) +* Linux (RedHat and Debian families) +* Unix and Unix-like (i.e. 
MacOS X) Pre-requisites: -- Python3 intalled on your local computer ( how-to: https://realpython.com/installing-python/ ) -- Ansible installed on your local computer ( how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html ) +- Python3 intalled on your local computer [how-to:] ( https://realpython.com/installing-python/ ) +- Ansible installed on your local computer [how-to:] ( https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html ) - If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: - homebrew package manager installed ( how-to: https://brew.sh/ ) - Updated software for command line tools ( run "softwareupdate --all --install" in your terminal ) From bf7a8861384152a2083fc72fed527007179bc1d2 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 19 Aug 2021 11:37:51 -0500 Subject: [PATCH 288/885] Updating Readme for readability --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 3496056f..20bcd449 100644 --- a/README.md +++ b/README.md @@ -2,12 +2,12 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing KVM as the virtualization method. Supported operating systems for the localhost (the starting workstation) are: -- Linux (RedHat and Debian families) -- Unix and Unix-like (i.e. MacOS X) +* Linux (RedHat and Debian families) +* Unix and Unix-like (i.e. 
MacOS X) Pre-requisites: -- Python3 intalled on your local computer ( how-to: https://realpython.com/installing-python/ ) -- Ansible installed on your local computer ( how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html ) +- Python3 intalled on your local computer [how-to:] ( https://realpython.com/installing-python/ ) +- Ansible installed on your local computer [how-to:] ( https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html ) - If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: - homebrew package manager installed ( how-to: https://brew.sh/ ) - Updated software for command line tools ( run "softwareupdate --all --install" in your terminal ) From 61eedd0656d41547b893fd50058cf0b5e0abb821 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 19 Aug 2021 11:39:36 -0500 Subject: [PATCH 289/885] Updating Readme for readability --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 20bcd449..c6bac09b 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,8 @@ Supported operating systems for the localhost (the starting workstation) are: * Unix and Unix-like (i.e. 
MacOS X) Pre-requisites: -- Python3 intalled on your local computer [how-to:] ( https://realpython.com/installing-python/ ) -- Ansible installed on your local computer [how-to:] ( https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html ) +- Python3 intalled on your local computer [how-to:] (https://realpython.com/installing-python/) +- Ansible installed on your local computer [how-to:] (https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) - If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: - homebrew package manager installed ( how-to: https://brew.sh/ ) - Updated software for command line tools ( run "softwareupdate --all --install" in your terminal ) From ff21c05705f96a249191713cb4be2aed6136410d Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 19 Aug 2021 11:39:36 -0500 Subject: [PATCH 290/885] Updating Readme for readability --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 20bcd449..c6bac09b 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,8 @@ Supported operating systems for the localhost (the starting workstation) are: * Unix and Unix-like (i.e. 
MacOS X) Pre-requisites: -- Python3 intalled on your local computer [how-to:] ( https://realpython.com/installing-python/ ) -- Ansible installed on your local computer [how-to:] ( https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html ) +- Python3 intalled on your local computer [how-to:] (https://realpython.com/installing-python/) +- Ansible installed on your local computer [how-to:] (https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) - If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: - homebrew package manager installed ( how-to: https://brew.sh/ ) - Updated software for command line tools ( run "softwareupdate --all --install" in your terminal ) From 03be8ac2b7e568542e996e732a0f201a3ef68d04 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 19 Aug 2021 11:41:21 -0500 Subject: [PATCH 291/885] Updating Readme for readability --- README.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index c6bac09b..f971a4c2 100644 --- a/README.md +++ b/README.md @@ -8,17 +8,17 @@ Supported operating systems for the localhost (the starting workstation) are: Pre-requisites: - Python3 intalled on your local computer [how-to:] (https://realpython.com/installing-python/) - Ansible installed on your local computer [how-to:] (https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) -- If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: - - homebrew package manager installed ( how-to: https://brew.sh/ ) - - Updated software for command line tools ( run "softwareupdate --all --install" in your terminal ) - - sshpass package ( run "brew install esolitos/ipa/sshpass" in your terminal ) +* If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: + * homebrew package manager installed ( how-to: https://brew.sh/ ) + * Updated 
software for command line tools ( run "softwareupdate --all --install" in your terminal ) + * sshpass package ( run "brew install esolitos/ipa/sshpass" in your terminal ) - A logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: - - 6 Integrated Facilities for Linux (IFLs) - - 75 GB of RAM - - 1 TB of disk space -- On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed -- On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses -- Fully Qualified Domain Names (FQDN) names for all IPv4 addresses + * 6 Integrated Facilities for Linux (IFLs) + * 75 GB of RAM + * 1 TB of disk space +* On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed +* On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses +* Fully Qualified Domain Names (FQDN) names for all IPv4 addresses When you are ready: Step 1: Download this Git repository to a folder on your local computer From c7c13c23f7268073a0592ec0c8fd224381e336c1 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 19 Aug 2021 11:41:21 -0500 Subject: [PATCH 292/885] Updating Readme for readability --- README.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index c6bac09b..f971a4c2 100644 --- a/README.md +++ b/README.md @@ -8,17 +8,17 @@ Supported operating systems for the localhost (the starting workstation) are: Pre-requisites: - Python3 intalled on your local computer [how-to:] (https://realpython.com/installing-python/) - Ansible installed on your local computer [how-to:] (https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) -- If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: - - homebrew package manager installed ( how-to: https://brew.sh/ ) - - Updated software for command line tools ( run "softwareupdate --all 
--install" in your terminal ) - - sshpass package ( run "brew install esolitos/ipa/sshpass" in your terminal ) +* If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: + * homebrew package manager installed ( how-to: https://brew.sh/ ) + * Updated software for command line tools ( run "softwareupdate --all --install" in your terminal ) + * sshpass package ( run "brew install esolitos/ipa/sshpass" in your terminal ) - A logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: - - 6 Integrated Facilities for Linux (IFLs) - - 75 GB of RAM - - 1 TB of disk space -- On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed -- On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses -- Fully Qualified Domain Names (FQDN) names for all IPv4 addresses + * 6 Integrated Facilities for Linux (IFLs) + * 75 GB of RAM + * 1 TB of disk space +* On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed +* On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses +* Fully Qualified Domain Names (FQDN) names for all IPv4 addresses When you are ready: Step 1: Download this Git repository to a folder on your local computer From 93a35e7083e5aa5783d894bb744a758d0b501f27 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 19 Aug 2021 11:43:23 -0500 Subject: [PATCH 293/885] Updating Readme for readability --- README.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index f971a4c2..ae0fd3d7 100644 --- a/README.md +++ b/README.md @@ -6,13 +6,13 @@ Supported operating systems for the localhost (the starting workstation) are: * Unix and Unix-like (i.e. 
MacOS X) Pre-requisites: -- Python3 intalled on your local computer [how-to:] (https://realpython.com/installing-python/) -- Ansible installed on your local computer [how-to:] (https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) +* Python3 intalled on your local computer [how-to:] (https://realpython.com/installing-python/) +* Ansible installed on your local computer [how-to:] (https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) * If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: * homebrew package manager installed ( how-to: https://brew.sh/ ) * Updated software for command line tools ( run "softwareupdate --all --install" in your terminal ) * sshpass package ( run "brew install esolitos/ipa/sshpass" in your terminal ) -- A logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: +* A logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: * 6 Integrated Facilities for Linux (IFLs) * 75 GB of RAM * 1 TB of disk space @@ -21,20 +21,20 @@ Pre-requisites: * Fully Qualified Domain Names (FQDN) names for all IPv4 addresses When you are ready: -Step 1: Download this Git repository to a folder on your local computer -Step 2: Fill out the required variables for your specific installation in the env.yaml file -Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in roles/dns/files folder. 
-Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: +* Step 1: Download this Git repository to a folder on your local computer +* Step 2: Fill out the required variables for your specific installation in the env.yaml file +* Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in roles/dns/files folder. +* Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" -Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. -Step 6: When create_bastion playbook runs, open cockpit at < URL > and complete installation with these options: +* Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. +* Step 6: When create_bastion playbook runs, open cockpit at < URL > and complete installation with these options: - list options here - list options here - list options here -Step 7: When the playbooks for creating nodes run, watch them on the cockpit. When you see "< node-name > login" press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to abort. -Step 8: approve certs... need more detail -Step 9: Shutdown and destroy bootstrap (or optionally convert bootstrap to worker node) -Step 8: Verify installation by running: +* Step 7: When the playbooks for creating nodes run, watch them on the cockpit. When you see "< node-name > login" press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to abort. +* Step 8: approve certs... 
need more detail +* Step 9: Shutdown and destroy bootstrap (or optionally convert bootstrap to worker node) +* Step 8: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" Step 9: From 2e9eb95813214abe88cd83ad4560fbbf644bcc8e Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 19 Aug 2021 11:43:23 -0500 Subject: [PATCH 294/885] Updating Readme for readability --- README.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index f971a4c2..ae0fd3d7 100644 --- a/README.md +++ b/README.md @@ -6,13 +6,13 @@ Supported operating systems for the localhost (the starting workstation) are: * Unix and Unix-like (i.e. MacOS X) Pre-requisites: -- Python3 intalled on your local computer [how-to:] (https://realpython.com/installing-python/) -- Ansible installed on your local computer [how-to:] (https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) +* Python3 intalled on your local computer [how-to:] (https://realpython.com/installing-python/) +* Ansible installed on your local computer [how-to:] (https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) * If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: * homebrew package manager installed ( how-to: https://brew.sh/ ) * Updated software for command line tools ( run "softwareupdate --all --install" in your terminal ) * sshpass package ( run "brew install esolitos/ipa/sshpass" in your terminal ) -- A logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: +* A logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: * 6 Integrated Facilities for Linux (IFLs) * 75 GB of RAM * 1 TB of disk space @@ -21,20 +21,20 @@ Pre-requisites: * Fully Qualified Domain Names (FQDN) names for all IPv4 addresses When you are ready: -Step 1: Download this Git repository to a folder on your local computer -Step 
2: Fill out the required variables for your specific installation in the env.yaml file -Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in roles/dns/files folder. -Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: +* Step 1: Download this Git repository to a folder on your local computer +* Step 2: Fill out the required variables for your specific installation in the env.yaml file +* Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in roles/dns/files folder. +* Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" -Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. -Step 6: When create_bastion playbook runs, open cockpit at < URL > and complete installation with these options: +* Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. +* Step 6: When create_bastion playbook runs, open cockpit at < URL > and complete installation with these options: - list options here - list options here - list options here -Step 7: When the playbooks for creating nodes run, watch them on the cockpit. When you see "< node-name > login" press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to abort. -Step 8: approve certs... need more detail -Step 9: Shutdown and destroy bootstrap (or optionally convert bootstrap to worker node) -Step 8: Verify installation by running: +* Step 7: When the playbooks for creating nodes run, watch them on the cockpit. When you see "< node-name > login" press "ctrl-C" and then "C" to continue. 
If you do not see the login prompt, press "ctrl+C" and then "A" to abort. +* Step 8: approve certs... need more detail +* Step 9: Shutdown and destroy bootstrap (or optionally convert bootstrap to worker node) +* Step 8: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" Step 9: From b35f479103a13cb66d8c4367b0f69bc63adc1922 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 19 Aug 2021 11:45:01 -0500 Subject: [PATCH 295/885] Updating Readme for readability --- README.md | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index ae0fd3d7..73d713ea 100644 --- a/README.md +++ b/README.md @@ -39,17 +39,17 @@ When you are ready: Step 9: Tags: -bastion = configuration of bastion for OCP -keymastr = ssh key configuration and testing -bastionvm = creation of Bastion KVM guest -boostrap = creation of Boostrap KVM guest -compute = creation of the Compute nodes KVM guests (minimum 2) -control = creation of the Control nodes KVM guests (minimum 3) -dns = configuration of dns server on bastion -setocp = download of OCP installer and http server configuration -haproxy = configuration of haproxy on bastion kvm guest -httpconf = configuration of httpd server on bastion kvm guest -kvmhost = tasks to apply to KVM host for OCP cluster -localhost = for tasks that apply to the local machine running Ansible -firewall = for tasks related to firewall settings -selinux = for tasks related to SELinux settings \ No newline at end of file +* bastion = configuration of bastion for OCP +* keymastr = ssh key configuration and testing +* bastionvm = creation of Bastion KVM guest +* boostrap = creation of Boostrap KVM guest +* compute = creation of the Compute nodes KVM guests (minimum 2) +* control = creation of the Control nodes KVM guests (minimum 3) +* dns = configuration of dns server on bastion +* setocp = download of OCP installer and http server configuration +* haproxy = 
configuration of haproxy on bastion kvm guest +* httpconf = configuration of httpd server on bastion kvm guest +* kvmhost = tasks to apply to KVM host for OCP cluster +* localhost = for tasks that apply to the local machine running Ansible +* firewall = for tasks related to firewall settings +* selinux = for tasks related to SELinux settings From 518fd28a7a201a5dbadbf0a19d71b8be8778d6b1 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 19 Aug 2021 11:45:01 -0500 Subject: [PATCH 296/885] Updating Readme for readability --- README.md | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index ae0fd3d7..73d713ea 100644 --- a/README.md +++ b/README.md @@ -39,17 +39,17 @@ When you are ready: Step 9: Tags: -bastion = configuration of bastion for OCP -keymastr = ssh key configuration and testing -bastionvm = creation of Bastion KVM guest -boostrap = creation of Boostrap KVM guest -compute = creation of the Compute nodes KVM guests (minimum 2) -control = creation of the Control nodes KVM guests (minimum 3) -dns = configuration of dns server on bastion -setocp = download of OCP installer and http server configuration -haproxy = configuration of haproxy on bastion kvm guest -httpconf = configuration of httpd server on bastion kvm guest -kvmhost = tasks to apply to KVM host for OCP cluster -localhost = for tasks that apply to the local machine running Ansible -firewall = for tasks related to firewall settings -selinux = for tasks related to SELinux settings \ No newline at end of file +* bastion = configuration of bastion for OCP +* keymastr = ssh key configuration and testing +* bastionvm = creation of Bastion KVM guest +* boostrap = creation of Boostrap KVM guest +* compute = creation of the Compute nodes KVM guests (minimum 2) +* control = creation of the Control nodes KVM guests (minimum 3) +* dns = configuration of dns server on bastion +* setocp = download of OCP installer and http server configuration 
+* haproxy = configuration of haproxy on bastion kvm guest +* httpconf = configuration of httpd server on bastion kvm guest +* kvmhost = tasks to apply to KVM host for OCP cluster +* localhost = for tasks that apply to the local machine running Ansible +* firewall = for tasks related to firewall settings +* selinux = for tasks related to SELinux settings From f4ea9edc8f02320efba4b6fef53d8b628255ea94 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Thu, 19 Aug 2021 12:32:14 -0500 Subject: [PATCH 297/885] Fixed bugs related to the implementation of variables --- README.md | 1 + ansible.cfg | 8 +++-- group_vars/all/main.yaml | 40 +++++++++++++++++++++++ inventory | 23 ++++++++++++++ inventory.yaml | 20 ------------ main.yaml | 3 ++ roles/ansible_setup/tasks/main.yaml | 49 +++++++++++++++++++++++++++-- roles/ssh_copy_id/tasks/main.yaml | 2 +- 8 files changed, 119 insertions(+), 27 deletions(-) create mode 100644 group_vars/all/main.yaml create mode 100755 inventory delete mode 100644 inventory.yaml diff --git a/README.md b/README.md index 73d713ea..a21c50a4 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,7 @@ When you are ready: Step 9: Tags: +* setup = first-time setup of ansible * bastion = configuration of bastion for OCP * keymastr = ssh key configuration and testing * bastionvm = creation of Bastion KVM guest diff --git a/ansible.cfg b/ansible.cfg index 70f980f1..e2b3c25f 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,5 +1,7 @@ [defaults] -inventory = inventory.yaml +inventory = inventory private_key_file = ~/.ssh/ansible - - +# BEGIN ANSIBLE MANAGED BLOCK +[inventory] +cache=True +# END ANSIBLE MANAGED BLOCK diff --git a/group_vars/all/main.yaml b/group_vars/all/main.yaml new file mode 100644 index 00000000..cdfa933b --- /dev/null +++ b/group_vars/all/main.yaml @@ -0,0 +1,40 @@ +# will populate with ansible_setup playbook +# BEGIN ANSIBLE MANAGED BLOCK + +# to populate install_config +env_baseDomain: ocpz.wsclab.endicott.ibm.com +env_compute_arch: s390x 
#default to s390x +env_control_count: 3 #default 3 +env_control_arch: s390x #default s390x +env_metadata_name: distribution +env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now +env_host_prefix: 23 #default 23 for now +env_network_type: OpenShiftSDN #set default OpenShiftSDN +env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 +env_fips: false #true or false, set default false +env_pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.
com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' +env_sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' + +# to fill inventory +env_ip_kvm_host: 9.60.87.132 
+env_ip_bastion: 9.60.87.139 +env_ip_bootstrap: 9.60.87.133 +env_ip_control_0: 9.60.87.136 +env_ip_control_1: 9.60.87.137 +env_ip_control_2: 9.60.87.138 +env_ip_compute_0: 9.60.87.134 +env_ip_compute_1: 9.60.87.135 + +# ssh +env_ssh_username: jacob #Username to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. +env_ssh_pass: ibmzrocks #Password to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. + +# Ansible passwordless ssh +env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible +env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) + +# networking +dns_nameserver: 9.60.87.139 +default_gateway: 9.60.86.1 +netmask: 255.255.254.0 +# END ANSIBLE MANAGED BLOCK diff --git a/inventory b/inventory new file mode 100755 index 00000000..0e672395 --- /dev/null +++ b/inventory @@ -0,0 +1,23 @@ +# will populate from ansible_setup playbook + +[localhost] +127.0.0.1 +# BEGIN ANSIBLE MANAGED BLOCK +[kvm_host] +9.60.87.132 + +[bastion] +9.60.87.139 + +[bootstrap] +9.60.87.133 + +[control_nodes] +9.60.87.136 +9.60.87.137 +9.60.87.138 + +[compute_nodes] +9.60.87.134 +9.60.87.135 +# END ANSIBLE MANAGED BLOCK diff --git a/inventory.yaml b/inventory.yaml deleted file mode 100644 index 017c9f58..00000000 --- a/inventory.yaml +++ /dev/null @@ -1,20 +0,0 @@ -all: - children: - kvm_host: - hosts: - "{{ env_ip_kvm_host }}": - bastion: - hosts: - "{{ env_ip_bastion }}": - bootstrap: - hosts: - "{{ env_ip_bootstrap }}": - control_nodes: - hosts: - "{{ env_ip_control_0 }}": - "{{ env_ip_control_1 }}": - "{{ env_ip_control_2 }}": - compute_nodes: - hosts: - "{{ env_ip_compute_0 }}": - "{{ env_ip_compute_1 }}": \ No newline at end of file diff --git a/main.yaml b/main.yaml index a4cf5cfb..ac268345 100644 --- a/main.yaml +++ b/main.yaml @@ -3,6 +3,7 @@ - hosts: localhost connection: local 
become: false + gather_facts: no vars_files: - env.yaml vars: @@ -14,6 +15,8 @@ - hosts: kvm_host become: true + vars_files: + - env.yaml vars: - packages: [ 'libvirt', 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img' ] roles: diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 59988ba3..c2ed474a 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -1,21 +1,23 @@ --- -- name: Ansible generic setup - ansible.builtin.setup: - - name: install ansible.community.general collection for use later + tags: setup command: ansible-galaxy collection install community.general - name: install ansible.community.crypto collection for use later + tags: setup command: ansible-galaxy collection install community.crypto - name: install ansible.posix collection for use later + tags: setup command: ansible-galaxy collection install ansible.posix - name: Load in variables from env.yaml + tags: setup include_vars: env.yaml - name: Check for any undefined user-input variables in env.yaml. Fail if true. + tags: setup fail: msg: "Required variable {{item}} has not been provided in env.yaml file." 
when: vars[item] is undefined @@ -46,6 +48,7 @@ - env_ssh_ans_pass - name: Set facts from env.yaml so they can be used in other playbooks + tags: setup set_fact: env_baseDomain: "{{ env_baseDomain }}" env_compute_arch: "{{ env_compute_arch }}" @@ -76,12 +79,51 @@ netmask: "{{ netmask }}" cacheable: yes +- name: Add the contents of user-input variables from env.yaml to group_vars/all folder to persist across hosts + tags: setup + blockinfile: + path: group_vars/all/main.yaml + block: "{{ lookup('file', 'env.yaml') }}" + state: present + backup: yes + +- name: Populate inventory file with ip variables from env.yaml + tags: setup + blockinfile: + path: inventory + block: | + [kvm_host] + {{ env_ip_kvm_host }} + + [bastion] + {{ env_ip_bastion }} + + [bootstrap] + {{ env_ip_bootstrap }} + + [control_nodes] + {{ env_ip_control_0 }} + {{ env_ip_control_1 }} + {{ env_ip_control_2 }} + + [compute_nodes] + {{ env_ip_compute_0 }} + {{ env_ip_compute_1 }} + state: present + backup: yes + - name: check inventory setup + tags: setup command: ansible-inventory --list register: inv_check failed_when: "inv_check.rc != 0" +- name: Ansible generic setup to re-read inventory file after populated in previous tasks + tags: setup + ansible.builtin.gather_facts: + - name: Install ssh packages on Linux localhost machines + tags: setup ansible.builtin.package: name: - sshpass @@ -92,6 +134,7 @@ when: ansible_os_family == "RedHat" or ansible_os_family == "Debian" - name: Install ssh packages on Mac machine via homebrew + tags: setup community.general.homebrew: name: - openssh diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index b79a29ab..9916d57d 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: distribute the ssh key to a remote host - shell: "/usr/local/bin/sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p {{ ssh_target_ip 
}}.22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" + shell: "/usr/local/bin/sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" register: ssh_copy_id_execution failed_when: ssh_copy_id_execution.rc != 0 From 03d483713e971093515fcbebde938f6a252283d9 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Thu, 19 Aug 2021 12:32:14 -0500 Subject: [PATCH 298/885] Fixed bugs related to the implementation of variables --- README.md | 1 + ansible.cfg | 8 +++-- group_vars/all/main.yaml | 40 +++++++++++++++++++++++ inventory | 23 ++++++++++++++ inventory.yaml | 20 ------------ main.yaml | 3 ++ roles/ansible_setup/tasks/main.yaml | 49 +++++++++++++++++++++++++++-- roles/ssh_copy_id/tasks/main.yaml | 2 +- 8 files changed, 119 insertions(+), 27 deletions(-) create mode 100644 group_vars/all/main.yaml create mode 100755 inventory delete mode 100644 inventory.yaml diff --git a/README.md b/README.md index 73d713ea..a21c50a4 100644 --- a/README.md +++ b/README.md @@ -39,6 +39,7 @@ When you are ready: Step 9: Tags: +* setup = first-time setup of ansible * bastion = configuration of bastion for OCP * keymastr = ssh key configuration and testing * bastionvm = creation of Bastion KVM guest diff --git a/ansible.cfg b/ansible.cfg index 70f980f1..e2b3c25f 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,5 +1,7 @@ [defaults] -inventory = inventory.yaml +inventory = inventory private_key_file = ~/.ssh/ansible - - +# BEGIN ANSIBLE MANAGED BLOCK +[inventory] +cache=True +# END ANSIBLE MANAGED BLOCK diff --git a/group_vars/all/main.yaml b/group_vars/all/main.yaml new file mode 100644 index 00000000..cdfa933b --- /dev/null +++ b/group_vars/all/main.yaml @@ -0,0 +1,40 @@ +# will populate with ansible_setup playbook +# BEGIN ANSIBLE MANAGED BLOCK + +# to populate install_config +env_baseDomain: ocpz.wsclab.endicott.ibm.com +env_compute_arch: s390x #default to s390x +env_control_count: 3 
#default 3 +env_control_arch: s390x #default s390x +env_metadata_name: distribution +env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now +env_host_prefix: 23 #default 23 for now +env_network_type: OpenShiftSDN #set default OpenShiftSDN +env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 +env_fips: false #true or false, set default false +env_pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAz
NjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' +env_sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' + +# to fill inventory +env_ip_kvm_host: 9.60.87.132 +env_ip_bastion: 9.60.87.139 +env_ip_bootstrap: 
9.60.87.133 +env_ip_control_0: 9.60.87.136 +env_ip_control_1: 9.60.87.137 +env_ip_control_2: 9.60.87.138 +env_ip_compute_0: 9.60.87.134 +env_ip_compute_1: 9.60.87.135 + +# ssh +env_ssh_username: jacob #Username to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. +env_ssh_pass: ibmzrocks #Password to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. + +# Ansible passwordless ssh +env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible +env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) + +# networking +dns_nameserver: 9.60.87.139 +default_gateway: 9.60.86.1 +netmask: 255.255.254.0 +# END ANSIBLE MANAGED BLOCK diff --git a/inventory b/inventory new file mode 100755 index 00000000..0e672395 --- /dev/null +++ b/inventory @@ -0,0 +1,23 @@ +# will populate from ansible_setup playbook + +[localhost] +127.0.0.1 +# BEGIN ANSIBLE MANAGED BLOCK +[kvm_host] +9.60.87.132 + +[bastion] +9.60.87.139 + +[bootstrap] +9.60.87.133 + +[control_nodes] +9.60.87.136 +9.60.87.137 +9.60.87.138 + +[compute_nodes] +9.60.87.134 +9.60.87.135 +# END ANSIBLE MANAGED BLOCK diff --git a/inventory.yaml b/inventory.yaml deleted file mode 100644 index 017c9f58..00000000 --- a/inventory.yaml +++ /dev/null @@ -1,20 +0,0 @@ -all: - children: - kvm_host: - hosts: - "{{ env_ip_kvm_host }}": - bastion: - hosts: - "{{ env_ip_bastion }}": - bootstrap: - hosts: - "{{ env_ip_bootstrap }}": - control_nodes: - hosts: - "{{ env_ip_control_0 }}": - "{{ env_ip_control_1 }}": - "{{ env_ip_control_2 }}": - compute_nodes: - hosts: - "{{ env_ip_compute_0 }}": - "{{ env_ip_compute_1 }}": \ No newline at end of file diff --git a/main.yaml b/main.yaml index a4cf5cfb..ac268345 100644 --- a/main.yaml +++ b/main.yaml @@ -3,6 +3,7 @@ - hosts: localhost connection: local become: false + gather_facts: no vars_files: - 
env.yaml vars: @@ -14,6 +15,8 @@ - hosts: kvm_host become: true + vars_files: + - env.yaml vars: - packages: [ 'libvirt', 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img' ] roles: diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 59988ba3..c2ed474a 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -1,21 +1,23 @@ --- -- name: Ansible generic setup - ansible.builtin.setup: - - name: install ansible.community.general collection for use later + tags: setup command: ansible-galaxy collection install community.general - name: install ansible.community.crypto collection for use later + tags: setup command: ansible-galaxy collection install community.crypto - name: install ansible.posix collection for use later + tags: setup command: ansible-galaxy collection install ansible.posix - name: Load in variables from env.yaml + tags: setup include_vars: env.yaml - name: Check for any undefined user-input variables in env.yaml. Fail if true. + tags: setup fail: msg: "Required variable {{item}} has not been provided in env.yaml file." 
when: vars[item] is undefined @@ -46,6 +48,7 @@ - env_ssh_ans_pass - name: Set facts from env.yaml so they can be used in other playbooks + tags: setup set_fact: env_baseDomain: "{{ env_baseDomain }}" env_compute_arch: "{{ env_compute_arch }}" @@ -76,12 +79,51 @@ netmask: "{{ netmask }}" cacheable: yes +- name: Add the contents of user-input variables from env.yaml to group_vars/all folder to persist across hosts + tags: setup + blockinfile: + path: group_vars/all/main.yaml + block: "{{ lookup('file', 'env.yaml') }}" + state: present + backup: yes + +- name: Populate inventory file with ip variables from env.yaml + tags: setup + blockinfile: + path: inventory + block: | + [kvm_host] + {{ env_ip_kvm_host }} + + [bastion] + {{ env_ip_bastion }} + + [bootstrap] + {{ env_ip_bootstrap }} + + [control_nodes] + {{ env_ip_control_0 }} + {{ env_ip_control_1 }} + {{ env_ip_control_2 }} + + [compute_nodes] + {{ env_ip_compute_0 }} + {{ env_ip_compute_1 }} + state: present + backup: yes + - name: check inventory setup + tags: setup command: ansible-inventory --list register: inv_check failed_when: "inv_check.rc != 0" +- name: Ansible generic setup to re-read inventory file after populated in previous tasks + tags: setup + ansible.builtin.gather_facts: + - name: Install ssh packages on Linux localhost machines + tags: setup ansible.builtin.package: name: - sshpass @@ -92,6 +134,7 @@ when: ansible_os_family == "RedHat" or ansible_os_family == "Debian" - name: Install ssh packages on Mac machine via homebrew + tags: setup community.general.homebrew: name: - openssh diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index b79a29ab..9916d57d 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: distribute the ssh key to a remote host - shell: "/usr/local/bin/sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p {{ ssh_target_ip 
}}.22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" + shell: "/usr/local/bin/sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" register: ssh_copy_id_execution failed_when: ssh_copy_id_execution.rc != 0 From 11046cc2b76397e54c351fa0f56c5a2392b179a4 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Thu, 19 Aug 2021 13:31:28 -0500 Subject: [PATCH 299/885] Fixed bugs in the implementation of variables and tags. --- main.yaml | 4 ++-- roles/check_ssh/tasks/main.yaml | 4 ++-- roles/create_bastion/tasks/main.yaml | 15 ++++++++------- roles/enable_packages/defaults/main.yaml | 1 - roles/enable_packages/tasks/main.yaml | 10 ++-------- roles/install_packages/tasks/main.yaml | 1 + roles/mount_rhel/tasks/main.yaml | 5 +++++ roles/set_selinux_permissive/tasks/main.yaml | 2 +- 8 files changed, 21 insertions(+), 21 deletions(-) delete mode 100644 roles/enable_packages/defaults/main.yaml create mode 100644 roles/mount_rhel/tasks/main.yaml diff --git a/main.yaml b/main.yaml index ac268345..7912eff7 100644 --- a/main.yaml +++ b/main.yaml @@ -18,13 +18,14 @@ vars_files: - env.yaml vars: - - packages: [ 'libvirt', 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img' ] + - packages: [ 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img', 'libvirt' ] roles: - check_ssh - install_packages - set_selinux_permissive - enable_packages - macvtap + - mount_rhel - create_bastion - hosts: localhost @@ -45,7 +46,6 @@ - install_packages - set_firewall - set_selinux_permissive - - enable_packages - dns - haproxy - httpd diff --git a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml index ce7d56bf..a1a0772b 100644 --- a/roles/check_ssh/tasks/main.yaml +++ b/roles/check_ssh/tasks/main.yaml @@ -1,12 +1,12 @@ --- - name: 
check ssh to remote hosts works - tags: keymastr + tags: keymastr, kvm_host shell: "hostname; id" register: ssh_connection_test failed_when: ssh_connection_test.rc != 0 - name: print the connectivity test results - tags: keymastr + tags: keymastr, kvm_host debug: var: ssh_connection_test.stdout_lines \ No newline at end of file diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index dd48d14e..41e3bcb4 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -22,11 +22,12 @@ # - name: virtualize bastion server # command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - - name: start bastion install - tags: bastionvm - command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +- name: start bastion install + tags: kvm_host, bastionvm + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + register: bastion_boot - - name: Pause 15
minutes for installation. Once you see the login prompt on the bastion's terminal. Press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. - tags: bastionvm - pause: - minutes: 60 \ No newline at end of file +- name: README - Pausing for 60 minutes for you to complete the bastion installation of rhel OS with your specific installation's requirements. Please go to your kvm host to complete installation. Once you see the login prompt on the bastion's terminal, come back here and press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. + tags: kvm_host, bastionvm + pause: + minutes: 60 \ No newline at end of file diff --git a/roles/enable_packages/defaults/main.yaml b/roles/enable_packages/defaults/main.yaml deleted file mode 100644 index ccfaa59c..00000000 --- a/roles/enable_packages/defaults/main.yaml +++ /dev/null @@ -1 +0,0 @@ -packages[4]: "named" \ No newline at end of file diff --git a/roles/enable_packages/tasks/main.yaml b/roles/enable_packages/tasks/main.yaml index e1caacbe..7d87e5df 100644 --- a/roles/enable_packages/tasks/main.yaml +++ b/roles/enable_packages/tasks/main.yaml @@ -1,14 +1,8 @@ --- - name: enable packages + tags: kvm_host ansible.builtin.service: name: - - "{{ packages[0] | default(omit) }}" - - "{{ packages[1] | default(omit) }}" - - "{{ packages[2] | default(omit) }}" - - "{{ packages[3] | default(omit) }}" - - "{{ packages[4] | default(omit) }}" - - "{{ packages[5] | default(omit) }}" - - "{{ packages[6] | default(omit) }}" - - "{{ packages[7] | default(omit) }}" + - libvirtd state: enabled \ No newline at end of file diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index ce342bab..f335e3eb 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: Installing required packages + tags: kvm_host,bastion ansible.builtin.package: name: - "{{ packages[0] | 
default(omit) }}" diff --git a/roles/mount_rhel/tasks/main.yaml b/roles/mount_rhel/tasks/main.yaml new file mode 100644 index 00000000..b62d64ef --- /dev/null +++ b/roles/mount_rhel/tasks/main.yaml @@ -0,0 +1,5 @@ +--- + +- name: Mount red hat core os install directory + tags: kvm_host + command: mount -o loop /var/lib/libvirt/images/rhel83.iso /rhcos-install/ \ No newline at end of file diff --git a/roles/set_selinux_permissive/tasks/main.yaml b/roles/set_selinux_permissive/tasks/main.yaml index 71bd211a..8fd64c57 100644 --- a/roles/set_selinux_permissive/tasks/main.yaml +++ b/roles/set_selinux_permissive/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Put SELinux in permissive mode, logging actions that would be blocked. - tags: selinux,kvmhost,bastion + tags: selinux,kvm_host,bastion ansible.posix.selinux: policy: targeted state: permissive \ No newline at end of file From 1939dc4f32b4abbb1a9374f66323479b1bd7826b Mon Sep 17 00:00:00 2001 From: jacobemery Date: Thu, 19 Aug 2021 13:31:28 -0500 Subject: [PATCH 300/885] Fixed bugs in the implementation of variables and tags. 
--- main.yaml | 4 ++-- roles/check_ssh/tasks/main.yaml | 4 ++-- roles/create_bastion/tasks/main.yaml | 15 ++++++++------- roles/enable_packages/defaults/main.yaml | 1 - roles/enable_packages/tasks/main.yaml | 10 ++-------- roles/install_packages/tasks/main.yaml | 1 + roles/mount_rhel/tasks/main.yaml | 5 +++++ roles/set_selinux_permissive/tasks/main.yaml | 2 +- 8 files changed, 21 insertions(+), 21 deletions(-) delete mode 100644 roles/enable_packages/defaults/main.yaml create mode 100644 roles/mount_rhel/tasks/main.yaml diff --git a/main.yaml b/main.yaml index ac268345..7912eff7 100644 --- a/main.yaml +++ b/main.yaml @@ -18,13 +18,14 @@ vars_files: - env.yaml vars: - - packages: [ 'libvirt', 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img' ] + - packages: [ 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img', 'libvirt' ] roles: - check_ssh - install_packages - set_selinux_permissive - enable_packages - macvtap + - mount_rhel - create_bastion - hosts: localhost @@ -45,7 +46,6 @@ - install_packages - set_firewall - set_selinux_permissive - - enable_packages - dns - haproxy - httpd diff --git a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml index ce7d56bf..a1a0772b 100644 --- a/roles/check_ssh/tasks/main.yaml +++ b/roles/check_ssh/tasks/main.yaml @@ -1,12 +1,12 @@ --- - name: check ssh to remote hosts works - tags: keymastr + tags: keymastr, kvm_host shell: "hostname; id" register: ssh_connection_test failed_when: ssh_connection_test.rc != 0 - name: print the connectivity test results - tags: keymastr + tags: keymastr, kvm_host debug: var: ssh_connection_test.stdout_lines \ No newline at end of file diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index dd48d14e..41e3bcb4 100644 --- a/roles/create_bastion/tasks/main.yaml +++ 
b/roles/create_bastion/tasks/main.yaml @@ -22,11 +22,12 @@ # - name: virtualize bastion server # command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - - name: start bastion install - tags: bastionvm - command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole +- name: start bastion install + tags: kvm_host, bastionvm + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + register: bastion_boot - - name: Pause 15 minutes for installation. Once you see the login prompt on the bastion's terminal. Press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. - tags: bastionvm - pause: - minutes: 60 \ No newline at end of file +- name: README - Pausing for 60 minutes for you to complete the bastion installation of rhel OS with your specific installation's requirements. Please go to your kvm host to complete installation. 
Once you see the login prompt on the bastion's terminal, come back here and press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. + tags: kvm_host, bastionvm + pause: + minutes: 60 \ No newline at end of file diff --git a/roles/enable_packages/defaults/main.yaml b/roles/enable_packages/defaults/main.yaml deleted file mode 100644 index ccfaa59c..00000000 --- a/roles/enable_packages/defaults/main.yaml +++ /dev/null @@ -1 +0,0 @@ -packages[4]: "named" \ No newline at end of file diff --git a/roles/enable_packages/tasks/main.yaml b/roles/enable_packages/tasks/main.yaml index e1caacbe..7d87e5df 100644 --- a/roles/enable_packages/tasks/main.yaml +++ b/roles/enable_packages/tasks/main.yaml @@ -1,14 +1,8 @@ --- - name: enable packages + tags: kvm_host ansible.builtin.service: name: - - "{{ packages[0] | default(omit) }}" - - "{{ packages[1] | default(omit) }}" - - "{{ packages[2] | default(omit) }}" - - "{{ packages[3] | default(omit) }}" - - "{{ packages[4] | default(omit) }}" - - "{{ packages[5] | default(omit) }}" - - "{{ packages[6] | default(omit) }}" - - "{{ packages[7] | default(omit) }}" + - libvirtd state: enabled \ No newline at end of file diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index ce342bab..f335e3eb 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: Installing required packages + tags: kvm_host,bastion ansible.builtin.package: name: - "{{ packages[0] | default(omit) }}" diff --git a/roles/mount_rhel/tasks/main.yaml b/roles/mount_rhel/tasks/main.yaml new file mode 100644 index 00000000..b62d64ef --- /dev/null +++ b/roles/mount_rhel/tasks/main.yaml @@ -0,0 +1,5 @@ +--- + +- name: Mount red hat core os install directory + tags: kvm_host + command: mount -o loop /var/lib/libvirt/images/rhel83.iso /rhcos-install/ \ No newline at end of file diff --git a/roles/set_selinux_permissive/tasks/main.yaml 
b/roles/set_selinux_permissive/tasks/main.yaml index 71bd211a..8fd64c57 100644 --- a/roles/set_selinux_permissive/tasks/main.yaml +++ b/roles/set_selinux_permissive/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Put SELinux in permissive mode, logging actions that would be blocked. - tags: selinux,kvmhost,bastion + tags: selinux,kvm_host,bastion ansible.posix.selinux: policy: targeted state: permissive \ No newline at end of file From 38c05b1b8aa03ab52266e9f601c6589cdc4bacc1 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Thu, 19 Aug 2021 13:53:51 -0500 Subject: [PATCH 301/885] added tags for bastion --- README.md | 2 +- roles/check_ssh/tasks/main.yaml | 4 ++-- roles/dns/tasks/main.yaml | 12 +++++----- roles/get-ocp/tasks/main.yaml | 35 +++++++++++++++--------------- roles/haproxy/tasks/main.yaml | 8 +++---- roles/httpd/tasks/main.yaml | 8 +++---- roles/set_firewall/tasks/main.yaml | 12 +++++----- roles/ssh_copy_id/tasks/main.yaml | 1 + 8 files changed, 42 insertions(+), 40 deletions(-) diff --git a/README.md b/README.md index a21c50a4..80c07b8b 100644 --- a/README.md +++ b/README.md @@ -47,7 +47,7 @@ Tags: * compute = creation of the Compute nodes KVM guests (minimum 2) * control = creation of the Control nodes KVM guests (minimum 3) * dns = configuration of dns server on bastion -* setocp = download of OCP installer and http server configuration +* getocp = download of OCP installer and http server configuration * haproxy = configuration of haproxy on bastion kvm guest * httpconf = configuration of httpd server on bastion kvm guest * kvmhost = tasks to apply to KVM host for OCP cluster diff --git a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml index a1a0772b..b17745d7 100644 --- a/roles/check_ssh/tasks/main.yaml +++ b/roles/check_ssh/tasks/main.yaml @@ -1,12 +1,12 @@ --- - name: check ssh to remote hosts works - tags: keymastr, kvm_host + tags: keymastr,kvm_host,bastion shell: "hostname; id" register: ssh_connection_test failed_when: 
ssh_connection_test.rc != 0 - name: print the connectivity test results - tags: keymastr, kvm_host + tags: keymastr,kvm_host,bastion debug: var: ssh_connection_test.stdout_lines \ No newline at end of file diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml index ed9807cf..b2e959b4 100644 --- a/roles/dns/tasks/main.yaml +++ b/roles/dns/tasks/main.yaml @@ -1,19 +1,19 @@ --- - name: enable named - tags: dns + tags: dns,bastion ansible.builtin.systemd: name: named enabled: yes - name: start named - tags: dns + tags: dns,bastion ansible.builtin.systemd: name: named state: started - name: Copy named.conf file to bastion - tags: dns + tags: dns,bastion ansible.builtin.copy: src: named.conf dest: /etc/ @@ -23,7 +23,7 @@ backup: yes - name: Copy distribution.db file to bastion - tags: dns + tags: dns,bastion ansible.builtin.copy: src: distribution.db dest: /var/named @@ -33,7 +33,7 @@ backup: yes - name: Copy distribution.rev file to bastion - tags: dns + tags: dns,bastion ansible.builtin.copy: src: distribution.rev dest: /var/named @@ -43,7 +43,7 @@ backup: yes - name: restart named to update changes made to DNS - tags: dns + tags: dns,bastion ansible.builtin.systemd: name: named state: restarted \ No newline at end of file diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index 20816eb5..e2bfa443 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -1,54 +1,55 @@ --- - name: create directory bin for mirrors - tags: setocp + tags: getocp,bastion file: path: /var/www/html/bin state: directory mode: '0755' - name: get ocp kernel - tags: setocp + tags: getocp,bastion get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/www/html/bin mode: '0755' - name: get ocp initramfs - tags: setocp + tags: getocp,bastion get_url: url: 
https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/www/html/bin mode: '0755' - name: get ocp rootfs - tags: setocp + tags: getocp,bastion get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin mode: '0755' - name: create OCP download landing directory - tags: setocp + tags: getocp,bastion file: path: /ocpinst/ state: directory - name: Unzip OCP Client - tags: setocp + tags: getocp,bastion ansible.builtin.unarchive: src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz dest: /ocpinst/ remote_src: yes - name: Unzip OCP Installer + tags: getocp,bastion ansible.builtin.unarchive: src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz dest: /ocpinst/ remote_src: yes - name: Copy kubectl file - tags: setocp + tags: getocp,bastion ansible.builtin.copy: src: /ocpinst/kubectl dest: /usr/local/bin/kubectl @@ -58,7 +59,7 @@ mode: '0755' - name: Copy oc file - tags: setocp + tags: getocp,bastion ansible.builtin.copy: src: /ocpinst/oc dest: /usr/local/bin/oc @@ -68,7 +69,7 @@ mode: '0755' - name: Copy openshift-install file - tags: setocp + tags: getocp,bastion ansible.builtin.copy: src: /ocpinst/openshift-install dest: /usr/local/bin/openshift-install @@ -78,50 +79,50 @@ mode: '0755' - name: Copy install-config.yaml to ocp install directory - tags: setocp + tags: getocp,bastion copy: src: install-config.yaml dest: /ocpinst/install-config.yaml - name: Create Manifests - tags: setocp + tags: getocp,bastion command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes - name: Set mastersSchedulable parameter to False - tags: setocp + tags: getocp,bastion replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml regexp: ': true' replace: ': false' - name: Create Ignition files - 
tags: setocp + tags: getocp,bastion command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ become: yes - name: create Ignition directory on webserver - tags: setocp + tags: getocp,bastion file: path: /var/www/html/ignition state: directory - name: Copy bootstrap Ignition file to web server - tags: setocp + tags: getocp,bastion copy: src: /ocpinst/bootstrap.ign dest: /var/www/html/ignition remote_src: yes - name: Copy control plane Ignition file to web server - tags: setocp + tags: getocp,bastion copy: src: /ocpinst/master.ign dest: /var/www/html/ignition remote_src: yes - name: Copy worker Ignition file to web server - tags: setocp + tags: getocp,bastion copy: src: /ocpinst/worker.ign dest: /var/www/html/ignition diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index 32f3322e..3f42bcab 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -1,13 +1,13 @@ --- - name: Change permissive domain for haproxy - tags: selinux,haproxy + tags: selinux,haproxy,bastion selinux_permissive: name: haproxy_t permissive: true - name: move haproxy config file to bastion - tags: haproxy + tags: haproxy,bastion copy: src: haproxy.cfg dest: /etc/haproxy/haproxy.cfg @@ -15,13 +15,13 @@ backup: yes - name: enable haproxy - tags: haproxy + tags: haproxy,bastion systemd: enabled: yes name: haproxy - name: Restart haproxy - tags: haproxy + tags: haproxy,bastion systemd: state: restarted name: haproxy diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index ed01ef53..b6bb6937 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -1,19 +1,19 @@ --- - name: Change permissive domain for httpd - tags: selinux,httpconf + tags: selinux,httpconf,bastion selinux_permissive: name: httpd_t permissive: true - name: enable httpd - tags: httpconf + tags: httpconf,bastion systemd: name: httpd enabled: yes - name: Ensure the default Apache port is 8080 - tags: httpconf + tags: 
httpconf,bastion replace: path: /etc/httpd/conf/httpd.conf regexp: '^Listen 80' @@ -22,7 +22,7 @@ notify: restart httpd - name: Ensure the SSL default port is 4443 - tags: httpconf + tags: httpconf,bastion replace: path: /etc/httpd/conf.d/ssl.conf regexp: '^Listen 443 https' diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index be02d15d..878f4975 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -1,42 +1,42 @@ --- - name: Allow all access to tcp port 8080 - tags: firewall + tags: firewall,bastion community.general.ufw: rule: allow port: '8080' proto: tcp - name: Allow all access to tcp port 80 - tags: firewall + tags: firewall,bastion community.general.ufw: rule: allow port: '80' proto: tcp - name: Allow all access to tcp port 443 - tags: firewall + tags: firewall,bastion community.general.ufw: rule: allow port: '443' proto: tcp - name: Allow all access to tcp port 4443 - tags: firewall + tags: firewall,bastion community.general.ufw: rule: allow port: '4443' proto: tcp - name: Permit traffic in default zone for http - tags: firewall + tags: firewall,bastion ansible.posix.firewalld: service: http permanent: yes state: enabled - name: Permit traffic in default zone for https - tags: firewall + tags: firewall,bastion ansible.posix.firewalld: service: https permanent: yes diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index 9916d57d..c12795ae 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: distribute the ssh key to a remote host + tags: ssh,kvm_host,bastion shell: "/usr/local/bin/sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" register: ssh_copy_id_execution failed_when: ssh_copy_id_execution.rc != 0 From e5a35c6c8ded27fb48384b3ab7f9f7ae3e143f9b Mon Sep 17 00:00:00 2001 From: 
jacobemery Date: Thu, 19 Aug 2021 13:53:51 -0500 Subject: [PATCH 302/885] added tags for bastion --- README.md | 2 +- roles/check_ssh/tasks/main.yaml | 4 ++-- roles/dns/tasks/main.yaml | 12 +++++----- roles/get-ocp/tasks/main.yaml | 35 +++++++++++++++--------------- roles/haproxy/tasks/main.yaml | 8 +++---- roles/httpd/tasks/main.yaml | 8 +++---- roles/set_firewall/tasks/main.yaml | 12 +++++----- roles/ssh_copy_id/tasks/main.yaml | 1 + 8 files changed, 42 insertions(+), 40 deletions(-) diff --git a/README.md b/README.md index a21c50a4..80c07b8b 100644 --- a/README.md +++ b/README.md @@ -47,7 +47,7 @@ Tags: * compute = creation of the Compute nodes KVM guests (minimum 2) * control = creation of the Control nodes KVM guests (minimum 3) * dns = configuration of dns server on bastion -* setocp = download of OCP installer and http server configuration +* getocp = download of OCP installer and http server configuration * haproxy = configuration of haproxy on bastion kvm guest * httpconf = configuration of httpd server on bastion kvm guest * kvmhost = tasks to apply to KVM host for OCP cluster diff --git a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml index a1a0772b..b17745d7 100644 --- a/roles/check_ssh/tasks/main.yaml +++ b/roles/check_ssh/tasks/main.yaml @@ -1,12 +1,12 @@ --- - name: check ssh to remote hosts works - tags: keymastr, kvm_host + tags: keymastr,kvm_host,bastion shell: "hostname; id" register: ssh_connection_test failed_when: ssh_connection_test.rc != 0 - name: print the connectivity test results - tags: keymastr, kvm_host + tags: keymastr,kvm_host,bastion debug: var: ssh_connection_test.stdout_lines \ No newline at end of file diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml index ed9807cf..b2e959b4 100644 --- a/roles/dns/tasks/main.yaml +++ b/roles/dns/tasks/main.yaml @@ -1,19 +1,19 @@ --- - name: enable named - tags: dns + tags: dns,bastion ansible.builtin.systemd: name: named enabled: yes - name: start named - 
tags: dns + tags: dns,bastion ansible.builtin.systemd: name: named state: started - name: Copy named.conf file to bastion - tags: dns + tags: dns,bastion ansible.builtin.copy: src: named.conf dest: /etc/ @@ -23,7 +23,7 @@ backup: yes - name: Copy distribution.db file to bastion - tags: dns + tags: dns,bastion ansible.builtin.copy: src: distribution.db dest: /var/named @@ -33,7 +33,7 @@ backup: yes - name: Copy distribution.rev file to bastion - tags: dns + tags: dns,bastion ansible.builtin.copy: src: distribution.rev dest: /var/named @@ -43,7 +43,7 @@ backup: yes - name: restart named to update changes made to DNS - tags: dns + tags: dns,bastion ansible.builtin.systemd: name: named state: restarted \ No newline at end of file diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index 20816eb5..e2bfa443 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -1,54 +1,55 @@ --- - name: create directory bin for mirrors - tags: setocp + tags: getocp,bastion file: path: /var/www/html/bin state: directory mode: '0755' - name: get ocp kernel - tags: setocp + tags: getocp,bastion get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/www/html/bin mode: '0755' - name: get ocp initramfs - tags: setocp + tags: getocp,bastion get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/www/html/bin mode: '0755' - name: get ocp rootfs - tags: setocp + tags: getocp,bastion get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin mode: '0755' - name: create OCP download landing directory - tags: setocp + tags: getocp,bastion file: path: /ocpinst/ state: directory - name: Unzip OCP Client - tags: setocp + tags: getocp,bastion ansible.builtin.unarchive: src: 
https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz dest: /ocpinst/ remote_src: yes - name: Unzip OCP Installer + tags: getocp,bastion ansible.builtin.unarchive: src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz dest: /ocpinst/ remote_src: yes - name: Copy kubectl file - tags: setocp + tags: getocp,bastion ansible.builtin.copy: src: /ocpinst/kubectl dest: /usr/local/bin/kubectl @@ -58,7 +59,7 @@ mode: '0755' - name: Copy oc file - tags: setocp + tags: getocp,bastion ansible.builtin.copy: src: /ocpinst/oc dest: /usr/local/bin/oc @@ -68,7 +69,7 @@ mode: '0755' - name: Copy openshift-install file - tags: setocp + tags: getocp,bastion ansible.builtin.copy: src: /ocpinst/openshift-install dest: /usr/local/bin/openshift-install @@ -78,50 +79,50 @@ mode: '0755' - name: Copy install-config.yaml to ocp install directory - tags: setocp + tags: getocp,bastion copy: src: install-config.yaml dest: /ocpinst/install-config.yaml - name: Create Manifests - tags: setocp + tags: getocp,bastion command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ become: yes - name: Set mastersSchedulable parameter to False - tags: setocp + tags: getocp,bastion replace: path: /ocpinst/manifests/cluster-scheduler-02-config.yml regexp: ': true' replace: ': false' - name: Create Ignition files - tags: setocp + tags: getocp,bastion command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ become: yes - name: create Ignition directory on webserver - tags: setocp + tags: getocp,bastion file: path: /var/www/html/ignition state: directory - name: Copy bootstrap Ignition file to web server - tags: setocp + tags: getocp,bastion copy: src: /ocpinst/bootstrap.ign dest: /var/www/html/ignition remote_src: yes - name: Copy control plane Ignition file to web server - tags: setocp + tags: getocp,bastion copy: src: /ocpinst/master.ign dest: /var/www/html/ignition remote_src: yes - 
name: Copy worker Ignition file to web server - tags: setocp + tags: getocp,bastion copy: src: /ocpinst/worker.ign dest: /var/www/html/ignition diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index 32f3322e..3f42bcab 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -1,13 +1,13 @@ --- - name: Change permissive domain for haproxy - tags: selinux,haproxy + tags: selinux,haproxy,bastion selinux_permissive: name: haproxy_t permissive: true - name: move haproxy config file to bastion - tags: haproxy + tags: haproxy,bastion copy: src: haproxy.cfg dest: /etc/haproxy/haproxy.cfg @@ -15,13 +15,13 @@ backup: yes - name: enable haproxy - tags: haproxy + tags: haproxy,bastion systemd: enabled: yes name: haproxy - name: Restart haproxy - tags: haproxy + tags: haproxy,bastion systemd: state: restarted name: haproxy diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index ed01ef53..b6bb6937 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -1,19 +1,19 @@ --- - name: Change permissive domain for httpd - tags: selinux,httpconf + tags: selinux,httpconf,bastion selinux_permissive: name: httpd_t permissive: true - name: enable httpd - tags: httpconf + tags: httpconf,bastion systemd: name: httpd enabled: yes - name: Ensure the default Apache port is 8080 - tags: httpconf + tags: httpconf,bastion replace: path: /etc/httpd/conf/httpd.conf regexp: '^Listen 80' @@ -22,7 +22,7 @@ notify: restart httpd - name: Ensure the SSL default port is 4443 - tags: httpconf + tags: httpconf,bastion replace: path: /etc/httpd/conf.d/ssl.conf regexp: '^Listen 443 https' diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index be02d15d..878f4975 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -1,42 +1,42 @@ --- - name: Allow all access to tcp port 8080 - tags: firewall + tags: firewall,bastion community.general.ufw: rule: 
allow port: '8080' proto: tcp - name: Allow all access to tcp port 80 - tags: firewall + tags: firewall,bastion community.general.ufw: rule: allow port: '80' proto: tcp - name: Allow all access to tcp port 443 - tags: firewall + tags: firewall,bastion community.general.ufw: rule: allow port: '443' proto: tcp - name: Allow all access to tcp port 4443 - tags: firewall + tags: firewall,bastion community.general.ufw: rule: allow port: '4443' proto: tcp - name: Permit traffic in default zone for http - tags: firewall + tags: firewall,bastion ansible.posix.firewalld: service: http permanent: yes state: enabled - name: Permit traffic in default zone for https - tags: firewall + tags: firewall,bastion ansible.posix.firewalld: service: https permanent: yes diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index 9916d57d..c12795ae 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,6 +1,7 @@ --- - name: distribute the ssh key to a remote host + tags: ssh,kvm_host,bastion shell: "/usr/local/bin/sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" register: ssh_copy_id_execution failed_when: ssh_copy_id_execution.rc != 0 From 3e24735e2fc871e8b07c2b24dffe02f1b77f22cf Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Thu, 19 Aug 2021 15:23:10 -0500 Subject: [PATCH 303/885] edit bastion packages to correct name from bind_utils to bind-utils --- main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.yaml b/main.yaml index 7912eff7..115cffa9 100644 --- a/main.yaml +++ b/main.yaml @@ -40,7 +40,7 @@ - hosts: bastion become: true vars: - - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] + - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils' ] roles: - check_ssh - install_packages From d83081d658811b198c95a794afe2e9428486af06 Mon Sep 17 00:00:00 
2001 From: pswilso2017 Date: Thu, 19 Aug 2021 15:23:10 -0500 Subject: [PATCH 304/885] edit bastion packages to correct name from bind_utils to bind-utils --- main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.yaml b/main.yaml index 7912eff7..115cffa9 100644 --- a/main.yaml +++ b/main.yaml @@ -40,7 +40,7 @@ - hosts: bastion become: true vars: - - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind_utils' ] + - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils' ] roles: - check_ssh - install_packages From d9bb26011a8db1861c4eb3ac1f82eff8a8a56b85 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Thu, 19 Aug 2021 19:39:41 -0500 Subject: [PATCH 305/885] Fixed bugs related to the implementation of variables --- README.md | 1 + env.yaml | 7 ++- main.yaml | 22 ++++--- roles/ansible_setup/tasks/main.yaml | 21 ++++--- roles/check_ssh/tasks/main.yaml | 4 +- roles/create_bastion/tasks/main.yaml | 1 - roles/get-ocp/files/install-config.yaml | 26 -------- roles/get-ocp/files/tmp-install-config.yaml | 28 --------- roles/get-ocp/tasks/main.yaml | 10 ++++ .../get-ocp/templates/install-config.yaml.j2 | 26 ++++++++ roles/httpd/handlers/main.yaml | 6 -- roles/httpd/tasks/main.yaml | 7 ++- roles/install_packages/tasks/main.yaml | 1 - roles/prep_kvm_guests/tasks/main.yaml | 10 ++-- roles/set_firewall/tasks/main.yaml | 38 ++++++------ roles/set_selinux_permissive/tasks/main.yaml | 2 +- roles/ssh-ocp-key-gen/tasks/main.yaml | 59 +++++++++++++++++++ roles/ssh_copy_id/tasks/main.yaml | 3 +- 18 files changed, 164 insertions(+), 108 deletions(-) delete mode 100644 roles/get-ocp/files/install-config.yaml delete mode 100644 roles/get-ocp/files/tmp-install-config.yaml create mode 100644 roles/get-ocp/templates/install-config.yaml.j2 delete mode 100644 roles/httpd/handlers/main.yaml create mode 100644 roles/ssh-ocp-key-gen/tasks/main.yaml diff --git a/README.md b/README.md index 80c07b8b..65fabb58 100644 --- a/README.md +++ b/README.md @@ -46,6 +46,7 
@@ Tags: * boostrap = creation of Boostrap KVM guest * compute = creation of the Compute nodes KVM guests (minimum 2) * control = creation of the Control nodes KVM guests (minimum 3) +* ssh-copy-id = for copying ssh id * dns = configuration of dns server on bastion * getocp = download of OCP installer and http server configuration * haproxy = configuration of haproxy on bastion kvm guest diff --git a/env.yaml b/env.yaml index 59b2b7e7..03122612 100644 --- a/env.yaml +++ b/env.yaml @@ -11,7 +11,6 @@ env_network_type: OpenShiftSDN #set default OpenShiftSDN env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 env_fips: false #true or false, set default false env_pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZ
OSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -env_sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' # to fill inventory env_ip_kvm_host: 9.60.87.132 @@ -31,7 +30,11 @@ env_ssh_pass: ibmzrocks #Password to use for ssh to kvm and bastion for first-ti env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. 
default=""(no password) +# OpenShift cluster's ssh key pair filename +env_ssh_ocp_name: ocp +env_ssh_ocp_pass: ibmzrocks + # networking dns_nameserver: 9.60.87.139 default_gateway: 9.60.86.1 -netmask: 255.255.254.0 +netmask: 255.255.254.0 \ No newline at end of file diff --git a/main.yaml b/main.yaml index 115cffa9..08a4c5dc 100644 --- a/main.yaml +++ b/main.yaml @@ -1,6 +1,7 @@ --- - hosts: localhost + tags: localhost connection: local become: false gather_facts: no @@ -14,6 +15,7 @@ - ssh_copy_id - hosts: kvm_host + tags: kvm_host become: true vars_files: - env.yaml @@ -29,6 +31,7 @@ - create_bastion - hosts: localhost + tags: localhost,bastion connection: local become: false gather_facts: no @@ -38,20 +41,25 @@ - ssh_copy_id - hosts: bastion + tags: bastion become: true + vars_files: + - env.yaml vars: - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils' ] roles: - - check_ssh - - install_packages - - set_firewall - - set_selinux_permissive - - dns - - haproxy - - httpd + #- check_ssh + #- install_packages + #- set_firewall + #- set_selinux_permissive + #- dns + #- haproxy + #- httpd + - ssh-ocp-key-gen - get-ocp - hosts: kvm_host + tags: kvm_host become: true gather_facts: no roles: diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index c2ed474a..df19a270 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -19,7 +19,7 @@ - name: Check for any undefined user-input variables in env.yaml. Fail if true. tags: setup fail: - msg: "Required variable {{item}} has not been provided in env.yaml file." + msg: Required variable "{{item}}" has not been provided in env.yaml file. 
when: vars[item] is undefined loop: - env_baseDomain @@ -33,7 +33,6 @@ - env_service_network - env_fips - env_pullSecret - - env_sshKey - env_ip_kvm_host - env_ip_bastion - env_ip_bootstrap @@ -46,6 +45,11 @@ - env_ssh_pass - env_ssh_ans_name - env_ssh_ans_pass + - env_ssh_ocp_name + - env_ssh_ocp_pass + - dns_nameserver + - default_gateway + - netmask - name: Set facts from env.yaml so they can be used in other playbooks tags: setup @@ -61,7 +65,6 @@ env_service_network: "{{ env_service_network }}" env_fips: "{{ env_fips }}" env_pullSecret: "{{ env_pullSecret }}" - env_sshKey: "{{ env_sshKey }}" env_ip_kvm_host: "{{ env_ip_kvm_host }}" env_ip_bastion: "{{ env_ip_bastion }}" env_ip_bootstrap: "{{ env_ip_bootstrap }}" @@ -73,7 +76,9 @@ env_ssh_username: "{{ env_ssh_username }}" env_ssh_pass: "{{ env_ssh_pass }}" env_ssh_ans_name: "{{ env_ssh_ans_name }}" - env_ssh_ans_pass: "{{ env_ssh_ans_pass }}" + env_ssh_ans_pass: "{{ env_ssh_ans_pass }}" + env_ssh_ocp_name: "{{ env_ssh_ocp_name }}" + env_ssh_ocp_pass: "{{ env_ssh_ocp_pass }}" dns_nameserver: "{{ dns_nameserver }}" default_gateway: "{{ default_gateway }}" netmask: "{{ netmask }}" @@ -111,12 +116,12 @@ {{ env_ip_compute_1 }} state: present backup: yes - + - name: check inventory setup tags: setup command: ansible-inventory --list register: inv_check - failed_when: "inv_check.rc != 0" + failed_when: inv_check.rc != 0 - name: Ansible generic setup to re-read inventory file after populated in previous tasks tags: setup @@ -131,7 +136,7 @@ - ssh-copy-id state: latest update_cache: yes - when: ansible_os_family == "RedHat" or ansible_os_family == "Debian" + when: ansible_os_family == RedHat or ansible_os_family == Debian - name: Install ssh packages on Mac machine via homebrew tags: setup @@ -141,4 +146,4 @@ - ssh-copy-id state: latest update_homebrew: yes - when: ansible_os_family == "Darwin" \ No newline at end of file + when: ansible_os_family == Darwin \ No newline at end of file diff --git 
a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml index b17745d7..ce7d56bf 100644 --- a/roles/check_ssh/tasks/main.yaml +++ b/roles/check_ssh/tasks/main.yaml @@ -1,12 +1,12 @@ --- - name: check ssh to remote hosts works - tags: keymastr,kvm_host,bastion + tags: keymastr shell: "hostname; id" register: ssh_connection_test failed_when: ssh_connection_test.rc != 0 - name: print the connectivity test results - tags: keymastr,kvm_host,bastion + tags: keymastr debug: var: ssh_connection_test.stdout_lines \ No newline at end of file diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 41e3bcb4..742a47b7 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -25,7 +25,6 @@ - name: start bastion install tags: kvm_host, bastionvm command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - regsiter: bastion_boot - name: README - Pausing for 60 minutes for you to complete the bastion installation of rhel OS with your specific installation's requirements. Please go to your kvm host at to complete installation. Once you see the login prompt on the bastion's terminal, come back here and press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. 
tags: kvm_host, bastionvm diff --git a/roles/get-ocp/files/install-config.yaml b/roles/get-ocp/files/install-config.yaml deleted file mode 100644 index f72ce8cd..00000000 --- a/roles/get-ocp/files/install-config.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -baseDomain: "{{ env_baseDomain }}" -compute: -- hyperthreading: Enabled - name: worker - replicas: 0 - architecture : "{{ env_compute_arch | default(s390x) }}" -controlPlane: - hyperthreading: Enabled - name: master - replicas: "{{ env_control_count | default(3) }}" - architecture: "{{ env_control_arch | default(s390x) }}" -metadata: - name: "{{ env_metadata_name }}" -networking: - clusterNetwork: - - cidr: "{{ env_cidr | default(10.128.0.0/14)}}" - hostPrefix: "{{ env_host_prefix | default(23) }}" - networkType: "{{ env_network_type | default(OpenShiftSDN) }}" - serviceNetwork: - - "{{ env_service_network | default(172.30.0.0/16) }}" -platform: - none: {} -fips: false "{{ env_fips | default(false) }}" -pullSecret: "{{ env_pullSecret }}" -sshKey: "{{ env_sshKey }}" diff --git a/roles/get-ocp/files/tmp-install-config.yaml b/roles/get-ocp/files/tmp-install-config.yaml deleted file mode 100644 index 1d0fa8c7..00000000 --- a/roles/get-ocp/files/tmp-install-config.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: v1 # "{{ env-api-version | default(v1) }}" -baseDomain: ocpz.wsclab.endicott.ibm.com # "{{ env-baseDomain }}" -compute: -- hyperthreading: Enabled # "{{ env-compute-hyperthreading | default(Enabled) }}" - name: worker # "{{ env-compute-name | default(worker) }}" - replicas: 0 # "{{ env-compute-count | default(0) }}" - architecture : s390x # "{{ env-compute-arch | default(s390x) }}" -controlPlane: - hyperthreading: Enabled # "{{ env-control-hyperthreading | default(Enabled) }}" - name: master # "{{ env-control-name | default(compute) }}" - replicas: 3 # "{{ env-control-count | default(3) }}" - architecture : s390x # "{{ env-control-arch | default(s390x) }}" -metadata: - name: distribution # "{{ 
env-metadata-name }}" -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 # "{{ env-cidr }}" - hostPrefix: 23 # "{{ env-host-prefix | default(23) }}" - networkType: OpenShiftSDN # "{{ env-network-type | default(OpenShiftSDN) }}" - serviceNetwork: - - 172.30.0.0/16 # "{{ env-service-network }}" -platform: - none: {} -fips: false # "{{ env-fips | default(false) }}" -pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.i
o":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -# "{{ env-pullSecret }}" -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' -# "{{ env-sshKey }}" \ No newline at end of file diff --git 
a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index e2bfa443..ebaf5431 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -1,5 +1,9 @@ --- +- name: Load in variables from env.yaml + tags: setup + include_vars: env.yaml + - name: create directory bin for mirrors tags: getocp,bastion file: @@ -78,6 +82,12 @@ group: root mode: '0755' +- name: Use template file to create install-config + tags: setup,get-ocp + template: + src: install-config.yaml.j2 + dest: roles/get-ocp/files/install-config.yaml + - name: Copy install-config.yaml to ocp install directory tags: getocp,bastion copy: diff --git a/roles/get-ocp/templates/install-config.yaml.j2 b/roles/get-ocp/templates/install-config.yaml.j2 new file mode 100644 index 00000000..2c9f386b --- /dev/null +++ b/roles/get-ocp/templates/install-config.yaml.j2 @@ -0,0 +1,26 @@ +apiVersion: v1 +baseDomain: "{{ env_baseDomain }}" +"compute\:" +- hyperthreading: Enabled + name: worker + replicas: 2 + architecture: "{{ env_compute_arch | default(s390x) }}" +"controlPlane\:" + hyperthreading: Enabled + name: master + replicas: "{{ env_control_count | default(3) }}" + architecture: "{{ env_control_arch | default(s390x) }}" +"metadata\:" + name: "{{ env_metadata_name }}" +"networking\:" + "clusterNetwork\:" + - cidr: "{{ env_cidr | default("10.128.0.0/14") }}" + hostPrefix: "{{ env_host_prefix | default(23) }}" + networkType: "{{ env_network_type | default(OpenShiftSDN) }}" + "serviceNetwork\:" + - "{{ env_service_network | default("172.30.0.0/16") }}" +"platform/:" + "none\: {}" +fips: "{{ env_fips | default(false) }}" +pullSecret: "{{ env_pullSecret }}" +sshKey: "{{ env_ocp_pub }}" \ No newline at end of file diff --git a/roles/httpd/handlers/main.yaml b/roles/httpd/handlers/main.yaml deleted file mode 100644 index f86d6901..00000000 --- a/roles/httpd/handlers/main.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- - -- name: restart httpd - service: - name: httpd - state: restarted \ No newline at 
end of file diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index b6bb6937..9f3ae813 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -19,7 +19,6 @@ regexp: '^Listen 80' replace: 'Listen 8080' backup: yes - notify: restart httpd - name: Ensure the SSL default port is 4443 tags: httpconf,bastion @@ -28,4 +27,8 @@ regexp: '^Listen 443 https' replace: 'Listen 4443 https' backup: yes - notify: restart httpd + +- name: restart httpd + service: + name: httpd + state: restarted \ No newline at end of file diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index f335e3eb..ce342bab 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,7 +1,6 @@ --- - name: Installing required packages - tags: kvm_host,bastion ansible.builtin.package: name: - "{{ packages[0] | default(omit) }}" diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index f37119d7..9e824095 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: get rhcos qcow2 file - tags: kvmhost + tags: kvm_host get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images/ @@ -9,11 +9,11 @@ mode: '0755' - name: Unzip rhcos qcow2 files - tags: kvmhost + tags: kvm_host command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz - name: get rhcos initramfs image - tags: kvmhost + tags: kvm_host get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/lib/libvirt/images/ @@ -21,7 +21,7 @@ force: yes - name: get rhcos kernel - tags: kvmhost + tags: kvm_host get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: 
/var/lib/libvirt/images/ @@ -29,7 +29,7 @@ force: yes - name: get rhcos rootfs image - tags: kvmhost + tags: kvm_host get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/lib/libvirt/images/ diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index 878f4975..4b66144a 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -2,31 +2,31 @@ - name: Allow all access to tcp port 8080 tags: firewall,bastion - community.general.ufw: - rule: allow - port: '8080' - proto: tcp + ansible.posix.firewalld: + port: 8081/tcp + permanent: yes + state: enabled - name: Allow all access to tcp port 80 tags: firewall,bastion - community.general.ufw: - rule: allow - port: '80' - proto: tcp + ansible.posix.firewalld: + port: 80/tcp + permanent: yes + state: enabled - name: Allow all access to tcp port 443 tags: firewall,bastion - community.general.ufw: - rule: allow - port: '443' - proto: tcp + ansible.posix.firewalld: + port: 443/tcp + permanent: yes + state: enabled - name: Allow all access to tcp port 4443 tags: firewall,bastion - community.general.ufw: - rule: allow - port: '4443' - proto: tcp + ansible.posix.firewalld: + port: 4443/tcp + permanent: yes + state: enabled - name: Permit traffic in default zone for http tags: firewall,bastion @@ -42,5 +42,7 @@ permanent: yes state: enabled - - \ No newline at end of file +- name: restart httpd + service: + name: httpd + state: restarted \ No newline at end of file diff --git a/roles/set_selinux_permissive/tasks/main.yaml b/roles/set_selinux_permissive/tasks/main.yaml index 8fd64c57..d3cfee5a 100644 --- a/roles/set_selinux_permissive/tasks/main.yaml +++ b/roles/set_selinux_permissive/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Put SELinux in permissive mode, logging actions that would be blocked. 
- tags: selinux,kvm_host,bastion + tags: selinux ansible.posix.selinux: policy: targeted state: permissive \ No newline at end of file diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml new file mode 100644 index 00000000..4f0912a3 --- /dev/null +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -0,0 +1,59 @@ +--- + +- name: Load in variables from env.yaml + tags: setup + include_vars: env.yaml + +- name: Check to see if local .ssh directory exists + tags: keymastr + stat: + path: "~/.ssh" + register: ssh_directory_exists_check + +- name: Print results of .ssh directory check + tags: keymastr + debug: + var: ssh_directory_exists_check + +- name: Create .ssh local directory if it doesn't already exist + tags: keymastr + file: + path: "~/.ssh" + state: directory + mode: "0700" + register: ssh_directory_creation + when: ssh_directory_exists_check is defined and ssh_directory_exists_check.stat.exists == false + +- name: Print results of ssh directory creation + tags: keymastr + debug: + var: ssh_directory_creation + +- name: Check .ssh key pair files exist + stat: + path: "~/.ssh/{{item}}" + register: ssh_key_file_exists_check + with_items: + - "{{env_ssh_ocp_name}}" + - "{{env_ssh_ocp_name}}.pub" + +- name: Print results of ssh key pair files check + tags: keymastr + debug: + var: ssh_key_file_exists_check.results[1].stat.exists + +- name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key + tags: keymastr + community.crypto.openssh_keypair: + path: ~/.ssh/{{ env_ssh_ocp_name }} + passphrase: "{{ env_ssh_ocp_pass }}" + register: public_key + when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false + +- name: set public key fact + set_fact: + env-ocp-pub: '{{ public_key.env_ocp_pub }}' + delegate_to: '{{ item }}' + with_items: '{{ groups["bastion"] }}' + + \ No newline at end of 
file diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index c12795ae..c6208527 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,11 +1,12 @@ --- - name: distribute the ssh key to a remote host - tags: ssh,kvm_host,bastion + tags: ssh,ssh-copy-id shell: "/usr/local/bin/sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" register: ssh_copy_id_execution failed_when: ssh_copy_id_execution.rc != 0 - name: Print results of copying ssh id to remote host. + tags: ssh,ssh-copy-id debug: var: ssh_copy_id_execution From 9f0839e7a34f5c19d0b1e18e13e3fcc330a7c56a Mon Sep 17 00:00:00 2001 From: jacobemery Date: Thu, 19 Aug 2021 19:39:41 -0500 Subject: [PATCH 306/885] Fixed bugs related to the implementation of variables --- README.md | 1 + main.yaml | 22 ++++--- roles/ansible_setup/tasks/main.yaml | 21 ++++--- roles/check_ssh/tasks/main.yaml | 4 +- roles/create_bastion/tasks/main.yaml | 1 - roles/get-ocp/files/install-config.yaml | 26 -------- roles/get-ocp/files/tmp-install-config.yaml | 28 --------- roles/get-ocp/tasks/main.yaml | 10 ++++ .../get-ocp/templates/install-config.yaml.j2 | 26 ++++++++ roles/httpd/handlers/main.yaml | 6 -- roles/httpd/tasks/main.yaml | 7 ++- roles/install_packages/tasks/main.yaml | 1 - roles/prep_kvm_guests/tasks/main.yaml | 10 ++-- roles/set_firewall/tasks/main.yaml | 38 ++++++------ roles/set_selinux_permissive/tasks/main.yaml | 2 +- roles/ssh-ocp-key-gen/tasks/main.yaml | 59 +++++++++++++++++++ roles/ssh_copy_id/tasks/main.yaml | 3 +- 17 files changed, 159 insertions(+), 106 deletions(-) delete mode 100644 roles/get-ocp/files/install-config.yaml delete mode 100644 roles/get-ocp/files/tmp-install-config.yaml create mode 100644 roles/get-ocp/templates/install-config.yaml.j2 delete mode 100644 roles/httpd/handlers/main.yaml create mode 100644 
roles/ssh-ocp-key-gen/tasks/main.yaml diff --git a/README.md b/README.md index 80c07b8b..65fabb58 100644 --- a/README.md +++ b/README.md @@ -46,6 +46,7 @@ Tags: * boostrap = creation of Boostrap KVM guest * compute = creation of the Compute nodes KVM guests (minimum 2) * control = creation of the Control nodes KVM guests (minimum 3) +* ssh-copy-id = for copying ssh id * dns = configuration of dns server on bastion * getocp = download of OCP installer and http server configuration * haproxy = configuration of haproxy on bastion kvm guest diff --git a/main.yaml b/main.yaml index 115cffa9..08a4c5dc 100644 --- a/main.yaml +++ b/main.yaml @@ -1,6 +1,7 @@ --- - hosts: localhost + tags: localhost connection: local become: false gather_facts: no @@ -14,6 +15,7 @@ - ssh_copy_id - hosts: kvm_host + tags: kvm_host become: true vars_files: - env.yaml @@ -29,6 +31,7 @@ - create_bastion - hosts: localhost + tags: localhost,bastion connection: local become: false gather_facts: no @@ -38,20 +41,25 @@ - ssh_copy_id - hosts: bastion + tags: bastion become: true + vars_files: + - env.yaml vars: - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils' ] roles: - - check_ssh - - install_packages - - set_firewall - - set_selinux_permissive - - dns - - haproxy - - httpd + #- check_ssh + #- install_packages + #- set_firewall + #- set_selinux_permissive + #- dns + #- haproxy + #- httpd + - ssh-ocp-key-gen - get-ocp - hosts: kvm_host + tags: kvm_host become: true gather_facts: no roles: diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index c2ed474a..df19a270 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -19,7 +19,7 @@ - name: Check for any undefined user-input variables in env.yaml. Fail if true. tags: setup fail: - msg: "Required variable {{item}} has not been provided in env.yaml file." + msg: Required variable "{{item}}" has not been provided in env.yaml file. 
when: vars[item] is undefined loop: - env_baseDomain @@ -33,7 +33,6 @@ - env_service_network - env_fips - env_pullSecret - - env_sshKey - env_ip_kvm_host - env_ip_bastion - env_ip_bootstrap @@ -46,6 +45,11 @@ - env_ssh_pass - env_ssh_ans_name - env_ssh_ans_pass + - env_ssh_ocp_name + - env_ssh_ocp_pass + - dns_nameserver + - default_gateway + - netmask - name: Set facts from env.yaml so they can be used in other playbooks tags: setup @@ -61,7 +65,6 @@ env_service_network: "{{ env_service_network }}" env_fips: "{{ env_fips }}" env_pullSecret: "{{ env_pullSecret }}" - env_sshKey: "{{ env_sshKey }}" env_ip_kvm_host: "{{ env_ip_kvm_host }}" env_ip_bastion: "{{ env_ip_bastion }}" env_ip_bootstrap: "{{ env_ip_bootstrap }}" @@ -73,7 +76,9 @@ env_ssh_username: "{{ env_ssh_username }}" env_ssh_pass: "{{ env_ssh_pass }}" env_ssh_ans_name: "{{ env_ssh_ans_name }}" - env_ssh_ans_pass: "{{ env_ssh_ans_pass }}" + env_ssh_ans_pass: "{{ env_ssh_ans_pass }}" + env_ssh_ocp_name: "{{ env_ssh_ocp_name }}" + env_ssh_ocp_pass: "{{ env_ssh_ocp_pass }}" dns_nameserver: "{{ dns_nameserver }}" default_gateway: "{{ default_gateway }}" netmask: "{{ netmask }}" @@ -111,12 +116,12 @@ {{ env_ip_compute_1 }} state: present backup: yes - + - name: check inventory setup tags: setup command: ansible-inventory --list register: inv_check - failed_when: "inv_check.rc != 0" + failed_when: inv_check.rc != 0 - name: Ansible generic setup to re-read inventory file after populated in previous tasks tags: setup @@ -131,7 +136,7 @@ - ssh-copy-id state: latest update_cache: yes - when: ansible_os_family == "RedHat" or ansible_os_family == "Debian" + when: ansible_os_family == RedHat or ansible_os_family == Debian - name: Install ssh packages on Mac machine via homebrew tags: setup @@ -141,4 +146,4 @@ - ssh-copy-id state: latest update_homebrew: yes - when: ansible_os_family == "Darwin" \ No newline at end of file + when: ansible_os_family == Darwin \ No newline at end of file diff --git 
a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml index b17745d7..ce7d56bf 100644 --- a/roles/check_ssh/tasks/main.yaml +++ b/roles/check_ssh/tasks/main.yaml @@ -1,12 +1,12 @@ --- - name: check ssh to remote hosts works - tags: keymastr,kvm_host,bastion + tags: keymastr shell: "hostname; id" register: ssh_connection_test failed_when: ssh_connection_test.rc != 0 - name: print the connectivity test results - tags: keymastr,kvm_host,bastion + tags: keymastr debug: var: ssh_connection_test.stdout_lines \ No newline at end of file diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 41e3bcb4..742a47b7 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -25,7 +25,6 @@ - name: start bastion install tags: kvm_host, bastionvm command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - regsiter: bastion_boot - name: README - Pausing for 60 minutes for you to complete the bastion installation of rhel OS with your specific installation's requirements. Please go to your kvm host at to complete installation. Once you see the login prompt on the bastion's terminal, come back here and press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. 
tags: kvm_host, bastionvm diff --git a/roles/get-ocp/files/install-config.yaml b/roles/get-ocp/files/install-config.yaml deleted file mode 100644 index f72ce8cd..00000000 --- a/roles/get-ocp/files/install-config.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -baseDomain: "{{ env_baseDomain }}" -compute: -- hyperthreading: Enabled - name: worker - replicas: 0 - architecture : "{{ env_compute_arch | default(s390x) }}" -controlPlane: - hyperthreading: Enabled - name: master - replicas: "{{ env_control_count | default(3) }}" - architecture: "{{ env_control_arch | default(s390x) }}" -metadata: - name: "{{ env_metadata_name }}" -networking: - clusterNetwork: - - cidr: "{{ env_cidr | default(10.128.0.0/14)}}" - hostPrefix: "{{ env_host_prefix | default(23) }}" - networkType: "{{ env_network_type | default(OpenShiftSDN) }}" - serviceNetwork: - - "{{ env_service_network | default(172.30.0.0/16) }}" -platform: - none: {} -fips: false "{{ env_fips | default(false) }}" -pullSecret: "{{ env_pullSecret }}" -sshKey: "{{ env_sshKey }}" diff --git a/roles/get-ocp/files/tmp-install-config.yaml b/roles/get-ocp/files/tmp-install-config.yaml deleted file mode 100644 index 1d0fa8c7..00000000 --- a/roles/get-ocp/files/tmp-install-config.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: v1 # "{{ env-api-version | default(v1) }}" -baseDomain: ocpz.wsclab.endicott.ibm.com # "{{ env-baseDomain }}" -compute: -- hyperthreading: Enabled # "{{ env-compute-hyperthreading | default(Enabled) }}" - name: worker # "{{ env-compute-name | default(worker) }}" - replicas: 0 # "{{ env-compute-count | default(0) }}" - architecture : s390x # "{{ env-compute-arch | default(s390x) }}" -controlPlane: - hyperthreading: Enabled # "{{ env-control-hyperthreading | default(Enabled) }}" - name: master # "{{ env-control-name | default(compute) }}" - replicas: 3 # "{{ env-control-count | default(3) }}" - architecture : s390x # "{{ env-control-arch | default(s390x) }}" -metadata: - name: distribution # "{{ 
env-metadata-name }}" -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 # "{{ env-cidr }}" - hostPrefix: 23 # "{{ env-host-prefix | default(23) }}" - networkType: OpenShiftSDN # "{{ env-network-type | default(OpenShiftSDN) }}" - serviceNetwork: - - 172.30.0.0/16 # "{{ env-service-network }}" -platform: - none: {} -fips: false # "{{ env-fips | default(false) }}" -pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.i
o":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -# "{{ env-pullSecret }}" -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' -# "{{ env-sshKey }}" \ No newline at end of file diff --git 
a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index e2bfa443..ebaf5431 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -1,5 +1,9 @@ --- +- name: Load in variables from env.yaml + tags: setup + include_vars: env.yaml + - name: create directory bin for mirrors tags: getocp,bastion file: @@ -78,6 +82,12 @@ group: root mode: '0755' +- name: Use template file to create install-config + tags: setup,get-ocp + template: + src: install-config.yaml.j2 + dest: roles/get-ocp/files/install-config.yaml + - name: Copy install-config.yaml to ocp install directory tags: getocp,bastion copy: diff --git a/roles/get-ocp/templates/install-config.yaml.j2 b/roles/get-ocp/templates/install-config.yaml.j2 new file mode 100644 index 00000000..2c9f386b --- /dev/null +++ b/roles/get-ocp/templates/install-config.yaml.j2 @@ -0,0 +1,26 @@ +apiVersion: v1 +baseDomain: "{{ env_baseDomain }}" +"compute\:" +- hyperthreading: Enabled + name: worker + replicas: 2 + architecture: "{{ env_compute_arch | default(s390x) }}" +"controlPlane\:" + hyperthreading: Enabled + name: master + replicas: "{{ env_control_count | default(3) }}" + architecture: "{{ env_control_arch | default(s390x) }}" +"metadata\:" + name: "{{ env_metadata_name }}" +"networking\:" + "clusterNetwork\:" + - cidr: "{{ env_cidr | default("10.128.0.0/14") }}" + hostPrefix: "{{ env_host_prefix | default(23) }}" + networkType: "{{ env_network_type | default(OpenShiftSDN) }}" + "serviceNetwork\:" + - "{{ env_service_network | default("172.30.0.0/16") }}" +"platform/:" + "none\: {}" +fips: "{{ env_fips | default(false) }}" +pullSecret: "{{ env_pullSecret }}" +sshKey: "{{ env_ocp_pub }}" \ No newline at end of file diff --git a/roles/httpd/handlers/main.yaml b/roles/httpd/handlers/main.yaml deleted file mode 100644 index f86d6901..00000000 --- a/roles/httpd/handlers/main.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- - -- name: restart httpd - service: - name: httpd - state: restarted \ No newline at 
end of file diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index b6bb6937..9f3ae813 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -19,7 +19,6 @@ regexp: '^Listen 80' replace: 'Listen 8080' backup: yes - notify: restart httpd - name: Ensure the SSL default port is 4443 tags: httpconf,bastion @@ -28,4 +27,8 @@ regexp: '^Listen 443 https' replace: 'Listen 4443 https' backup: yes - notify: restart httpd + +- name: restart httpd + service: + name: httpd + state: restarted \ No newline at end of file diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index f335e3eb..ce342bab 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,7 +1,6 @@ --- - name: Installing required packages - tags: kvm_host,bastion ansible.builtin.package: name: - "{{ packages[0] | default(omit) }}" diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index f37119d7..9e824095 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: get rhcos qcow2 file - tags: kvmhost + tags: kvm_host get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz dest: /var/lib/libvirt/images/ @@ -9,11 +9,11 @@ mode: '0755' - name: Unzip rhcos qcow2 files - tags: kvmhost + tags: kvm_host command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz - name: get rhcos initramfs image - tags: kvmhost + tags: kvm_host get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/lib/libvirt/images/ @@ -21,7 +21,7 @@ force: yes - name: get rhcos kernel - tags: kvmhost + tags: kvm_host get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: 
/var/lib/libvirt/images/ @@ -29,7 +29,7 @@ force: yes - name: get rhcos rootfs image - tags: kvmhost + tags: kvm_host get_url: url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/lib/libvirt/images/ diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index 878f4975..4b66144a 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -2,31 +2,31 @@ - name: Allow all access to tcp port 8080 tags: firewall,bastion - community.general.ufw: - rule: allow - port: '8080' - proto: tcp + ansible.posix.firewalld: + port: 8081/tcp + permanent: yes + state: enabled - name: Allow all access to tcp port 80 tags: firewall,bastion - community.general.ufw: - rule: allow - port: '80' - proto: tcp + ansible.posix.firewalld: + port: 80/tcp + permanent: yes + state: enabled - name: Allow all access to tcp port 443 tags: firewall,bastion - community.general.ufw: - rule: allow - port: '443' - proto: tcp + ansible.posix.firewalld: + port: 443/tcp + permanent: yes + state: enabled - name: Allow all access to tcp port 4443 tags: firewall,bastion - community.general.ufw: - rule: allow - port: '4443' - proto: tcp + ansible.posix.firewalld: + port: 4443/tcp + permanent: yes + state: enabled - name: Permit traffic in default zone for http tags: firewall,bastion @@ -42,5 +42,7 @@ permanent: yes state: enabled - - \ No newline at end of file +- name: restart httpd + service: + name: httpd + state: restarted \ No newline at end of file diff --git a/roles/set_selinux_permissive/tasks/main.yaml b/roles/set_selinux_permissive/tasks/main.yaml index 8fd64c57..d3cfee5a 100644 --- a/roles/set_selinux_permissive/tasks/main.yaml +++ b/roles/set_selinux_permissive/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Put SELinux in permissive mode, logging actions that would be blocked. 
- tags: selinux,kvm_host,bastion + tags: selinux ansible.posix.selinux: policy: targeted state: permissive \ No newline at end of file diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml new file mode 100644 index 00000000..4f0912a3 --- /dev/null +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -0,0 +1,59 @@ +--- + +- name: Load in variables from env.yaml + tags: setup + include_vars: env.yaml + +- name: Check to see if local .ssh directory exists + tags: keymastr + stat: + path: "~/.ssh" + register: ssh_directory_exists_check + +- name: Print results of .ssh directory check + tags: keymastr + debug: + var: ssh_directory_exists_check + +- name: Create .ssh local directory if it doesn't already exist + tags: keymastr + file: + path: "~/.ssh" + state: directory + mode: "0700" + register: ssh_directory_creation + when: ssh_directory_exists_check is defined and ssh_directory_exists_check.stat.exists == false + +- name: Print results of ssh directory creation + tags: keymastr + debug: + var: ssh_directory_creation + +- name: Check .ssh key pair files exist + stat: + path: "~/.ssh/{{item}}" + register: ssh_key_file_exists_check + with_items: + - "{{env_ssh_ocp_name}}" + - "{{env_ssh_ocp_name}}.pub" + +- name: Print results of ssh key pair files check + tags: keymastr + debug: + var: ssh_key_file_exists_check.results[1].stat.exists + +- name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key + tags: keymastr + community.crypto.openssh_keypair: + path: ~/.ssh/{{ env_ssh_ocp_name }} + passphrase: "{{ env_ssh_ocp_pass }}" + register: public_key + when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false + +- name: set public key fact + set_fact: + env-ocp-pub: '{{ public_key.env_ocp_pub }}' + delegate_to: '{{ item }}' + with_items: '{{ groups["bastion"] }}' + + \ No newline at end of 
file diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index c12795ae..c6208527 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,11 +1,12 @@ --- - name: distribute the ssh key to a remote host - tags: ssh,kvm_host,bastion + tags: ssh,ssh-copy-id shell: "/usr/local/bin/sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" register: ssh_copy_id_execution failed_when: ssh_copy_id_execution.rc != 0 - name: Print results of copying ssh id to remote host. + tags: ssh,ssh-copy-id debug: var: ssh_copy_id_execution From f1998ec42aa53cc387fe480d57570e269cfbc465 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 14:13:21 -0500 Subject: [PATCH 307/885] Updates for the implementation of variables. --- README.md | 12 ++++++++++-- env.yaml | 13 +++++++++++-- main.yaml | 4 ++-- roles/ansible_setup/tasks/main.yaml | 12 ++++++++++++ roles/get-ocp/tasks/main.yaml | 10 +++++----- 5 files changed, 40 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 65fabb58..47b6b903 100644 --- a/README.md +++ b/README.md @@ -22,8 +22,16 @@ Pre-requisites: When you are ready: * Step 1: Download this Git repository to a folder on your local computer +* Step 2: Go to to download your local command line tools (oc and kubectl) and to copy the following OpenShift links to use in the next step: + - OCP installer + - pull secret + - RHCOS initramfs + - RHCOS kernel + - RHCOS rootfs + - QCOW2 image + - * if you want to use alternate OCP client and installer links, swap them out in env.yaml (env_ocp_client and env_ocp_client) * Step 2: Fill out the required variables for your specific installation in the env.yaml file -* Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. 
And place them in roles/dns/files folder. +* Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. * Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. @@ -36,7 +44,7 @@ When you are ready: * Step 9: Shutdown and destroy bootstrap (or optionally convert bootstrap to worker node) * Step 8: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" -Step 9: +Step 9: ./openshift-install create cluster Tags: * setup = first-time setup of ansible diff --git a/env.yaml b/env.yaml index 03122612..651d4427 100644 --- a/env.yaml +++ b/env.yaml @@ -10,8 +10,17 @@ env_host_prefix: 23 #default 23 for now env_network_type: OpenShiftSDN #set default OpenShiftSDN env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 env_fips: false #true or false, set default false + env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' +# Install links from: https://console.redhat.com/openshift/install/ibmz/user-provisioned to be used in getting OpenShift files +env_rhcos_initramfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img +env_rhcos_kernel: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img +env_rhcos_rootfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img +env_qcow2_img: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz +env_ocp_client: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz +env_ocp_installer: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + # to fill inventory env_ip_kvm_host: 9.60.87.132 env_ip_bastion: 9.60.87.139 @@ -23,8 +32,8 @@ env_ip_compute_0: 9.60.87.134 env_ip_compute_1: 9.60.87.135 # ssh -env_ssh_username: jacob #Username to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. 
-env_ssh_pass: ibmzrocks #Password to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. +env_ssh_username: jacob #Username to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. +env_ssh_pass: ibmzrocks #Password to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. # Ansible passwordless ssh env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible diff --git a/main.yaml b/main.yaml index 08a4c5dc..ada15b80 100644 --- a/main.yaml +++ b/main.yaml @@ -3,7 +3,7 @@ - hosts: localhost tags: localhost connection: local - become: false + become: true gather_facts: no vars_files: - env.yaml @@ -46,7 +46,7 @@ vars_files: - env.yaml vars: - - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils' ] + - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: #- check_ssh #- install_packages diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index df19a270..096f5a4f 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -33,6 +33,12 @@ - env_service_network - env_fips - env_pullSecret + - env_rhcos_initramfs + - env_rhcos_kernel + - env_rhcos_rootfs + - env_qcow2_img + - env_ocp_client + - env_ocp_installer - env_ip_kvm_host - env_ip_bastion - env_ip_bootstrap @@ -65,6 +71,12 @@ env_service_network: "{{ env_service_network }}" env_fips: "{{ env_fips }}" env_pullSecret: "{{ env_pullSecret }}" + env_rhcos_initramfs: "{{ env_rhcos_initramfs }}" + env_rhcos_kernel: "{{ env_rhcos_kernel }}" + env_rhcos_rootfs: "{{ env_rhcos_rootfs }}" + env_qcow2_img: "{{ env_qcow2_img }}" + env_ocp_client: "{{ env_ocp_client }}" + env_ocp_installer: "{{ env_ocp_installer }}" env_ip_kvm_host: "{{ env_ip_kvm_host }}" env_ip_bastion: "{{ env_ip_bastion }}" 
env_ip_bootstrap: "{{ env_ip_bootstrap }}" diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index ebaf5431..8b6bf52b 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -14,21 +14,21 @@ - name: get ocp kernel tags: getocp,bastion get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x + url: "{{ env_rhcos_kernel }}" dest: /var/www/html/bin mode: '0755' - name: get ocp initramfs tags: getocp,bastion get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + url: "{{ env_rhcos_initramfs }}" dest: /var/www/html/bin mode: '0755' - name: get ocp rootfs tags: getocp,bastion get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + url: "{{ env_rhcos_rootfs }}" dest: /var/www/html/bin mode: '0755' @@ -41,14 +41,14 @@ - name: Unzip OCP Client tags: getocp,bastion ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz + src: "{{ env_ocp_client | default(https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz) }}" dest: /ocpinst/ remote_src: yes - name: Unzip OCP Installer tags: getocp,bastion ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + src: "{{ env_ocp_installer | default(https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz) }}" dest: /ocpinst/ remote_src: yes From 702b48e54e5396fb9dbacb22fc42289fddf8c55b Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 14:13:21 -0500 Subject: [PATCH 308/885] Updates for the implementation of variables. 
--- README.md | 12 ++++++++++-- main.yaml | 4 ++-- roles/ansible_setup/tasks/main.yaml | 12 ++++++++++++ roles/get-ocp/tasks/main.yaml | 10 +++++----- 4 files changed, 29 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 65fabb58..47b6b903 100644 --- a/README.md +++ b/README.md @@ -22,8 +22,16 @@ Pre-requisites: When you are ready: * Step 1: Download this Git repository to a folder on your local computer +* Step 2: Go to to download your local command line tools (oc and kubectl) and to copy the following OpenShift links to use in the next step: + - OCP installer + - pull secret + - RHCOS initramfs + - RHCOS kernel + - RHCOS rootfs + - QCOW2 image + - * if you want to use alternate OCP client and installer links, swap them out in env.yaml (env_ocp_client and env_ocp_client) * Step 2: Fill out the required variables for your specific installation in the env.yaml file -* Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in roles/dns/files folder. +* Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. * Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. 
@@ -36,7 +44,7 @@ When you are ready: * Step 9: Shutdown and destroy bootstrap (or optionally convert bootstrap to worker node) * Step 8: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" -Step 9: +Step 9: ./openshift-install create cluster Tags: * setup = first-time setup of ansible diff --git a/main.yaml b/main.yaml index 08a4c5dc..ada15b80 100644 --- a/main.yaml +++ b/main.yaml @@ -3,7 +3,7 @@ - hosts: localhost tags: localhost connection: local - become: false + become: true gather_facts: no vars_files: - env.yaml @@ -46,7 +46,7 @@ vars_files: - env.yaml vars: - - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils' ] + - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: #- check_ssh #- install_packages diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index df19a270..096f5a4f 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -33,6 +33,12 @@ - env_service_network - env_fips - env_pullSecret + - env_rhcos_initramfs + - env_rhcos_kernel + - env_rhcos_rootfs + - env_qcow2_img + - env_ocp_client + - env_ocp_installer - env_ip_kvm_host - env_ip_bastion - env_ip_bootstrap @@ -65,6 +71,12 @@ env_service_network: "{{ env_service_network }}" env_fips: "{{ env_fips }}" env_pullSecret: "{{ env_pullSecret }}" + env_rhcos_initramfs: "{{ env_rhcos_initramfs }}" + env_rhcos_kernel: "{{ env_rhcos_kernel }}" + env_rhcos_rootfs: "{{ env_rhcos_rootfs }}" + env_qcow2_img: "{{ env_qcow2_img }}" + env_ocp_client: "{{ env_ocp_client }}" + env_ocp_installer: "{{ env_ocp_installer }}" env_ip_kvm_host: "{{ env_ip_kvm_host }}" env_ip_bastion: "{{ env_ip_bastion }}" env_ip_bootstrap: "{{ env_ip_bootstrap }}" diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index ebaf5431..8b6bf52b 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -14,21 +14,21 @@ - name: get ocp 
kernel tags: getocp,bastion get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x + url: "{{ env_rhcos_kernel }}" dest: /var/www/html/bin mode: '0755' - name: get ocp initramfs tags: getocp,bastion get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img + url: "{{ env_rhcos_initramfs }}" dest: /var/www/html/bin mode: '0755' - name: get ocp rootfs tags: getocp,bastion get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + url: "{{ env_rhcos_rootfs }}" dest: /var/www/html/bin mode: '0755' @@ -41,14 +41,14 @@ - name: Unzip OCP Client tags: getocp,bastion ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz + src: "{{ env_ocp_client | default(https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz) }}" dest: /ocpinst/ remote_src: yes - name: Unzip OCP Installer tags: getocp,bastion ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + src: "{{ env_ocp_installer | default(https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz) }}" dest: /ocpinst/ remote_src: yes From 4d5559a8d9d2a76ede9701cf4ed9872153ed0fb1 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 15:00:05 -0500 Subject: [PATCH 309/885] Fixing setup scripts --- roles/ansible_setup/files/ansible-modules.sh | 5 +++++ roles/ansible_setup/tasks/main.yaml | 12 ++---------- 2 files changed, 7 insertions(+), 10 deletions(-) create mode 100644 roles/ansible_setup/files/ansible-modules.sh diff --git a/roles/ansible_setup/files/ansible-modules.sh b/roles/ansible_setup/files/ansible-modules.sh new file mode 100644 index 
00000000..0093ef81 --- /dev/null +++ b/roles/ansible_setup/files/ansible-modules.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +ansible-galaxy collection install community.general +ansible-galaxy collection install community.crypto +ansible-galaxy collection install ansible.posix \ No newline at end of file diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 096f5a4f..24f4f553 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -1,16 +1,8 @@ --- -- name: install ansible.community.general collection for use later +- name: run shell script to download required ansible modules tags: setup - command: ansible-galaxy collection install community.general - -- name: install ansible.community.crypto collection for use later - tags: setup - command: ansible-galaxy collection install community.crypto - -- name: install ansible.posix collection for use later - tags: setup - command: ansible-galaxy collection install ansible.posix + ansible.builtin.script: ansible-modules.sh - name: Load in variables from env.yaml tags: setup From f5d9b5458389f370b22782184041fbb5ffbce7f6 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 15:00:05 -0500 Subject: [PATCH 310/885] Fixing setup scripts --- roles/ansible_setup/files/ansible-modules.sh | 5 +++++ roles/ansible_setup/tasks/main.yaml | 12 ++---------- 2 files changed, 7 insertions(+), 10 deletions(-) create mode 100644 roles/ansible_setup/files/ansible-modules.sh diff --git a/roles/ansible_setup/files/ansible-modules.sh b/roles/ansible_setup/files/ansible-modules.sh new file mode 100644 index 00000000..0093ef81 --- /dev/null +++ b/roles/ansible_setup/files/ansible-modules.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +ansible-galaxy collection install community.general +ansible-galaxy collection install community.crypto +ansible-galaxy collection install ansible.posix \ No newline at end of file diff --git a/roles/ansible_setup/tasks/main.yaml 
b/roles/ansible_setup/tasks/main.yaml index 096f5a4f..24f4f553 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -1,16 +1,8 @@ --- -- name: install ansible.community.general collection for use later +- name: run shell script to download required ansible modules tags: setup - command: ansible-galaxy collection install community.general - -- name: install ansible.community.crypto collection for use later - tags: setup - command: ansible-galaxy collection install community.crypto - -- name: install ansible.posix collection for use later - tags: setup - command: ansible-galaxy collection install ansible.posix + ansible.builtin.script: ansible-modules.sh - name: Load in variables from env.yaml tags: setup From 06c8d5756bc07fb3abc0c77c92320020137144b9 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 15:16:15 -0500 Subject: [PATCH 311/885] Updated ansible setup to use scripts for loading modules --- README.md | 1 + .../files/ansible-modules.sh => ansible-setup.sh | 0 roles/ansible_setup/tasks/main.yaml | 4 ++-- 3 files changed, 3 insertions(+), 2 deletions(-) rename roles/ansible_setup/files/ansible-modules.sh => ansible-setup.sh (100%) diff --git a/README.md b/README.md index 47b6b903..ca692c0d 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,7 @@ When you are ready: - * if you want to use alternate OCP client and installer links, swap them out in env.yaml (env_ocp_client and env_ocp_client) * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. 
+* Step 4: Run ansible-modules.sh in the main directory to download required Ansible modules * Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. diff --git a/roles/ansible_setup/files/ansible-modules.sh b/ansible-setup.sh similarity index 100% rename from roles/ansible_setup/files/ansible-modules.sh rename to ansible-setup.sh diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 24f4f553..a6005e4c 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -1,8 +1,8 @@ --- -- name: run shell script to download required ansible modules +- name: run ansible-setup.sh shell script to download required ansible modules tags: setup - ansible.builtin.script: ansible-modules.sh + ansible.builtin.script: roles/ansible_setup/files/ansible-setup.sh - name: Load in variables from env.yaml tags: setup From bcdc1cc12bb722a43e6ad4e44d9a22a71710d01a Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 15:16:15 -0500 Subject: [PATCH 312/885] Updated ansible setup to use scripts for loading modules --- README.md | 1 + .../files/ansible-modules.sh => ansible-setup.sh | 0 roles/ansible_setup/tasks/main.yaml | 4 ++-- 3 files changed, 3 insertions(+), 2 deletions(-) rename roles/ansible_setup/files/ansible-modules.sh => ansible-setup.sh (100%) diff --git a/README.md b/README.md index 47b6b903..ca692c0d 100644 --- a/README.md +++ b/README.md @@ -32,6 +32,7 @@ When you are ready: - * if you want to use alternate OCP client and installer links, swap them out in env.yaml (env_ocp_client and env_ocp_client) * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration 
files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. +* Step 4: Run ansible-modules.sh in the main directory to download required Ansible modules * Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. diff --git a/roles/ansible_setup/files/ansible-modules.sh b/ansible-setup.sh similarity index 100% rename from roles/ansible_setup/files/ansible-modules.sh rename to ansible-setup.sh diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 24f4f553..a6005e4c 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -1,8 +1,8 @@ --- -- name: run shell script to download required ansible modules +- name: run ansible-setup.sh shell script to download required ansible modules tags: setup - ansible.builtin.script: ansible-modules.sh + ansible.builtin.script: roles/ansible_setup/files/ansible-setup.sh - name: Load in variables from env.yaml tags: setup From f402484e99e7a702f8a46afe19ec95261eb072f1 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 15:20:00 -0500 Subject: [PATCH 313/885] Adding libvirt to ansible-setup shell script --- ansible-setup.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible-setup.sh b/ansible-setup.sh index 0093ef81..72596147 100644 --- a/ansible-setup.sh +++ b/ansible-setup.sh @@ -2,4 +2,5 @@ ansible-galaxy collection install community.general ansible-galaxy collection install community.crypto -ansible-galaxy collection install ansible.posix \ No newline at end of file +ansible-galaxy collection install ansible.posix +ansible-galaxy collection install community.libvirt \ No newline at end of file From 985550a37a5dc0f274756914b3b784546964a8c8 Mon Sep 17 00:00:00 2001 From: 
jacobemery Date: Mon, 23 Aug 2021 15:20:00 -0500 Subject: [PATCH 314/885] Adding libvirt to ansible-setup shell script --- ansible-setup.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ansible-setup.sh b/ansible-setup.sh index 0093ef81..72596147 100644 --- a/ansible-setup.sh +++ b/ansible-setup.sh @@ -2,4 +2,5 @@ ansible-galaxy collection install community.general ansible-galaxy collection install community.crypto -ansible-galaxy collection install ansible.posix \ No newline at end of file +ansible-galaxy collection install ansible.posix +ansible-galaxy collection install community.libvirt \ No newline at end of file From 6e078dc1821c30bb7911d2656487db22f6d4efe7 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 15:23:14 -0500 Subject: [PATCH 315/885] Fixed ansible playbook to take out shell script --- README.md | 2 +- roles/ansible_setup/tasks/main.yaml | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/README.md b/README.md index ca692c0d..dd681e6f 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ When you are ready: - * if you want to use alternate OCP client and installer links, swap them out in env.yaml (env_ocp_client and env_ocp_client) * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. -* Step 4: Run ansible-modules.sh in the main directory to download required Ansible modules +* Step 4: Run ansible-modules.sh in the main directory to download required Ansible modules. 
First change permissions by running "chmod * Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index a6005e4c..c08e1f0d 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -1,9 +1,5 @@ --- -- name: run ansible-setup.sh shell script to download required ansible modules - tags: setup - ansible.builtin.script: roles/ansible_setup/files/ansible-setup.sh - - name: Load in variables from env.yaml tags: setup include_vars: env.yaml From 5e1371a485132e502b617ba766e33bfdc404060a Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 15:23:14 -0500 Subject: [PATCH 316/885] Fixed ansible playbook to take out shell script --- README.md | 2 +- roles/ansible_setup/tasks/main.yaml | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/README.md b/README.md index ca692c0d..dd681e6f 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ When you are ready: - * if you want to use alternate OCP client and installer links, swap them out in env.yaml (env_ocp_client and env_ocp_client) * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. -* Step 4: Run ansible-modules.sh in the main directory to download required Ansible modules +* Step 4: Run ansible-modules.sh in the main directory to download required Ansible modules. 
First change permissions by running "chmod * Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index a6005e4c..c08e1f0d 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -1,9 +1,5 @@ --- -- name: run ansible-setup.sh shell script to download required ansible modules - tags: setup - ansible.builtin.script: roles/ansible_setup/files/ansible-setup.sh - - name: Load in variables from env.yaml tags: setup include_vars: env.yaml From 3be814c86086725d9d6b4c62d0e281e3a5b9ab81 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 15:30:06 -0500 Subject: [PATCH 317/885] Gather facts false on localhost setup --- main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.yaml b/main.yaml index ada15b80..3842b0c2 100644 --- a/main.yaml +++ b/main.yaml @@ -3,7 +3,7 @@ - hosts: localhost tags: localhost connection: local - become: true + become: false gather_facts: no vars_files: - env.yaml From af07f2c3ddd979b794711dd1abe780d30b8bb6ff Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 15:30:06 -0500 Subject: [PATCH 318/885] Gather facts false on localhost setup --- main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.yaml b/main.yaml index ada15b80..3842b0c2 100644 --- a/main.yaml +++ b/main.yaml @@ -3,7 +3,7 @@ - hosts: localhost tags: localhost connection: local - become: true + become: false gather_facts: no vars_files: - env.yaml From 5e1e254bc52f3c6a8d00be7fff85bca370cd610a Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 15:40:59 -0500 Subject: [PATCH 319/885] Added quotes around host os family conditional --- main.yaml | 4 ++-- 
roles/ansible_setup/tasks/main.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/main.yaml b/main.yaml index 3842b0c2..99364600 100644 --- a/main.yaml +++ b/main.yaml @@ -38,7 +38,8 @@ vars: - ssh_target_ip: "{{ env_ip_bastion }}" roles: - - ssh_copy_id + - ssh_copy_id # to connect to bastion + - ssh-ocp-key-gen # for bastion to connect to nodes - hosts: bastion tags: bastion @@ -55,7 +56,6 @@ #- dns #- haproxy #- httpd - - ssh-ocp-key-gen - get-ocp - hosts: kvm_host diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index c08e1f0d..fb5a9dda 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -136,7 +136,7 @@ - ssh-copy-id state: latest update_cache: yes - when: ansible_os_family == RedHat or ansible_os_family == Debian + when: ansible_os_family == 'RedHat' or ansible_os_family == 'Debian' - name: Install ssh packages on Mac machine via homebrew tags: setup @@ -146,4 +146,4 @@ - ssh-copy-id state: latest update_homebrew: yes - when: ansible_os_family == Darwin \ No newline at end of file + when: ansible_os_family == 'Darwin' \ No newline at end of file From e0bcc0f9fbeaf4b707647883b4c00f3c81baa771 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 15:40:59 -0500 Subject: [PATCH 320/885] Added quotes around host os family conditional --- main.yaml | 4 ++-- roles/ansible_setup/tasks/main.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/main.yaml b/main.yaml index 3842b0c2..99364600 100644 --- a/main.yaml +++ b/main.yaml @@ -38,7 +38,8 @@ vars: - ssh_target_ip: "{{ env_ip_bastion }}" roles: - - ssh_copy_id + - ssh_copy_id # to connect to bastion + - ssh-ocp-key-gen # for bastion to connect to nodes - hosts: bastion tags: bastion @@ -55,7 +56,6 @@ #- dns #- haproxy #- httpd - - ssh-ocp-key-gen - get-ocp - hosts: kvm_host diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 
c08e1f0d..fb5a9dda 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -136,7 +136,7 @@ - ssh-copy-id state: latest update_cache: yes - when: ansible_os_family == RedHat or ansible_os_family == Debian + when: ansible_os_family == 'RedHat' or ansible_os_family == 'Debian' - name: Install ssh packages on Mac machine via homebrew tags: setup @@ -146,4 +146,4 @@ - ssh-copy-id state: latest update_homebrew: yes - when: ansible_os_family == Darwin \ No newline at end of file + when: ansible_os_family == 'Darwin' \ No newline at end of file From a5c687955737b59dcda4fb2fe6563ed88b296225 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 15:43:40 -0500 Subject: [PATCH 321/885] Added become: yes to ansible-setup main.yaml tasks --- roles/ansible_setup/tasks/main.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index fb5a9dda..b5a32396 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -136,6 +136,7 @@ - ssh-copy-id state: latest update_cache: yes + become: yes when: ansible_os_family == 'RedHat' or ansible_os_family == 'Debian' - name: Install ssh packages on Mac machine via homebrew @@ -146,4 +147,5 @@ - ssh-copy-id state: latest update_homebrew: yes + become: yes when: ansible_os_family == 'Darwin' \ No newline at end of file From cd133d3e2ac48dacd1ffca40b73c61e645c51db3 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 15:43:40 -0500 Subject: [PATCH 322/885] Added become: yes to ansible-setup main.yaml tasks --- roles/ansible_setup/tasks/main.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index fb5a9dda..b5a32396 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -136,6 +136,7 @@ - ssh-copy-id state: latest update_cache: yes + become: yes when: 
ansible_os_family == 'RedHat' or ansible_os_family == 'Debian' - name: Install ssh packages on Mac machine via homebrew @@ -146,4 +147,5 @@ - ssh-copy-id state: latest update_homebrew: yes + become: yes when: ansible_os_family == 'Darwin' \ No newline at end of file From 950fd880fa8b469ec50fdd2bcbba975f3718fdf6 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 15:47:41 -0500 Subject: [PATCH 323/885] Took out become yes --- roles/ansible_setup/tasks/main.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index b5a32396..fb5a9dda 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -136,7 +136,6 @@ - ssh-copy-id state: latest update_cache: yes - become: yes when: ansible_os_family == 'RedHat' or ansible_os_family == 'Debian' - name: Install ssh packages on Mac machine via homebrew @@ -147,5 +146,4 @@ - ssh-copy-id state: latest update_homebrew: yes - become: yes when: ansible_os_family == 'Darwin' \ No newline at end of file From 3289028b605dc4fb27cd4d4e30c73867edc24c1d Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 15:47:41 -0500 Subject: [PATCH 324/885] Took out become yes --- roles/ansible_setup/tasks/main.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index b5a32396..fb5a9dda 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -136,7 +136,6 @@ - ssh-copy-id state: latest update_cache: yes - become: yes when: ansible_os_family == 'RedHat' or ansible_os_family == 'Debian' - name: Install ssh packages on Mac machine via homebrew @@ -147,5 +146,4 @@ - ssh-copy-id state: latest update_homebrew: yes - become: yes when: ansible_os_family == 'Darwin' \ No newline at end of file From e8a0764999939b186d842679790a944c89c4d860 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 16:01:56 
-0500 Subject: [PATCH 325/885] Added two setup scripts for linux and mac --- README.md | 3 +-- ansible-setup.sh => ansible-setup-linux.sh | 5 ++++- ansible-setup-mac.sh | 10 ++++++++++ roles/ansible_setup/tasks/main.yaml | 2 +- 4 files changed, 16 insertions(+), 4 deletions(-) rename ansible-setup.sh => ansible-setup-linux.sh (56%) create mode 100644 ansible-setup-mac.sh diff --git a/README.md b/README.md index dd681e6f..4bc129f6 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,6 @@ Pre-requisites: * If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: * homebrew package manager installed ( how-to: https://brew.sh/ ) * Updated software for command line tools ( run "softwareupdate --all --install" in your terminal ) - * sshpass package ( run "brew install esolitos/ipa/sshpass" in your terminal ) * A logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: * 6 Integrated Facilities for Linux (IFLs) * 75 GB of RAM @@ -32,7 +31,7 @@ When you are ready: - * if you want to use alternate OCP client and installer links, swap them out in env.yaml (env_ocp_client and env_ocp_client) * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. -* Step 4: Run ansible-modules.sh in the main directory to download required Ansible modules. First change permissions by running "chmod +* Step 4: Run ansible-setup-linux/mac.sh in the main directory depending on your operating system to download required Ansible modules and programs. 
First change permissions by running "chmod 755 " (choose one of linux or mac) * Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. diff --git a/ansible-setup.sh b/ansible-setup-linux.sh similarity index 56% rename from ansible-setup.sh rename to ansible-setup-linux.sh index 72596147..7c5ac00e 100644 --- a/ansible-setup.sh +++ b/ansible-setup-linux.sh @@ -3,4 +3,7 @@ ansible-galaxy collection install community.general ansible-galaxy collection install community.crypto ansible-galaxy collection install ansible.posix -ansible-galaxy collection install community.libvirt \ No newline at end of file +ansible-galaxy collection install community.libvirt +dnf install sshpass -y +dnf install openssh -y +dnf install ssh-copy-id -y \ No newline at end of file diff --git a/ansible-setup-mac.sh b/ansible-setup-mac.sh new file mode 100644 index 00000000..3aa4f937 --- /dev/null +++ b/ansible-setup-mac.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +ansible-galaxy collection install community.general +ansible-galaxy collection install community.crypto +ansible-galaxy collection install ansible.posix +ansible-galaxy collection install community.libvirt + +brew install sshpass -y +brew install openssh -y +brew install ssh-copy-id -y \ No newline at end of file diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index fb5a9dda..268e04ea 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -146,4 +146,4 @@ - ssh-copy-id state: latest update_homebrew: yes - when: ansible_os_family == 'Darwin' \ No newline at end of file + when: ansible_os_family == 'Darwin' \ No newline at end of file From c43ac1b7f79cf93e17663ee351173c201c9ca960 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 16:01:56 -0500 
Subject: [PATCH 326/885] Added two setup scripts for linux and mac --- README.md | 3 +-- ansible-setup.sh => ansible-setup-linux.sh | 5 ++++- ansible-setup-mac.sh | 10 ++++++++++ roles/ansible_setup/tasks/main.yaml | 2 +- 4 files changed, 16 insertions(+), 4 deletions(-) rename ansible-setup.sh => ansible-setup-linux.sh (56%) create mode 100644 ansible-setup-mac.sh diff --git a/README.md b/README.md index dd681e6f..4bc129f6 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,6 @@ Pre-requisites: * If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: * homebrew package manager installed ( how-to: https://brew.sh/ ) * Updated software for command line tools ( run "softwareupdate --all --install" in your terminal ) - * sshpass package ( run "brew install esolitos/ipa/sshpass" in your terminal ) * A logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: * 6 Integrated Facilities for Linux (IFLs) * 75 GB of RAM @@ -32,7 +31,7 @@ When you are ready: - * if you want to use alternate OCP client and installer links, swap them out in env.yaml (env_ocp_client and env_ocp_client) * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. -* Step 4: Run ansible-modules.sh in the main directory to download required Ansible modules. First change permissions by running "chmod +* Step 4: Run ansible-setup-linux/mac.sh in the main directory depending on your operating system to download required Ansible modules and programs. 
First change permissions by running "chmod 755 " (choose one of linux or mac) * Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. diff --git a/ansible-setup.sh b/ansible-setup-linux.sh similarity index 56% rename from ansible-setup.sh rename to ansible-setup-linux.sh index 72596147..7c5ac00e 100644 --- a/ansible-setup.sh +++ b/ansible-setup-linux.sh @@ -3,4 +3,7 @@ ansible-galaxy collection install community.general ansible-galaxy collection install community.crypto ansible-galaxy collection install ansible.posix -ansible-galaxy collection install community.libvirt \ No newline at end of file +ansible-galaxy collection install community.libvirt +dnf install sshpass -y +dnf install openssh -y +dnf install ssh-copy-id -y \ No newline at end of file diff --git a/ansible-setup-mac.sh b/ansible-setup-mac.sh new file mode 100644 index 00000000..3aa4f937 --- /dev/null +++ b/ansible-setup-mac.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +ansible-galaxy collection install community.general +ansible-galaxy collection install community.crypto +ansible-galaxy collection install ansible.posix +ansible-galaxy collection install community.libvirt + +brew install sshpass -y +brew install openssh -y +brew install ssh-copy-id -y \ No newline at end of file diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index fb5a9dda..268e04ea 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -146,4 +146,4 @@ - ssh-copy-id state: latest update_homebrew: yes - when: ansible_os_family == 'Darwin' \ No newline at end of file + when: ansible_os_family == 'Darwin' \ No newline at end of file From 43c3cfaa3dcf71570478d79ab9bb26ee5216e777 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 16:03:24 -0500 
Subject: [PATCH 327/885] deleted substituted tasks for shell script --- roles/ansible_setup/tasks/main.yaml | 23 +---------------------- 1 file changed, 1 insertion(+), 22 deletions(-) diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 268e04ea..aafa9838 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -125,25 +125,4 @@ - name: Ansible generic setup to re-read inventory file after populated in previous tasks tags: setup - ansible.builtin.gather_facts: - -- name: Install ssh packages on Linux localhost machines - tags: setup - ansible.builtin.package: - name: - - sshpass - - openssh - - ssh-copy-id - state: latest - update_cache: yes - when: ansible_os_family == 'RedHat' or ansible_os_family == 'Debian' - -- name: Install ssh packages on Mac machine via homebrew - tags: setup - community.general.homebrew: - name: - - openssh - - ssh-copy-id - state: latest - update_homebrew: yes - when: ansible_os_family == 'Darwin' \ No newline at end of file + ansible.builtin.gather_facts: \ No newline at end of file From 7918d7f8dbcd22c6de766c4db2d7b5e1f54a81b3 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 16:03:24 -0500 Subject: [PATCH 328/885] deleted substituted tasks for shell script --- roles/ansible_setup/tasks/main.yaml | 23 +---------------------- 1 file changed, 1 insertion(+), 22 deletions(-) diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 268e04ea..aafa9838 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -125,25 +125,4 @@ - name: Ansible generic setup to re-read inventory file after populated in previous tasks tags: setup - ansible.builtin.gather_facts: - -- name: Install ssh packages on Linux localhost machines - tags: setup - ansible.builtin.package: - name: - - sshpass - - openssh - - ssh-copy-id - state: latest - update_cache: yes - when: ansible_os_family == 'RedHat' or 
ansible_os_family == 'Debian' - -- name: Install ssh packages on Mac machine via homebrew - tags: setup - community.general.homebrew: - name: - - openssh - - ssh-copy-id - state: latest - update_homebrew: yes - when: ansible_os_family == 'Darwin' \ No newline at end of file + ansible.builtin.gather_facts: \ No newline at end of file From b4a6ebf8d250f245b4f754be039168798bc36c13 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 16:07:38 -0500 Subject: [PATCH 329/885] Adding sudo to shell scripts --- README.md | 2 +- ansible-setup-linux.sh | 6 +++--- ansible-setup-mac.sh | 7 +++---- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 4bc129f6..3686bc5a 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ When you are ready: - * if you want to use alternate OCP client and installer links, swap them out in env.yaml (env_ocp_client and env_ocp_client) * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. -* Step 4: Run ansible-setup-linux/mac.sh in the main directory depending on your operating system to download required Ansible modules and programs. First change permissions by running "chmod 755 " (choose one of linux or mac) +* Step 4: Run ansible-setup-linux/mac.sh shell script in the main directory (run /.) depending on your operating system to download required Ansible modules and programs. First change permissions by running "chmod 755 " (choose one of linux or mac) * Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. 
diff --git a/ansible-setup-linux.sh b/ansible-setup-linux.sh index 7c5ac00e..b3a5086f 100644 --- a/ansible-setup-linux.sh +++ b/ansible-setup-linux.sh @@ -4,6 +4,6 @@ ansible-galaxy collection install community.general ansible-galaxy collection install community.crypto ansible-galaxy collection install ansible.posix ansible-galaxy collection install community.libvirt -dnf install sshpass -y -dnf install openssh -y -dnf install ssh-copy-id -y \ No newline at end of file +sudo dnf install sshpass -y +sudo dnf install openssh -y +sudo dnf install ssh-copy-id -y \ No newline at end of file diff --git a/ansible-setup-mac.sh b/ansible-setup-mac.sh index 3aa4f937..06eacc19 100644 --- a/ansible-setup-mac.sh +++ b/ansible-setup-mac.sh @@ -4,7 +4,6 @@ ansible-galaxy collection install community.general ansible-galaxy collection install community.crypto ansible-galaxy collection install ansible.posix ansible-galaxy collection install community.libvirt - -brew install sshpass -y -brew install openssh -y -brew install ssh-copy-id -y \ No newline at end of file +sudo brew install sshpass -y +sudo brew install openssh -y +sudo brew install ssh-copy-id -y \ No newline at end of file From d0f56c249a09369c71184d57da4ea289f465d17f Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 16:07:38 -0500 Subject: [PATCH 330/885] Adding sudo to shell scripts --- README.md | 2 +- ansible-setup-linux.sh | 6 +++--- ansible-setup-mac.sh | 7 +++---- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 4bc129f6..3686bc5a 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ When you are ready: - * if you want to use alternate OCP client and installer links, swap them out in env.yaml (env_ocp_client and env_ocp_client) * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your 
networking team. And place them in the roles/dns/files folder. -* Step 4: Run ansible-setup-linux/mac.sh in the main directory depending on your operating system to download required Ansible modules and programs. First change permissions by running "chmod 755 " (choose one of linux or mac) +* Step 4: Run ansible-setup-linux/mac.sh shell script in the main directory (run /.) depending on your operating system to download required Ansible modules and programs. First change permissions by running "chmod 755 " (choose one of linux or mac) * Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. diff --git a/ansible-setup-linux.sh b/ansible-setup-linux.sh index 7c5ac00e..b3a5086f 100644 --- a/ansible-setup-linux.sh +++ b/ansible-setup-linux.sh @@ -4,6 +4,6 @@ ansible-galaxy collection install community.general ansible-galaxy collection install community.crypto ansible-galaxy collection install ansible.posix ansible-galaxy collection install community.libvirt -dnf install sshpass -y -dnf install openssh -y -dnf install ssh-copy-id -y \ No newline at end of file +sudo dnf install sshpass -y +sudo dnf install openssh -y +sudo dnf install ssh-copy-id -y \ No newline at end of file diff --git a/ansible-setup-mac.sh b/ansible-setup-mac.sh index 3aa4f937..06eacc19 100644 --- a/ansible-setup-mac.sh +++ b/ansible-setup-mac.sh @@ -4,7 +4,6 @@ ansible-galaxy collection install community.general ansible-galaxy collection install community.crypto ansible-galaxy collection install ansible.posix ansible-galaxy collection install community.libvirt - -brew install sshpass -y -brew install openssh -y -brew install ssh-copy-id -y \ No newline at end of file +sudo brew install sshpass -y +sudo brew install openssh -y +sudo brew install ssh-copy-id -y \ No newline at end of file From 
6ff97cc47d46cb3ce846106dd643fb1d465c3352 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 16:25:39 -0500 Subject: [PATCH 331/885] Took out path from copy-id task --- README.md | 4 +--- ansible-setup-linux.sh | 3 +-- ansible-setup-mac.sh | 3 +-- env.yaml | 2 -- roles/ansible_setup/tasks/main.yaml | 4 ---- roles/ssh_copy_id/tasks/main.yaml | 2 +- 6 files changed, 4 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 3686bc5a..16b427ea 100644 --- a/README.md +++ b/README.md @@ -22,16 +22,14 @@ Pre-requisites: When you are ready: * Step 1: Download this Git repository to a folder on your local computer * Step 2: Go to to download your local command line tools (oc and kubectl) and to copy the following OpenShift links to use in the next step: - - OCP installer - pull secret - RHCOS initramfs - RHCOS kernel - RHCOS rootfs - QCOW2 image - - * if you want to use alternate OCP client and installer links, swap them out in env.yaml (env_ocp_client and env_ocp_client) * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. -* Step 4: Run ansible-setup-linux/mac.sh shell script in the main directory (run /.) depending on your operating system to download required Ansible modules and programs. First change permissions by running "chmod 755 " (choose one of linux or mac) +* Step 4: Run ansible-setup-linux/mac.sh shell script in the main directory (run ./ansible-setup-linux/mac.sh in terminal) depending on your operating system to download required Ansible modules and programs. 
First change permissions by running "chmod 755 " (choose one of linux or mac) * Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. diff --git a/ansible-setup-linux.sh b/ansible-setup-linux.sh index b3a5086f..64ac4cf6 100644 --- a/ansible-setup-linux.sh +++ b/ansible-setup-linux.sh @@ -5,5 +5,4 @@ ansible-galaxy collection install community.crypto ansible-galaxy collection install ansible.posix ansible-galaxy collection install community.libvirt sudo dnf install sshpass -y -sudo dnf install openssh -y -sudo dnf install ssh-copy-id -y \ No newline at end of file +sudo dnf install openssh -y \ No newline at end of file diff --git a/ansible-setup-mac.sh b/ansible-setup-mac.sh index 06eacc19..cb81cad9 100644 --- a/ansible-setup-mac.sh +++ b/ansible-setup-mac.sh @@ -5,5 +5,4 @@ ansible-galaxy collection install community.crypto ansible-galaxy collection install ansible.posix ansible-galaxy collection install community.libvirt sudo brew install sshpass -y -sudo brew install openssh -y -sudo brew install ssh-copy-id -y \ No newline at end of file +sudo brew install openssh -y \ No newline at end of file diff --git a/env.yaml b/env.yaml index 651d4427..fdd57705 100644 --- a/env.yaml +++ b/env.yaml @@ -18,8 +18,6 @@ env_rhcos_initramfs: https://mirror.openshift.com/pub/openshift-v4/s390x/depende env_rhcos_kernel: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img env_rhcos_rootfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img env_qcow2_img: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz -env_ocp_client: 
https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz -env_ocp_installer: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz # to fill inventory env_ip_kvm_host: 9.60.87.132 diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index aafa9838..1c9c1ed3 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -25,8 +25,6 @@ - env_rhcos_kernel - env_rhcos_rootfs - env_qcow2_img - - env_ocp_client - - env_ocp_installer - env_ip_kvm_host - env_ip_bastion - env_ip_bootstrap @@ -63,8 +61,6 @@ env_rhcos_kernel: "{{ env_rhcos_kernel }}" env_rhcos_rootfs: "{{ env_rhcos_rootfs }}" env_qcow2_img: "{{ env_qcow2_img }}" - env_ocp_client: "{{ env_ocp_client }}" - env_ocp_installer: "{{ env_ocp_installer }}" env_ip_kvm_host: "{{ env_ip_kvm_host }}" env_ip_bastion: "{{ env_ip_bastion }}" env_ip_bootstrap: "{{ env_ip_bootstrap }}" diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index c6208527..e87e40c7 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -2,7 +2,7 @@ - name: distribute the ssh key to a remote host tags: ssh,ssh-copy-id - shell: "/usr/local/bin/sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" + shell: "sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" register: ssh_copy_id_execution failed_when: ssh_copy_id_execution.rc != 0 From 3bd7eae3f1ec32dd0402d8c29058b508c6eb2f04 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 16:25:39 -0500 Subject: [PATCH 332/885] Took out path from copy-id task --- README.md | 4 +--- ansible-setup-linux.sh | 3 +-- ansible-setup-mac.sh | 3 +-- 
roles/ansible_setup/tasks/main.yaml | 4 ---- roles/ssh_copy_id/tasks/main.yaml | 2 +- 5 files changed, 4 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 3686bc5a..16b427ea 100644 --- a/README.md +++ b/README.md @@ -22,16 +22,14 @@ Pre-requisites: When you are ready: * Step 1: Download this Git repository to a folder on your local computer * Step 2: Go to to download your local command line tools (oc and kubectl) and to copy the following OpenShift links to use in the next step: - - OCP installer - pull secret - RHCOS initramfs - RHCOS kernel - RHCOS rootfs - QCOW2 image - - * if you want to use alternate OCP client and installer links, swap them out in env.yaml (env_ocp_client and env_ocp_client) * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. -* Step 4: Run ansible-setup-linux/mac.sh shell script in the main directory (run /.) depending on your operating system to download required Ansible modules and programs. First change permissions by running "chmod 755 " (choose one of linux or mac) +* Step 4: Run ansible-setup-linux/mac.sh shell script in the main directory (run ./ansible-setup-linux/mac.sh in terminal) depending on your operating system to download required Ansible modules and programs. First change permissions by running "chmod 755 " (choose one of linux or mac) * Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. 
diff --git a/ansible-setup-linux.sh b/ansible-setup-linux.sh index b3a5086f..64ac4cf6 100644 --- a/ansible-setup-linux.sh +++ b/ansible-setup-linux.sh @@ -5,5 +5,4 @@ ansible-galaxy collection install community.crypto ansible-galaxy collection install ansible.posix ansible-galaxy collection install community.libvirt sudo dnf install sshpass -y -sudo dnf install openssh -y -sudo dnf install ssh-copy-id -y \ No newline at end of file +sudo dnf install openssh -y \ No newline at end of file diff --git a/ansible-setup-mac.sh b/ansible-setup-mac.sh index 06eacc19..cb81cad9 100644 --- a/ansible-setup-mac.sh +++ b/ansible-setup-mac.sh @@ -5,5 +5,4 @@ ansible-galaxy collection install community.crypto ansible-galaxy collection install ansible.posix ansible-galaxy collection install community.libvirt sudo brew install sshpass -y -sudo brew install openssh -y -sudo brew install ssh-copy-id -y \ No newline at end of file +sudo brew install openssh -y \ No newline at end of file diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index aafa9838..1c9c1ed3 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -25,8 +25,6 @@ - env_rhcos_kernel - env_rhcos_rootfs - env_qcow2_img - - env_ocp_client - - env_ocp_installer - env_ip_kvm_host - env_ip_bastion - env_ip_bootstrap @@ -63,8 +61,6 @@ env_rhcos_kernel: "{{ env_rhcos_kernel }}" env_rhcos_rootfs: "{{ env_rhcos_rootfs }}" env_qcow2_img: "{{ env_qcow2_img }}" - env_ocp_client: "{{ env_ocp_client }}" - env_ocp_installer: "{{ env_ocp_installer }}" env_ip_kvm_host: "{{ env_ip_kvm_host }}" env_ip_bastion: "{{ env_ip_bastion }}" env_ip_bootstrap: "{{ env_ip_bootstrap }}" diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index c6208527..e87e40c7 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -2,7 +2,7 @@ - name: distribute the ssh key to a remote host tags: ssh,ssh-copy-id - shell: 
"/usr/local/bin/sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" + shell: "sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" register: ssh_copy_id_execution failed_when: ssh_copy_id_execution.rc != 0 From a3bdde0da0ee151a8d94b5f208fb8eb713f1f309 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 16:58:54 -0500 Subject: [PATCH 333/885] Changed variable reference in ssh-ocp-key-gen playbook --- roles/ssh-ocp-key-gen/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml index 4f0912a3..81e69a89 100644 --- a/roles/ssh-ocp-key-gen/tasks/main.yaml +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -52,7 +52,7 @@ - name: set public key fact set_fact: - env-ocp-pub: '{{ public_key.env_ocp_pub }}' + env-ocp-pub: '{{ public_key }}' delegate_to: '{{ item }}' with_items: '{{ groups["bastion"] }}' From 1f424e3dbbae373a2485321e0ced64537b10f7c5 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 23 Aug 2021 16:58:54 -0500 Subject: [PATCH 334/885] Changed variable reference in ssh-ocp-key-gen playbook --- roles/ssh-ocp-key-gen/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml index 4f0912a3..81e69a89 100644 --- a/roles/ssh-ocp-key-gen/tasks/main.yaml +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -52,7 +52,7 @@ - name: set public key fact set_fact: - env-ocp-pub: '{{ public_key.env_ocp_pub }}' + env-ocp-pub: '{{ public_key }}' delegate_to: '{{ item }}' with_items: '{{ groups["bastion"] }}' From 79938b9871f565445e5dc8e90505cedad24a1572 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 24 Aug 2021 09:15:04 -0500 Subject: [PATCH 
335/885] Bug fixes for the implementation of variables --- ansible-setup-mac.sh | 4 +- group_vars/all/main.yaml | 16 ++++++-- .../all/main.yaml.1782.2021-08-23@17:21:08~ | 40 +++++++++++++++++++ main.yaml | 16 ++++---- roles/enable_packages/tasks/main.yaml | 7 ++-- roles/mount_rhel/tasks/main.yaml | 14 ++++++- roles/ssh-ocp-key-gen/tasks/main.yaml | 4 +- roles/ssh_copy_id/tasks/main.yaml | 2 +- 8 files changed, 82 insertions(+), 21 deletions(-) mode change 100644 => 100755 ansible-setup-mac.sh create mode 100644 group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ diff --git a/ansible-setup-mac.sh b/ansible-setup-mac.sh old mode 100644 new mode 100755 index cb81cad9..ecdfe1ad --- a/ansible-setup-mac.sh +++ b/ansible-setup-mac.sh @@ -4,5 +4,5 @@ ansible-galaxy collection install community.general ansible-galaxy collection install community.crypto ansible-galaxy collection install ansible.posix ansible-galaxy collection install community.libvirt -sudo brew install sshpass -y -sudo brew install openssh -y \ No newline at end of file +brew install sshpass -y +brew install openssh -y \ No newline at end of file diff --git a/group_vars/all/main.yaml b/group_vars/all/main.yaml index cdfa933b..d4931867 100644 --- a/group_vars/all/main.yaml +++ b/group_vars/all/main.yaml @@ -12,8 +12,14 @@ env_host_prefix: 23 #default 23 for now env_network_type: OpenShiftSDN #set default OpenShiftSDN env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 env_fips: false #true or false, set default false + env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -env_sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' + +# Install links from: https://console.redhat.com/openshift/install/ibmz/user-provisioned to be used in getting OpenShift files +env_rhcos_initramfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img +env_rhcos_kernel: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img +env_rhcos_rootfs: 
https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img +env_qcow2_img: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz # to fill inventory env_ip_kvm_host: 9.60.87.132 @@ -26,13 +32,17 @@ env_ip_compute_0: 9.60.87.134 env_ip_compute_1: 9.60.87.135 # ssh -env_ssh_username: jacob #Username to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. -env_ssh_pass: ibmzrocks #Password to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. +env_ssh_username: jacob #Username to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. +env_ssh_pass: ibmzrocks #Password to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. # Ansible passwordless ssh env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. 
default=""(no password) +# OpenShift cluster's ssh key pair filename +env_ssh_ocp_name: ocp +env_ssh_ocp_pass: ibmzrocks + # networking dns_nameserver: 9.60.87.139 default_gateway: 9.60.86.1 diff --git a/group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ b/group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ new file mode 100644 index 00000000..cdfa933b --- /dev/null +++ b/group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ @@ -0,0 +1,40 @@ +# will populate with ansible_setup playbook +# BEGIN ANSIBLE MANAGED BLOCK + +# to populate install_config +env_baseDomain: ocpz.wsclab.endicott.ibm.com +env_compute_arch: s390x #default to s390x +env_control_count: 3 #default 3 +env_control_arch: s390x #default s390x +env_metadata_name: distribution +env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now +env_host_prefix: 23 #default 23 for now +env_network_type: OpenShiftSDN #set default OpenShiftSDN +env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 +env_fips: false #true or false, set default false +env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' +env_sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' + +# to fill inventory +env_ip_kvm_host: 9.60.87.132 +env_ip_bastion: 9.60.87.139 +env_ip_bootstrap: 9.60.87.133 +env_ip_control_0: 9.60.87.136 +env_ip_control_1: 9.60.87.137 +env_ip_control_2: 9.60.87.138 +env_ip_compute_0: 9.60.87.134 +env_ip_compute_1: 9.60.87.135 + +# ssh +env_ssh_username: jacob #Username to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. 
+env_ssh_pass: ibmzrocks #Password to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. + +# Ansible passwordless ssh +env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible +env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) + +# networking +dns_nameserver: 9.60.87.139 +default_gateway: 9.60.86.1 +netmask: 255.255.254.0 +# END ANSIBLE MANAGED BLOCK diff --git a/main.yaml b/main.yaml index 99364600..627cc7a5 100644 --- a/main.yaml +++ b/main.yaml @@ -28,7 +28,7 @@ - enable_packages - macvtap - mount_rhel - - create_bastion + #- create_bastion - hosts: localhost tags: localhost,bastion @@ -49,13 +49,13 @@ vars: - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: - #- check_ssh - #- install_packages - #- set_firewall - #- set_selinux_permissive - #- dns - #- haproxy - #- httpd + - check_ssh + - install_packages + - set_firewall + - set_selinux_permissive + - dns + - haproxy + - httpd - get-ocp - hosts: kvm_host diff --git a/roles/enable_packages/tasks/main.yaml b/roles/enable_packages/tasks/main.yaml index 7d87e5df..0cf851a7 100644 --- a/roles/enable_packages/tasks/main.yaml +++ b/roles/enable_packages/tasks/main.yaml @@ -2,7 +2,6 @@ - name: enable packages tags: kvm_host - ansible.builtin.service: - name: - - libvirtd - state: enabled \ No newline at end of file + service: + name: libvirtd + state: started \ No newline at end of file diff --git a/roles/mount_rhel/tasks/main.yaml b/roles/mount_rhel/tasks/main.yaml index b62d64ef..b86b3454 100644 --- a/roles/mount_rhel/tasks/main.yaml +++ b/roles/mount_rhel/tasks/main.yaml @@ -1,5 +1,17 @@ --- +- name: Check to see if local .ssh directory exists + tags: keymastr + stat: + path: "/rhcos-install/" + register: rhcos_mount + +- name: Print results of .ssh directory check + tags: keymastr + debug: + var: rhcos_mount + - name: Mount red hat core os 
install directory tags: kvm_host - command: mount -o loop /var/lib/libvirt/images/rhel83.iso /rhcos-install/ \ No newline at end of file + command: mount -o loop /var/lib/libvirt/images/rhel83.iso /rhcos-install/ + when: rhcos_mount is defined and rhcos_mount.stat.exists == false \ No newline at end of file diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml index 81e69a89..12cbe55e 100644 --- a/roles/ssh-ocp-key-gen/tasks/main.yaml +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -47,12 +47,12 @@ community.crypto.openssh_keypair: path: ~/.ssh/{{ env_ssh_ocp_name }} passphrase: "{{ env_ssh_ocp_pass }}" - register: public_key + register: ssh_ocp when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false - name: set public key fact set_fact: - env-ocp-pub: '{{ public_key }}' + env-ocp-pub: '{{ ssh_ocp.public_key }}' delegate_to: '{{ item }}' with_items: '{{ groups["bastion"] }}' diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index e87e40c7..7ad3e169 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -2,7 +2,7 @@ - name: distribute the ssh key to a remote host tags: ssh,ssh-copy-id - shell: "sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" + shell: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" register: ssh_copy_id_execution failed_when: ssh_copy_id_execution.rc != 0 From 8754f7d30c4f1433af004f9ac147a3d599be10bd Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 24 Aug 2021 09:15:04 -0500 Subject: [PATCH 336/885] Bug fixes for the implementation of variables --- ansible-setup-mac.sh | 4 +- group_vars/all/main.yaml | 16 ++++++-- 
.../all/main.yaml.1782.2021-08-23@17:21:08~ | 40 +++++++++++++++++++ main.yaml | 16 ++++---- roles/enable_packages/tasks/main.yaml | 7 ++-- roles/mount_rhel/tasks/main.yaml | 14 ++++++- roles/ssh-ocp-key-gen/tasks/main.yaml | 4 +- roles/ssh_copy_id/tasks/main.yaml | 2 +- 8 files changed, 82 insertions(+), 21 deletions(-) mode change 100644 => 100755 ansible-setup-mac.sh create mode 100644 group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ diff --git a/ansible-setup-mac.sh b/ansible-setup-mac.sh old mode 100644 new mode 100755 index cb81cad9..ecdfe1ad --- a/ansible-setup-mac.sh +++ b/ansible-setup-mac.sh @@ -4,5 +4,5 @@ ansible-galaxy collection install community.general ansible-galaxy collection install community.crypto ansible-galaxy collection install ansible.posix ansible-galaxy collection install community.libvirt -sudo brew install sshpass -y -sudo brew install openssh -y \ No newline at end of file +brew install sshpass -y +brew install openssh -y \ No newline at end of file diff --git a/group_vars/all/main.yaml b/group_vars/all/main.yaml index cdfa933b..d4931867 100644 --- a/group_vars/all/main.yaml +++ b/group_vars/all/main.yaml @@ -12,8 +12,14 @@ env_host_prefix: 23 #default 23 for now env_network_type: OpenShiftSDN #set default OpenShiftSDN env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 env_fips: false #true or false, set default false + env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -env_sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' + +# Install links from: https://console.redhat.com/openshift/install/ibmz/user-provisioned to be used in getting OpenShift files +env_rhcos_initramfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img +env_rhcos_kernel: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img +env_rhcos_rootfs: 
https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img +env_qcow2_img: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz # to fill inventory env_ip_kvm_host: 9.60.87.132 @@ -26,13 +32,17 @@ env_ip_compute_0: 9.60.87.134 env_ip_compute_1: 9.60.87.135 # ssh -env_ssh_username: jacob #Username to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. -env_ssh_pass: ibmzrocks #Password to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. +env_ssh_username: jacob #Username to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. +env_ssh_pass: ibmzrocks #Password to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. # Ansible passwordless ssh env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. 
default=""(no password) +# OpenShift cluster's ssh key pair filename +env_ssh_ocp_name: ocp +env_ssh_ocp_pass: ibmzrocks + # networking dns_nameserver: 9.60.87.139 default_gateway: 9.60.86.1 diff --git a/group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ b/group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ new file mode 100644 index 00000000..cdfa933b --- /dev/null +++ b/group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ @@ -0,0 +1,40 @@ +# will populate with ansible_setup playbook +# BEGIN ANSIBLE MANAGED BLOCK + +# to populate install_config +env_baseDomain: ocpz.wsclab.endicott.ibm.com +env_compute_arch: s390x #default to s390x +env_control_count: 3 #default 3 +env_control_arch: s390x #default s390x +env_metadata_name: distribution +env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now +env_host_prefix: 23 #default 23 for now +env_network_type: OpenShiftSDN #set default OpenShiftSDN +env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 +env_fips: false #true or false, set default false +env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' +env_sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' + +# to fill inventory +env_ip_kvm_host: 9.60.87.132 +env_ip_bastion: 9.60.87.139 +env_ip_bootstrap: 9.60.87.133 +env_ip_control_0: 9.60.87.136 +env_ip_control_1: 9.60.87.137 +env_ip_control_2: 9.60.87.138 +env_ip_compute_0: 9.60.87.134 +env_ip_compute_1: 9.60.87.135 + +# ssh +env_ssh_username: jacob #Username to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. 
+env_ssh_pass: ibmzrocks #Password to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. + +# Ansible passwordless ssh +env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible +env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) + +# networking +dns_nameserver: 9.60.87.139 +default_gateway: 9.60.86.1 +netmask: 255.255.254.0 +# END ANSIBLE MANAGED BLOCK diff --git a/main.yaml b/main.yaml index 99364600..627cc7a5 100644 --- a/main.yaml +++ b/main.yaml @@ -28,7 +28,7 @@ - enable_packages - macvtap - mount_rhel - - create_bastion + #- create_bastion - hosts: localhost tags: localhost,bastion @@ -49,13 +49,13 @@ vars: - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: - #- check_ssh - #- install_packages - #- set_firewall - #- set_selinux_permissive - #- dns - #- haproxy - #- httpd + - check_ssh + - install_packages + - set_firewall + - set_selinux_permissive + - dns + - haproxy + - httpd - get-ocp - hosts: kvm_host diff --git a/roles/enable_packages/tasks/main.yaml b/roles/enable_packages/tasks/main.yaml index 7d87e5df..0cf851a7 100644 --- a/roles/enable_packages/tasks/main.yaml +++ b/roles/enable_packages/tasks/main.yaml @@ -2,7 +2,6 @@ - name: enable packages tags: kvm_host - ansible.builtin.service: - name: - - libvirtd - state: enabled \ No newline at end of file + service: + name: libvirtd + state: started \ No newline at end of file diff --git a/roles/mount_rhel/tasks/main.yaml b/roles/mount_rhel/tasks/main.yaml index b62d64ef..b86b3454 100644 --- a/roles/mount_rhel/tasks/main.yaml +++ b/roles/mount_rhel/tasks/main.yaml @@ -1,5 +1,17 @@ --- +- name: Check to see if local .ssh directory exists + tags: keymastr + stat: + path: "/rhcos-install/" + register: rhcos_mount + +- name: Print results of .ssh directory check + tags: keymastr + debug: + var: rhcos_mount + - name: Mount red hat core os 
install directory tags: kvm_host - command: mount -o loop /var/lib/libvirt/images/rhel83.iso /rhcos-install/ \ No newline at end of file + command: mount -o loop /var/lib/libvirt/images/rhel83.iso /rhcos-install/ + when: rhcos_mount is defined and rhcos_mount.stat.exists == false \ No newline at end of file diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml index 81e69a89..12cbe55e 100644 --- a/roles/ssh-ocp-key-gen/tasks/main.yaml +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -47,12 +47,12 @@ community.crypto.openssh_keypair: path: ~/.ssh/{{ env_ssh_ocp_name }} passphrase: "{{ env_ssh_ocp_pass }}" - register: public_key + register: ssh_ocp when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false - name: set public key fact set_fact: - env-ocp-pub: '{{ public_key }}' + env-ocp-pub: '{{ ssh_ocp.public_key }}' delegate_to: '{{ item }}' with_items: '{{ groups["bastion"] }}' diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index e87e40c7..7ad3e169 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -2,7 +2,7 @@ - name: distribute the ssh key to a remote host tags: ssh,ssh-copy-id - shell: "sshpass -p \"{{ env_ssh_pass }}\" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" + shell: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" register: ssh_copy_id_execution failed_when: ssh_copy_id_execution.rc != 0 From 028304ab3b08cbe1781f01d7da4797855e9116f9 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 24 Aug 2021 09:30:38 -0500 Subject: [PATCH 337/885] Deleted group_vars/all/main.yaml temporary backup file --- README.md | 14 +++---- ansible-setup-mac.sh | 3 +- 
.../all/main.yaml.1782.2021-08-23@17:21:08~ | 40 ------------------- 3 files changed, 8 insertions(+), 49 deletions(-) delete mode 100644 group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ diff --git a/README.md b/README.md index 16b427ea..b4ecea30 100644 --- a/README.md +++ b/README.md @@ -22,11 +22,11 @@ Pre-requisites: When you are ready: * Step 1: Download this Git repository to a folder on your local computer * Step 2: Go to to download your local command line tools (oc and kubectl) and to copy the following OpenShift links to use in the next step: - - pull secret - - RHCOS initramfs - - RHCOS kernel - - RHCOS rootfs - - QCOW2 image + * pull secret + * RHCOS initramfs + * RHCOS kernel + * RHCOS rootfs + * QCOW2 image * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. * Step 4: Run ansible-setup-linux/mac.sh shell script in the main directory (run ./ansible-setup-linux/mac.sh in terminal) depending on your operating system to download required Ansible modules and programs. First change permissions by running "chmod 755 " (choose one of linux or mac) @@ -38,11 +38,11 @@ When you are ready: - list options here - list options here * Step 7: When the playbooks for creating nodes run, watch them on the cockpit. When you see "< node-name > login" press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to abort. -* Step 8: approve certs... need more detail +* Step 8: approve certs... 
need more detail here * Step 9: Shutdown and destroy bootstrap (or optionally convert bootstrap to worker node) * Step 8: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" -Step 9: ./openshift-install create cluster +* Step 9: ./openshift-install create cluster Tags: * setup = first-time setup of ansible diff --git a/ansible-setup-mac.sh b/ansible-setup-mac.sh index ecdfe1ad..f918d37a 100755 --- a/ansible-setup-mac.sh +++ b/ansible-setup-mac.sh @@ -4,5 +4,4 @@ ansible-galaxy collection install community.general ansible-galaxy collection install community.crypto ansible-galaxy collection install ansible.posix ansible-galaxy collection install community.libvirt -brew install sshpass -y -brew install openssh -y \ No newline at end of file +brew install openssh \ No newline at end of file diff --git a/group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ b/group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ deleted file mode 100644 index cdfa933b..00000000 --- a/group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ +++ /dev/null @@ -1,40 +0,0 @@ -# will populate with ansible_setup playbook -# BEGIN ANSIBLE MANAGED BLOCK - -# to populate install_config -env_baseDomain: ocpz.wsclab.endicott.ibm.com -env_compute_arch: s390x #default to s390x -env_control_count: 3 #default 3 -env_control_arch: s390x #default s390x -env_metadata_name: distribution -env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now -env_host_prefix: 23 #default 23 for now -env_network_type: OpenShiftSDN #set default OpenShiftSDN -env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 -env_fips: false #true or false, set default false -env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -env_sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' - -# to fill inventory -env_ip_kvm_host: 9.60.87.132 -env_ip_bastion: 9.60.87.139 -env_ip_bootstrap: 9.60.87.133 -env_ip_control_0: 9.60.87.136 -env_ip_control_1: 9.60.87.137 -env_ip_control_2: 9.60.87.138 -env_ip_compute_0: 9.60.87.134 -env_ip_compute_1: 9.60.87.135 - -# ssh -env_ssh_username: jacob #Username to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. 
-env_ssh_pass: ibmzrocks #Password to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. - -# Ansible passwordless ssh -env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible -env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) - -# networking -dns_nameserver: 9.60.87.139 -default_gateway: 9.60.86.1 -netmask: 255.255.254.0 -# END ANSIBLE MANAGED BLOCK From 52beba240db2c6f9e65343c1ee887dcb613cd349 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 24 Aug 2021 09:30:38 -0500 Subject: [PATCH 338/885] Deleted group_vars/all/main.yaml temporary backup file --- README.md | 14 +++---- ansible-setup-mac.sh | 3 +- .../all/main.yaml.1782.2021-08-23@17:21:08~ | 40 ------------------- 3 files changed, 8 insertions(+), 49 deletions(-) delete mode 100644 group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ diff --git a/README.md b/README.md index 16b427ea..b4ecea30 100644 --- a/README.md +++ b/README.md @@ -22,11 +22,11 @@ Pre-requisites: When you are ready: * Step 1: Download this Git repository to a folder on your local computer * Step 2: Go to to download your local command line tools (oc and kubectl) and to copy the following OpenShift links to use in the next step: - - pull secret - - RHCOS initramfs - - RHCOS kernel - - RHCOS rootfs - - QCOW2 image + * pull secret + * RHCOS initramfs + * RHCOS kernel + * RHCOS rootfs + * QCOW2 image * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. * Step 4: Run ansible-setup-linux/mac.sh shell script in the main directory (run ./ansible-setup-linux/mac.sh in terminal) depending on your operating system to download required Ansible modules and programs. 
First change permissions by running "chmod 755 " (choose one of linux or mac) @@ -38,11 +38,11 @@ When you are ready: - list options here - list options here * Step 7: When the playbooks for creating nodes run, watch them on the cockpit. When you see "< node-name > login" press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to abort. -* Step 8: approve certs... need more detail +* Step 8: approve certs... need more detail here * Step 9: Shutdown and destroy bootstrap (or optionally convert bootstrap to worker node) * Step 8: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" -Step 9: ./openshift-install create cluster +* Step 9: ./openshift-install create cluster Tags: * setup = first-time setup of ansible diff --git a/ansible-setup-mac.sh b/ansible-setup-mac.sh index ecdfe1ad..f918d37a 100755 --- a/ansible-setup-mac.sh +++ b/ansible-setup-mac.sh @@ -4,5 +4,4 @@ ansible-galaxy collection install community.general ansible-galaxy collection install community.crypto ansible-galaxy collection install ansible.posix ansible-galaxy collection install community.libvirt -brew install sshpass -y -brew install openssh -y \ No newline at end of file +brew install openssh \ No newline at end of file diff --git a/group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ b/group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ deleted file mode 100644 index cdfa933b..00000000 --- a/group_vars/all/main.yaml.1782.2021-08-23@17:21:08~ +++ /dev/null @@ -1,40 +0,0 @@ -# will populate with ansible_setup playbook -# BEGIN ANSIBLE MANAGED BLOCK - -# to populate install_config -env_baseDomain: ocpz.wsclab.endicott.ibm.com -env_compute_arch: s390x #default to s390x -env_control_count: 3 #default 3 -env_control_arch: s390x #default s390x -env_metadata_name: distribution -env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now -env_host_prefix: 23 #default 23 for now -env_network_type: OpenShiftSDN #set 
default OpenShiftSDN -env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 -env_fips: false #true or false, set default false -env_pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYV
gwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -env_sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' - -# to fill inventory -env_ip_kvm_host: 9.60.87.132 -env_ip_bastion: 9.60.87.139 -env_ip_bootstrap: 9.60.87.133 -env_ip_control_0: 9.60.87.136 -env_ip_control_1: 9.60.87.137 -env_ip_control_2: 9.60.87.138 -env_ip_compute_0: 9.60.87.134 -env_ip_compute_1: 9.60.87.135 - -# ssh -env_ssh_username: jacob #Username to use for 
ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. -env_ssh_pass: ibmzrocks #Password to use for ssh to kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. - -# Ansible passwordless ssh -env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible -env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) - -# networking -dns_nameserver: 9.60.87.139 -default_gateway: 9.60.86.1 -netmask: 255.255.254.0 -# END ANSIBLE MANAGED BLOCK From 79d10a498cf1b9ebaa3c08b7fa426bcf30105b4b Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 24 Aug 2021 09:34:32 -0500 Subject: [PATCH 339/885] Deleted group_vars/all --- group_vars/all/main.yaml | 50 ---------------------------------------- 1 file changed, 50 deletions(-) delete mode 100644 group_vars/all/main.yaml diff --git a/group_vars/all/main.yaml b/group_vars/all/main.yaml deleted file mode 100644 index d4931867..00000000 --- a/group_vars/all/main.yaml +++ /dev/null @@ -1,50 +0,0 @@ -# will populate with ansible_setup playbook -# BEGIN ANSIBLE MANAGED BLOCK - -# to populate install_config -env_baseDomain: ocpz.wsclab.endicott.ibm.com -env_compute_arch: s390x #default to s390x -env_control_count: 3 #default 3 -env_control_arch: s390x #default s390x -env_metadata_name: distribution -env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now -env_host_prefix: 23 #default 23 for now -env_network_type: OpenShiftSDN #set default OpenShiftSDN -env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 -env_fips: false #true or false, set default false - -env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' - -# Install links from: https://console.redhat.com/openshift/install/ibmz/user-provisioned to be used in getting OpenShift files -env_rhcos_initramfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img -env_rhcos_kernel: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img -env_rhcos_rootfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img -env_qcow2_img: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz - -# to fill inventory -env_ip_kvm_host: 9.60.87.132 -env_ip_bastion: 9.60.87.139 -env_ip_bootstrap: 9.60.87.133 -env_ip_control_0: 9.60.87.136 -env_ip_control_1: 9.60.87.137 -env_ip_control_2: 9.60.87.138 -env_ip_compute_0: 9.60.87.134 -env_ip_compute_1: 9.60.87.135 - -# ssh -env_ssh_username: jacob #Username to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. -env_ssh_pass: ibmzrocks #Password to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. 
- -# Ansible passwordless ssh -env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible -env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) - -# OpenShift cluster's ssh key pair filename -env_ssh_ocp_name: ocp -env_ssh_ocp_pass: ibmzrocks - -# networking -dns_nameserver: 9.60.87.139 -default_gateway: 9.60.86.1 -netmask: 255.255.254.0 -# END ANSIBLE MANAGED BLOCK From a2ebda50de496b692253c87dace4e349a226152c Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 24 Aug 2021 09:34:32 -0500 Subject: [PATCH 340/885] Deleted group_vars/all --- group_vars/all/main.yaml | 50 ---------------------------------------- 1 file changed, 50 deletions(-) delete mode 100644 group_vars/all/main.yaml diff --git a/group_vars/all/main.yaml b/group_vars/all/main.yaml deleted file mode 100644 index d4931867..00000000 --- a/group_vars/all/main.yaml +++ /dev/null @@ -1,50 +0,0 @@ -# will populate with ansible_setup playbook -# BEGIN ANSIBLE MANAGED BLOCK - -# to populate install_config -env_baseDomain: ocpz.wsclab.endicott.ibm.com -env_compute_arch: s390x #default to s390x -env_control_count: 3 #default 3 -env_control_arch: s390x #default s390x -env_metadata_name: distribution -env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now -env_host_prefix: 23 #default 23 for now -env_network_type: OpenShiftSDN #set default OpenShiftSDN -env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 -env_fips: false #true or false, set default false - -env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' - -# Install links from: https://console.redhat.com/openshift/install/ibmz/user-provisioned to be used in getting OpenShift files -env_rhcos_initramfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img -env_rhcos_kernel: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img -env_rhcos_rootfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img -env_qcow2_img: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz - -# to fill inventory -env_ip_kvm_host: 9.60.87.132 -env_ip_bastion: 9.60.87.139 -env_ip_bootstrap: 9.60.87.133 -env_ip_control_0: 9.60.87.136 -env_ip_control_1: 9.60.87.137 -env_ip_control_2: 9.60.87.138 -env_ip_compute_0: 9.60.87.134 -env_ip_compute_1: 9.60.87.135 - -# ssh -env_ssh_username: jacob #Username to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. -env_ssh_pass: ibmzrocks #Password to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. 
- -# Ansible passwordless ssh -env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible -env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) - -# OpenShift cluster's ssh key pair filename -env_ssh_ocp_name: ocp -env_ssh_ocp_pass: ibmzrocks - -# networking -dns_nameserver: 9.60.87.139 -default_gateway: 9.60.86.1 -netmask: 255.255.254.0 -# END ANSIBLE MANAGED BLOCK From 87591e1ea6e456ad6741134db42624b9775368f0 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 24 Aug 2021 10:19:50 -0500 Subject: [PATCH 341/885] adding CHANGELOG.md to repo --- CHANGELOG.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..ea8cea0e --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,13 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +## [Unreleased] + +## [0.0.1] - 2019-02-15 + +### Added +- Added a changelog + +[unreleased]: https://github.com/ibm/repo-template/compare/v0.0.1...HEAD +[0.0.1]: https://github.com/ibm/repo-template/releases/tag/v0.0.1 From 9fe115fbe531922584263234dc90f6ff3aa8b2ca Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 24 Aug 2021 10:19:50 -0500 Subject: [PATCH 342/885] adding CHANGELOG.md to repo --- CHANGELOG.md | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..ea8cea0e --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,13 @@ +# Changelog + +All notable changes to this project will be documented in this file. 
+ +## [Unreleased] + +## [0.0.1] - 2019-02-15 + +### Added +- Added a changelog + +[unreleased]: https://github.com/ibm/repo-template/compare/v0.0.1...HEAD +[0.0.1]: https://github.com/ibm/repo-template/releases/tag/v0.0.1 From 53e6bfe3b11ed774ba4d3ead0324b40e6be87ea1 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 24 Aug 2021 10:20:42 -0500 Subject: [PATCH 343/885] Adding License to repo per template --- LICENSE | 202 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..8f71f43f --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + From 4d59af776a7086a9c378f342ef190ea62f9bc1f3 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 24 Aug 2021 10:20:42 -0500 Subject: [PATCH 344/885] Adding License to repo per template --- LICENSE | 202 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 202 insertions(+) create mode 100644 LICENSE diff --git a/LICENSE b/LICENSE new file mode 100644 index 00000000..8f71f43f --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + From 8f2932e10b21fcad261e634d770b7cf70a523143 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 24 Aug 2021 11:59:01 -0500 Subject: [PATCH 345/885] Github integration to enforce Developer Certificate of Origin in repo --- .github/dco.yml | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 .github/dco.yml diff --git a/.github/dco.yml b/.github/dco.yml new file mode 100644 index 00000000..de6cd3bf --- /dev/null +++ b/.github/dco.yml @@ -0,0 +1,4 @@ +# This enables DCO bot for you, please take a look https://github.com/probot/dco +# for more details. +require: + members: false From f51c938f18adaca9fd2ebd439d5b212b17136873 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 24 Aug 2021 11:59:01 -0500 Subject: [PATCH 346/885] Github integration to enforce Developer Certificate of Origin in repo --- .github/dco.yml | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 .github/dco.yml diff --git a/.github/dco.yml b/.github/dco.yml new file mode 100644 index 00000000..de6cd3bf --- /dev/null +++ b/.github/dco.yml @@ -0,0 +1,4 @@ +# This enables DCO bot for you, please take a look https://github.com/probot/dco +# for more details. 
+require: + members: false From 1451e74ad5e929c345fc5d423c1cf06d3f9e4b62 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 24 Aug 2021 13:43:29 -0500 Subject: [PATCH 347/885] Updated CHANGELOG to reflect our repo from template example --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ea8cea0e..112424b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,10 +4,10 @@ All notable changes to this project will be documented in this file. ## [Unreleased] -## [0.0.1] - 2019-02-15 +## [0.0.1] - 2021-08-24 ### Added - Added a changelog -[unreleased]: https://github.com/ibm/repo-template/compare/v0.0.1...HEAD -[0.0.1]: https://github.com/ibm/repo-template/releases/tag/v0.0.1 +[unreleased]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v0.0.1...HEAD +[0.0.1]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v0.0.1 From 9c6823cb27cb681fd077505c813e89aa73d7b26f Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 24 Aug 2021 13:43:29 -0500 Subject: [PATCH 348/885] Updated CHANGELOG to reflect our repo from template example --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ea8cea0e..112424b5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,10 +4,10 @@ All notable changes to this project will be documented in this file. 
## [Unreleased] -## [0.0.1] - 2019-02-15 +## [0.0.1] - 2021-08-24 ### Added - Added a changelog -[unreleased]: https://github.com/ibm/repo-template/compare/v0.0.1...HEAD -[0.0.1]: https://github.com/ibm/repo-template/releases/tag/v0.0.1 +[unreleased]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v0.0.1...HEAD +[0.0.1]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v0.0.1 From 029c4abe36bb4dc8f72c7f107464f67ce669ba03 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 24 Aug 2021 13:45:56 -0500 Subject: [PATCH 349/885] Added formatting to Readme --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b4ecea30..813ff097 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,10 @@ # Ansible-OpenShift-Provisioning +## Scope + The goal of this playbook is to setup and deploy an OpenShift cluster utilizing KVM as the virtualization method. -Supported operating systems for the localhost (the starting workstation) are: + +## Supported operating systems for the localhost (the starting workstation) are: * Linux (RedHat and Debian families) * Unix and Unix-like (i.e. MacOS X) From 949fc69277f5af285a2b73583a933ec3fce34a4e Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 24 Aug 2021 13:45:56 -0500 Subject: [PATCH 350/885] Added formatting to Readme --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b4ecea30..813ff097 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,10 @@ # Ansible-OpenShift-Provisioning +## Scope + The goal of this playbook is to setup and deploy an OpenShift cluster utilizing KVM as the virtualization method. -Supported operating systems for the localhost (the starting workstation) are: + +## Supported operating systems for the localhost (the starting workstation) are: * Linux (RedHat and Debian families) * Unix and Unix-like (i.e. 
MacOS X) From f625dc34daff88a81f31673ec647584132f93c67 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 24 Aug 2021 14:06:59 -0500 Subject: [PATCH 351/885] Added formatting to Readme --- README.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 813ff097..8d695eb0 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,8 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing * Linux (RedHat and Debian families) * Unix and Unix-like (i.e. MacOS X) -Pre-requisites: +## Pre-requisites: + * Python3 intalled on your local computer [how-to:] (https://realpython.com/installing-python/) * Ansible installed on your local computer [how-to:] (https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) * If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: @@ -22,7 +23,8 @@ Pre-requisites: * On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses * Fully Qualified Domain Names (FQDN) names for all IPv4 addresses -When you are ready: +## When you are ready: + * Step 1: Download this Git repository to a folder on your local computer * Step 2: Go to to download your local command line tools (oc and kubectl) and to copy the following OpenShift links to use in the next step: * pull secret @@ -47,7 +49,8 @@ When you are ready: "./openshift-install --dir=/ocpinst wait-for install-complete" * Step 9: ./openshift-install create cluster -Tags: +## Tags: + * setup = first-time setup of ansible * bastion = configuration of bastion for OCP * keymastr = ssh key configuration and testing From a057fdedc4421173dad9ae7891d7ae552fa3de41 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Tue, 24 Aug 2021 14:06:59 -0500 Subject: [PATCH 352/885] Added formatting to Readme --- README.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 813ff097..8d695eb0 100644 --- a/README.md +++ 
b/README.md @@ -8,7 +8,8 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing * Linux (RedHat and Debian families) * Unix and Unix-like (i.e. MacOS X) -Pre-requisites: +## Pre-requisites: + * Python3 intalled on your local computer [how-to:] (https://realpython.com/installing-python/) * Ansible installed on your local computer [how-to:] (https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) * If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: @@ -22,7 +23,8 @@ Pre-requisites: * On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses * Fully Qualified Domain Names (FQDN) names for all IPv4 addresses -When you are ready: +## When you are ready: + * Step 1: Download this Git repository to a folder on your local computer * Step 2: Go to to download your local command line tools (oc and kubectl) and to copy the following OpenShift links to use in the next step: * pull secret @@ -47,7 +49,8 @@ When you are ready: "./openshift-install --dir=/ocpinst wait-for install-complete" * Step 9: ./openshift-install create cluster -Tags: +## Tags: + * setup = first-time setup of ansible * bastion = configuration of bastion for OCP * keymastr = ssh key configuration and testing From 746b70d250c5826c7e164732f41a9e3dec758bf9 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 24 Aug 2021 14:54:30 -0500 Subject: [PATCH 353/885] Debugging variables run on MacOS --- ansible.cfg | 7 +++---- inventory | 4 ++++ main.yaml | 2 +- roles/ansible_setup/tasks/main.yaml | 21 ++++++++++++++------- roles/ssh-ocp-key-gen/tasks/main.yaml | 17 +++++++++++------ 5 files changed, 33 insertions(+), 18 deletions(-) diff --git a/ansible.cfg b/ansible.cfg index e2b3c25f..bf674386 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,7 +1,6 @@ [defaults] -inventory = inventory -private_key_file = ~/.ssh/ansible -# BEGIN ANSIBLE MANAGED BLOCK +inventory=inventory 
+private_key_file=~/.ssh/ansible [inventory] cache=True -# END ANSIBLE MANAGED BLOCK +private_key_file = ~/.ssh/ansible diff --git a/inventory b/inventory index 0e672395..0b3481a3 100755 --- a/inventory +++ b/inventory @@ -2,6 +2,10 @@ [localhost] 127.0.0.1 + +[localhost:vars] +ansible_python_interpreter=/usr/bin/python3 + # BEGIN ANSIBLE MANAGED BLOCK [kvm_host] 9.60.87.132 diff --git a/main.yaml b/main.yaml index 627cc7a5..e219c222 100644 --- a/main.yaml +++ b/main.yaml @@ -50,7 +50,7 @@ - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: - check_ssh - - install_packages + #- install_packages - set_firewall - set_selinux_permissive - dns diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 1c9c1ed3..93c534e5 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -80,13 +80,13 @@ netmask: "{{ netmask }}" cacheable: yes -- name: Add the contents of user-input variables from env.yaml to group_vars/all folder to persist across hosts - tags: setup - blockinfile: - path: group_vars/all/main.yaml - block: "{{ lookup('file', 'env.yaml') }}" - state: present - backup: yes +#- name: Add the contents of user-input variables from env.yaml to group_vars/all folder to persist across hosts +# tags: setup +# blockinfile: +# path: group_vars/all/main.yaml +# block: "{{ lookup('file', 'env.yaml') }}" +# state: present +# backup: yes - name: Populate inventory file with ip variables from env.yaml tags: setup @@ -119,6 +119,13 @@ register: inv_check failed_when: inv_check.rc != 0 +- name: fill ansible.cfg with given variable name for ansible passwordless ssh setup + tags: setup + ansible.builtin.lineinfile: + path: ansible.cfg + regexp: '^private_key_file=' + line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} + - name: Ansible generic setup to re-read inventory file after populated in previous tasks tags: setup ansible.builtin.gather_facts: \ No newline at end of file 
diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml index 12cbe55e..9c118cc9 100644 --- a/roles/ssh-ocp-key-gen/tasks/main.yaml +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -45,15 +45,20 @@ - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key tags: keymastr community.crypto.openssh_keypair: - path: ~/.ssh/{{ env_ssh_ocp_name }} + path: ~/.ssh/"{{ env_ssh_ocp_name }}" passphrase: "{{ env_ssh_ocp_pass }}" + backend: opensshbin register: ssh_ocp when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false +- name: Print results of ssh key generation + tags: keymastr + debug: + var: ssh_ocp + when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false + - name: set public key fact set_fact: - env-ocp-pub: '{{ ssh_ocp.public_key }}' - delegate_to: '{{ item }}' - with_items: '{{ groups["bastion"] }}' - - \ No newline at end of file + env_ssh_key_ocp: "{{ ssh_ocp.public_key }}" + cacheable: yes + when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false \ No newline at end of file From 03174690a494d9366bfc95f006730fe58d89f023 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 24 Aug 2021 14:54:30 -0500 Subject: [PATCH 354/885] Debugging variables run on MacOS --- ansible.cfg | 7 +++---- inventory | 4 ++++ main.yaml | 2 +- roles/ansible_setup/tasks/main.yaml | 21 ++++++++++++++------- roles/ssh-ocp-key-gen/tasks/main.yaml | 17 +++++++++++------ 5 files changed, 33 insertions(+), 18 deletions(-) diff --git a/ansible.cfg b/ansible.cfg index e2b3c25f..bf674386 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,7 +1,6 @@ [defaults] -inventory = inventory -private_key_file 
= ~/.ssh/ansible -# BEGIN ANSIBLE MANAGED BLOCK +inventory=inventory +private_key_file=~/.ssh/ansible [inventory] cache=True -# END ANSIBLE MANAGED BLOCK +private_key_file = ~/.ssh/ansible diff --git a/inventory b/inventory index 0e672395..0b3481a3 100755 --- a/inventory +++ b/inventory @@ -2,6 +2,10 @@ [localhost] 127.0.0.1 + +[localhost:vars] +ansible_python_interpreter=/usr/bin/python3 + # BEGIN ANSIBLE MANAGED BLOCK [kvm_host] 9.60.87.132 diff --git a/main.yaml b/main.yaml index 627cc7a5..e219c222 100644 --- a/main.yaml +++ b/main.yaml @@ -50,7 +50,7 @@ - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: - check_ssh - - install_packages + #- install_packages - set_firewall - set_selinux_permissive - dns diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 1c9c1ed3..93c534e5 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -80,13 +80,13 @@ netmask: "{{ netmask }}" cacheable: yes -- name: Add the contents of user-input variables from env.yaml to group_vars/all folder to persist across hosts - tags: setup - blockinfile: - path: group_vars/all/main.yaml - block: "{{ lookup('file', 'env.yaml') }}" - state: present - backup: yes +#- name: Add the contents of user-input variables from env.yaml to group_vars/all folder to persist across hosts +# tags: setup +# blockinfile: +# path: group_vars/all/main.yaml +# block: "{{ lookup('file', 'env.yaml') }}" +# state: present +# backup: yes - name: Populate inventory file with ip variables from env.yaml tags: setup @@ -119,6 +119,13 @@ register: inv_check failed_when: inv_check.rc != 0 +- name: fill ansible.cfg with given variable name for ansible passwordless ssh setup + tags: setup + ansible.builtin.lineinfile: + path: ansible.cfg + regexp: '^private_key_file=' + line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} + - name: Ansible generic setup to re-read inventory file after populated in previous tasks 
tags: setup ansible.builtin.gather_facts: \ No newline at end of file diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml index 12cbe55e..9c118cc9 100644 --- a/roles/ssh-ocp-key-gen/tasks/main.yaml +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -45,15 +45,20 @@ - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key tags: keymastr community.crypto.openssh_keypair: - path: ~/.ssh/{{ env_ssh_ocp_name }} + path: ~/.ssh/"{{ env_ssh_ocp_name }}" passphrase: "{{ env_ssh_ocp_pass }}" + backend: opensshbin register: ssh_ocp when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false +- name: Print results of ssh key generation + tags: keymastr + debug: + var: ssh_ocp + when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false + - name: set public key fact set_fact: - env-ocp-pub: '{{ ssh_ocp.public_key }}' - delegate_to: '{{ item }}' - with_items: '{{ groups["bastion"] }}' - - \ No newline at end of file + env_ssh_key_ocp: "{{ ssh_ocp.public_key }}" + cacheable: yes + when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false \ No newline at end of file From b6f3179a5ba2a9e0482963d1d198ecbe4c27246e Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 24 Aug 2021 16:55:41 -0500 Subject: [PATCH 355/885] Cleaned up main directory and debugged while implementing roles --- build_script.sh | 23 ------------ copy-image.yaml | 28 --------------- env.yaml | 6 ---- group_vars/bastion/main.yaml | 0 list_vms.yaml | 16 --------- roles/ansible_setup/tasks/main.yaml | 22 ++++-------- roles/get-ocp/tasks/main.yaml | 10 +++--- .../get-ocp/templates/install-config.yaml.j2 | 2 +- 
roles/httpd/tasks/main.yaml | 16 --------- roles/set_firewall/tasks/main.yaml | 23 ++++++++++++ scripts-naranja/2-generate-bin-tree.sh | 23 ------------ scripts-naranja/3-generate-ignitions.sh | 20 ----------- scripts-naranja/4-make-bootstrap-vm.sh | 12 ------- scripts-naranja/4-make-master-vms.sh | 26 -------------- scripts-naranja/4-make-worker-vms.sh | 26 -------------- scripts-naranja/env | 35 ------------------- scripts-naranja/install-config.yaml | 26 -------------- setup-mgmt-user.yaml | 28 --------------- 18 files changed, 36 insertions(+), 306 deletions(-) delete mode 100644 build_script.sh delete mode 100644 copy-image.yaml create mode 100644 group_vars/bastion/main.yaml delete mode 100644 list_vms.yaml delete mode 100644 scripts-naranja/2-generate-bin-tree.sh delete mode 100644 scripts-naranja/3-generate-ignitions.sh delete mode 100644 scripts-naranja/4-make-bootstrap-vm.sh delete mode 100644 scripts-naranja/4-make-master-vms.sh delete mode 100644 scripts-naranja/4-make-worker-vms.sh delete mode 100644 scripts-naranja/env delete mode 100644 scripts-naranja/install-config.yaml delete mode 100644 setup-mgmt-user.yaml diff --git a/build_script.sh b/build_script.sh deleted file mode 100644 index 52a90971..00000000 --- a/build_script.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -# Created by Phillip - -##for when we start templating: https://searchservervirtualization.techtarget.com/tip/Expedite-Ansible-KVM-provisioning-with-automation - -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G -#qemu-img create -f qcow2 -b 
/var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G - -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect 
qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img 
coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/copy-image.yaml b/copy-image.yaml deleted file mode 100644 index 13be2c2c..00000000 --- a/copy-image.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -# tasks to check if kvm image file is present and copy if it is not - -- hosts: all - become: true - tasks: - - - name: check to see if kvm image file is present - stat: - path: /tmp/rhel-guest-image-8.3-400.s390x.qcow2 - get_checksum: no - get_mime: no - get_attributes: no - register: os_disk_file - - - name: fail if image file exists - fail: - msg: "Image file exists" - when: os_disk_file is true - - - name: copy kvm image to kvm host(s) - copy: - src: rhel-guest-image-8.3-400.s390x.qcow2 - dest: /tmp/rhel-guest-image-8.3-400.s390x.qcow2 - owner: root - owner: root - mode: 0644 - diff --git a/env.yaml b/env.yaml index fdd57705..4acb8125 100644 --- a/env.yaml +++ b/env.yaml @@ -13,12 +13,6 @@ env_fips: false #true or false, set default false env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -# Install links from: https://console.redhat.com/openshift/install/ibmz/user-provisioned to be used in getting OpenShift files -env_rhcos_initramfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img -env_rhcos_kernel: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img -env_rhcos_rootfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img -env_qcow2_img: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz - # to fill inventory env_ip_kvm_host: 9.60.87.132 env_ip_bastion: 9.60.87.139 diff --git a/group_vars/bastion/main.yaml b/group_vars/bastion/main.yaml new file mode 100644 index 00000000..e69de29b diff --git a/list_vms.yaml b/list_vms.yaml deleted file mode 100644 index 3c82bab5..00000000 --- a/list_vms.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - -- hosts: kvm_host -- tasks: - - - name: list all VMs - community.libvirt.virt: - command: list_vms - register: running_vms - - - name: Print running vms - ansible.builtin.debug: - var: running_vms - verbosity: 0 - - diff --git 
a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 93c534e5..c2a1700e 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -21,10 +21,6 @@ - env_service_network - env_fips - env_pullSecret - - env_rhcos_initramfs - - env_rhcos_kernel - - env_rhcos_rootfs - - env_qcow2_img - env_ip_kvm_host - env_ip_bastion - env_ip_bootstrap @@ -57,10 +53,6 @@ env_service_network: "{{ env_service_network }}" env_fips: "{{ env_fips }}" env_pullSecret: "{{ env_pullSecret }}" - env_rhcos_initramfs: "{{ env_rhcos_initramfs }}" - env_rhcos_kernel: "{{ env_rhcos_kernel }}" - env_rhcos_rootfs: "{{ env_rhcos_rootfs }}" - env_qcow2_img: "{{ env_qcow2_img }}" env_ip_kvm_host: "{{ env_ip_kvm_host }}" env_ip_bastion: "{{ env_ip_bastion }}" env_ip_bootstrap: "{{ env_ip_bootstrap }}" @@ -80,13 +72,13 @@ netmask: "{{ netmask }}" cacheable: yes -#- name: Add the contents of user-input variables from env.yaml to group_vars/all folder to persist across hosts -# tags: setup -# blockinfile: -# path: group_vars/all/main.yaml -# block: "{{ lookup('file', 'env.yaml') }}" -# state: present -# backup: yes +- name: Add the contents of user-input variables from env.yaml to group_vars/all folder to persist across hosts + tags: setup + blockinfile: + path: group_vars/all/main.yaml + block: "{{ lookup('file', 'env.yaml') }}" + state: present + backup: yes - name: Populate inventory file with ip variables from env.yaml tags: setup diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index 8b6bf52b..d19d900d 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -14,21 +14,21 @@ - name: get ocp kernel tags: getocp,bastion get_url: - url: "{{ env_rhcos_kernel }}" + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin mode: '0755' - name: get ocp initramfs tags: getocp,bastion get_url: - url: "{{ 
env_rhcos_initramfs }}" + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/www/html/bin mode: '0755' - name: get ocp rootfs tags: getocp,bastion get_url: - url: "{{ env_rhcos_rootfs }}" + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin mode: '0755' @@ -41,14 +41,14 @@ - name: Unzip OCP Client tags: getocp,bastion ansible.builtin.unarchive: - src: "{{ env_ocp_client | default(https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz) }}" + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz dest: /ocpinst/ remote_src: yes - name: Unzip OCP Installer tags: getocp,bastion ansible.builtin.unarchive: - src: "{{ env_ocp_installer | default(https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz) }}" + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz dest: /ocpinst/ remote_src: yes diff --git a/roles/get-ocp/templates/install-config.yaml.j2 b/roles/get-ocp/templates/install-config.yaml.j2 index 2c9f386b..1e933d6d 100644 --- a/roles/get-ocp/templates/install-config.yaml.j2 +++ b/roles/get-ocp/templates/install-config.yaml.j2 @@ -23,4 +23,4 @@ baseDomain: "{{ env_baseDomain }}" "none\: {}" fips: "{{ env_fips | default(false) }}" pullSecret: "{{ env_pullSecret }}" -sshKey: "{{ env_ocp_pub }}" \ No newline at end of file +sshKey: "{{ env_ssh_key_ocp }}" \ No newline at end of file diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index 9f3ae813..f74a8762 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -12,22 +12,6 @@ name: httpd enabled: yes -- name: Ensure the default Apache port is 8080 - tags: httpconf,bastion - replace: - path: 
/etc/httpd/conf/httpd.conf - regexp: '^Listen 80' - replace: 'Listen 8080' - backup: yes - -- name: Ensure the SSL default port is 4443 - tags: httpconf,bastion - replace: - path: /etc/httpd/conf.d/ssl.conf - regexp: '^Listen 443 https' - replace: 'Listen 4443 https' - backup: yes - - name: restart httpd service: name: httpd diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index 4b66144a..08c648b6 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -28,6 +28,13 @@ permanent: yes state: enabled +- name: Allow all access to tcp port 6443 + tags: firewall,bastion + ansible.posix.firewalld: + port: 6443/tcp + permanent: yes + state: enabled + - name: Permit traffic in default zone for http tags: firewall,bastion ansible.posix.firewalld: @@ -42,6 +49,22 @@ permanent: yes state: enabled +- name: Ensure the default Apache port is 8080 + tags: httpconf,bastion + lineinfile: + path: /etc/httpd/conf/httpd.conf + search_string: 'Listen 80' + line: 'Listen 8080' + backup: yes + +- name: Ensure the SSL default port is 4443 + tags: httpconf,bastion + replace: + path: /etc/httpd/conf.d/ssl.conf + regexp: '^Listen 443 https' + replace: 'Listen 4443 https' + backup: yes + - name: restart httpd service: name: httpd diff --git a/scripts-naranja/2-generate-bin-tree.sh b/scripts-naranja/2-generate-bin-tree.sh deleted file mode 100644 index 5fa1edac..00000000 --- a/scripts-naranja/2-generate-bin-tree.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -. 
./env - -# create ocp and ignitions directories in http server -mkdir -p /var/www/html/ocp/ignitions - -# copy binaries to http server directory -cp /root/ocpbin/rhcos-live-initramfs.s390x.img /var/www/html/${CLUSTER_NAME}/${INITRAMFS} -cp /root/ocpbin/rhcos-live-kernel-s390x /var/www/html/${CLUSTER_NAME}/${KERNEL} -cp /root/ocpbin/rhcos-live-rootfs.s390x.img /var/www/html/${CLUSTER_NAME}/${ROOTFS} - -#generating .treeinfo file to be read by --location parameter -cat << EOF >> /var/www/html/${CLUSTER_NAME}/.treeinfo -[general] -arch = ${ARCHITECTURE} -family = Red Hat CoreOS -platforms = ${ARCHITECTURE} -version = ${OCP_RELEASE} -[images-${ARCHITECTURE}] -initrd = ${INITRAMFS} -kernel = ${KERNEL} -EOF diff --git a/scripts-naranja/3-generate-ignitions.sh b/scripts-naranja/3-generate-ignitions.sh deleted file mode 100644 index 3319a9ec..00000000 --- a/scripts-naranja/3-generate-ignitions.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -rm -rf ignitions -mkdir -p ignitions -cp install-config.yaml ignitions - -# create kubernetes manifests -openshift-install create manifests --dir=./ignitions - -# ensure masters are not schedulable -sed -i 's/mastersSchedulable: true/mastersSchedulable: false/g' ignitions/manifests/cluster-scheduler-02-config.yml - -# create ignition config files -openshift-install create ignition-configs --dir=./ignitions - -# copy ign files to http server directory -cp ./ignitions/*.ign /var/www/html/ocp/ignitions - -# setting permissions in http server directory for binaries and ignitions -chmod -R 777 /var/www/html/ocp diff --git a/scripts-naranja/4-make-bootstrap-vm.sh b/scripts-naranja/4-make-bootstrap-vm.sh deleted file mode 100644 index ce239e82..00000000 --- a/scripts-naranja/4-make-bootstrap-vm.sh +++ /dev/null @@ -1,12 +0,0 @@ -#/bin/bash - -. 
./env - -echo "using LOCATION: ${LOCATION}" - - virt-install --name bootstrap \ - --disk ${VIRT_IMAGE_DIR}/bootstrap.qcow2 --ram 16000 --cpu host --vcpus 4 \ - --os-type linux --os-variant rhel8.0 \ - --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ - --location ${LOCATION} \ - --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${BOOTSTRAP_IP}::${DEFAULT_GW}:${SUBNET_MASK}:bootstrap::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/bootstrap.ign" diff --git a/scripts-naranja/4-make-master-vms.sh b/scripts-naranja/4-make-master-vms.sh deleted file mode 100644 index 72dedc27..00000000 --- a/scripts-naranja/4-make-master-vms.sh +++ /dev/null @@ -1,26 +0,0 @@ -#/bin/bash - -. ./env - -echo "LOCATION: ${LOCATION}" - - virt-install --name master1 \ - --disk ${VIRT_IMAGE_DIR}/master1.qcow2 --ram 16000 --cpu host --vcpus 4 \ - --os-type linux --os-variant rhel8.0 \ - --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ - --location ${LOCATION} \ - --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${MASTER1_IP}::${DEFAULT_GW}:${SUBNET_MASK}:master1::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/master.ign" - - virt-install --name master2 \ - --disk ${VIRT_IMAGE_DIR}/master2.qcow2 --ram 16000 --cpu host --vcpus 4 \ - --os-type linux --os-variant rhel8.0 \ - --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ - --location ${LOCATION} \ - --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${MASTER2_IP}::${DEFAULT_GW}:${SUBNET_MASK}:master2::none:1500 
nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/master.ign" - - virt-install --name master3 \ - --disk ${VIRT_IMAGE_DIR}/master3.qcow2 --ram 16000 --cpu host --vcpus 4 \ - --os-type linux --os-variant rhel8.0 \ - --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ - --location ${LOCATION} \ - --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${MASTER3_IP}::${DEFAULT_GW}:${SUBNET_MASK}:master3::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/master.ign" diff --git a/scripts-naranja/4-make-worker-vms.sh b/scripts-naranja/4-make-worker-vms.sh deleted file mode 100644 index 2556b4d5..00000000 --- a/scripts-naranja/4-make-worker-vms.sh +++ /dev/null @@ -1,26 +0,0 @@ -#/bin/bash - -. ./env - -echo "LOCATION: ${LOCATION}" - - virt-install --name worker1 \ - --disk ${VIRT_IMAGE_DIR}/worker1.qcow2 --ram 32000 --cpu host --vcpus 8 \ - --os-type linux --os-variant rhel8.0 \ - --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ - --location ${LOCATION} \ - --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${WORKER1_IP}::${DEFAULT_GW}:${SUBNET_MASK}:worker1::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/worker.ign" - - virt-install --name worker2 \ - --disk ${VIRT_IMAGE_DIR}/worker2.qcow2 --ram 32000 --cpu host --vcpus 8 \ - --os-type linux --os-variant rhel8.0 \ - --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ - --location ${LOCATION} \ - --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} 
ip=${WORKER2_IP}::${DEFAULT_GW}:${SUBNET_MASK}:worker2::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/worker.ign" - - virt-install --name worker3 \ - --disk ${VIRT_IMAGE_DIR}/worker3.qcow2 --ram 32000 --cpu host --vcpus 8 \ - --os-type linux --os-variant rhel8.0 \ - --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ - --location ${LOCATION} \ - --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${WORKER3_IP}::${DEFAULT_GW}:${SUBNET_MASK}:worker3::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/worker.ign" diff --git a/scripts-naranja/env b/scripts-naranja/env deleted file mode 100644 index 2ffafcf9..00000000 --- a/scripts-naranja/env +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -OCP_RELEASE="4.7.13" -ARCHITECTURE="s390x" - -# VIRT_IMAGE_DIR is the diretory where the KVM guest virtual disks will be stored -VIRT_IMAGE_DIR=/var/lib/libvirt/images - -# CLUSTER_NAME will be used as a prefix to name the KVM guests, amoung oter things -CLUSTER_NAME="ocp" - -# HOST_IP and WEB_PORT together will be the HTTP server where the images can be downloaded from -HOST_IP="10.10.195.89" -WEB_PORT="8080" -LOCATION="http://${HOST_IP}:${WEB_PORT}/${CLUSTER_NAME}" - -# VIR_NET defines the KVM network to which the KVM guests will be configured to connect -VIR_NET="macvtap" - -# The names of the files to be retrieved from LOCATION -KERNEL=vmlinuz -INITRAMFS=initramfs.img -ROOTFS=rootfs - -# Static IP addresses and other network configuration details for the KVM guests -BOOTSTRAP_IP="10.10.195.88" -MASTER1_IP="10.10.195.80" -MASTER2_IP="10.10.195.81" -MASTER3_IP="10.10.195.82" -WORKER1_IP="10.10.195.83" -WORKER2_IP="10.10.195.84" -WORKER3_IP="10.10.195.85" -DEFAULT_GW="10.10.195.1" -SUBNET_MASK="24" -NAMESERVER="10.10.195.89" diff --git 
a/scripts-naranja/install-config.yaml b/scripts-naranja/install-config.yaml deleted file mode 100644 index 021e7719..00000000 --- a/scripts-naranja/install-config.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -baseDomain: naranja.local -compute: -- hyperthreading: Enabled - name: worker - replicas: 0 - architecture: s390x -controlPlane: - hyperthreading: Enabled - name: master - replicas: 3 - architecture: s390x -metadata: - name: ocp -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - networkType: OpenShiftSDN - serviceNetwork: - - 172.30.0.0/16 -platform: - none: {} -fips: false -pullSecret: '' -sshKey: '' diff --git a/setup-mgmt-user.yaml b/setup-mgmt-user.yaml deleted file mode 100644 index 54dcba13..00000000 --- a/setup-mgmt-user.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- - - -- hosts: all - become: true - tasks: - - - name: create zcts user - tags: always - user: - name: zcts - groups: root - - - name: add ssh key for zcts user - tags: always - authorized_key: - user: zcts - key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKTf6OEBNCzusceF3/dTWK9rIACxOw009HMkH//AuE8h zcts default" - - - name: add sudoers file for zcts user - tags: always - copy: - src: sudoers_zcts - dest: /etc/sudoers.d/zcts - owner: root - group: root - mode: 0440 - From 5aa759b1c48d977946b930c0c8b28c9490684e5a Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 24 Aug 2021 16:55:41 -0500 Subject: [PATCH 356/885] Cleaned up main directory and debugged while implementing roles --- build_script.sh | 23 ------------ copy-image.yaml | 28 --------------- group_vars/bastion/main.yaml | 0 list_vms.yaml | 16 --------- roles/ansible_setup/tasks/main.yaml | 22 ++++-------- roles/get-ocp/tasks/main.yaml | 10 +++--- .../get-ocp/templates/install-config.yaml.j2 | 2 +- roles/httpd/tasks/main.yaml | 16 --------- roles/set_firewall/tasks/main.yaml | 23 ++++++++++++ scripts-naranja/2-generate-bin-tree.sh | 23 ------------ scripts-naranja/3-generate-ignitions.sh | 20 ----------- 
scripts-naranja/4-make-bootstrap-vm.sh | 12 ------- scripts-naranja/4-make-master-vms.sh | 26 -------------- scripts-naranja/4-make-worker-vms.sh | 26 -------------- scripts-naranja/env | 35 ------------------- scripts-naranja/install-config.yaml | 26 -------------- setup-mgmt-user.yaml | 28 --------------- 17 files changed, 36 insertions(+), 300 deletions(-) delete mode 100644 build_script.sh delete mode 100644 copy-image.yaml create mode 100644 group_vars/bastion/main.yaml delete mode 100644 list_vms.yaml delete mode 100644 scripts-naranja/2-generate-bin-tree.sh delete mode 100644 scripts-naranja/3-generate-ignitions.sh delete mode 100644 scripts-naranja/4-make-bootstrap-vm.sh delete mode 100644 scripts-naranja/4-make-master-vms.sh delete mode 100644 scripts-naranja/4-make-worker-vms.sh delete mode 100644 scripts-naranja/env delete mode 100644 scripts-naranja/install-config.yaml delete mode 100644 setup-mgmt-user.yaml diff --git a/build_script.sh b/build_script.sh deleted file mode 100644 index 52a90971..00000000 --- a/build_script.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -# Created by Phillip - -##for when we start templating: https://searchservervirtualization.techtarget.com/tip/Expedite-Ansible-KVM-provisioning-with-automation - -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G -#qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G - 
-#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive 
if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - -#virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-rootfs.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk 
/var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/copy-image.yaml b/copy-image.yaml deleted file mode 100644 index 13be2c2c..00000000 --- a/copy-image.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -# tasks to check if kvm image file is present and copy if it is not - -- hosts: all - become: true - tasks: - - - name: check to see if kvm image file is present - stat: - path: /tmp/rhel-guest-image-8.3-400.s390x.qcow2 - get_checksum: no - get_mime: no - get_attributes: no - register: os_disk_file - - - name: fail if image file exists - fail: - msg: "Image file exists" - when: os_disk_file is true - - - name: copy kvm image to kvm host(s) - copy: - src: rhel-guest-image-8.3-400.s390x.qcow2 - dest: /tmp/rhel-guest-image-8.3-400.s390x.qcow2 - owner: root - owner: root - mode: 0644 - diff --git a/group_vars/bastion/main.yaml b/group_vars/bastion/main.yaml new file mode 100644 index 00000000..e69de29b diff --git a/list_vms.yaml b/list_vms.yaml deleted file mode 100644 index 3c82bab5..00000000 --- a/list_vms.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- - -- hosts: kvm_host -- tasks: - - - name: list all VMs - community.libvirt.virt: - command: list_vms - register: running_vms - - - name: Print running vms - ansible.builtin.debug: - var: running_vms - verbosity: 0 - - diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 93c534e5..c2a1700e 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -21,10 +21,6 @@ - env_service_network - env_fips - env_pullSecret - - env_rhcos_initramfs - - env_rhcos_kernel - - env_rhcos_rootfs - - env_qcow2_img - env_ip_kvm_host - env_ip_bastion - env_ip_bootstrap @@ -57,10 +53,6 @@ env_service_network: "{{ env_service_network }}" env_fips: "{{ 
env_fips }}" env_pullSecret: "{{ env_pullSecret }}" - env_rhcos_initramfs: "{{ env_rhcos_initramfs }}" - env_rhcos_kernel: "{{ env_rhcos_kernel }}" - env_rhcos_rootfs: "{{ env_rhcos_rootfs }}" - env_qcow2_img: "{{ env_qcow2_img }}" env_ip_kvm_host: "{{ env_ip_kvm_host }}" env_ip_bastion: "{{ env_ip_bastion }}" env_ip_bootstrap: "{{ env_ip_bootstrap }}" @@ -80,13 +72,13 @@ netmask: "{{ netmask }}" cacheable: yes -#- name: Add the contents of user-input variables from env.yaml to group_vars/all folder to persist across hosts -# tags: setup -# blockinfile: -# path: group_vars/all/main.yaml -# block: "{{ lookup('file', 'env.yaml') }}" -# state: present -# backup: yes +- name: Add the contents of user-input variables from env.yaml to group_vars/all folder to persist across hosts + tags: setup + blockinfile: + path: group_vars/all/main.yaml + block: "{{ lookup('file', 'env.yaml') }}" + state: present + backup: yes - name: Populate inventory file with ip variables from env.yaml tags: setup diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index 8b6bf52b..d19d900d 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -14,21 +14,21 @@ - name: get ocp kernel tags: getocp,bastion get_url: - url: "{{ env_rhcos_kernel }}" + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin mode: '0755' - name: get ocp initramfs tags: getocp,bastion get_url: - url: "{{ env_rhcos_initramfs }}" + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/www/html/bin mode: '0755' - name: get ocp rootfs tags: getocp,bastion get_url: - url: "{{ env_rhcos_rootfs }}" + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin mode: '0755' @@ -41,14 +41,14 @@ - name: Unzip OCP Client tags: getocp,bastion 
ansible.builtin.unarchive: - src: "{{ env_ocp_client | default(https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz) }}" + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz dest: /ocpinst/ remote_src: yes - name: Unzip OCP Installer tags: getocp,bastion ansible.builtin.unarchive: - src: "{{ env_ocp_installer | default(https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz) }}" + src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz dest: /ocpinst/ remote_src: yes diff --git a/roles/get-ocp/templates/install-config.yaml.j2 b/roles/get-ocp/templates/install-config.yaml.j2 index 2c9f386b..1e933d6d 100644 --- a/roles/get-ocp/templates/install-config.yaml.j2 +++ b/roles/get-ocp/templates/install-config.yaml.j2 @@ -23,4 +23,4 @@ baseDomain: "{{ env_baseDomain }}" "none\: {}" fips: "{{ env_fips | default(false) }}" pullSecret: "{{ env_pullSecret }}" -sshKey: "{{ env_ocp_pub }}" \ No newline at end of file +sshKey: "{{ env_ssh_key_ocp }}" \ No newline at end of file diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index 9f3ae813..f74a8762 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -12,22 +12,6 @@ name: httpd enabled: yes -- name: Ensure the default Apache port is 8080 - tags: httpconf,bastion - replace: - path: /etc/httpd/conf/httpd.conf - regexp: '^Listen 80' - replace: 'Listen 8080' - backup: yes - -- name: Ensure the SSL default port is 4443 - tags: httpconf,bastion - replace: - path: /etc/httpd/conf.d/ssl.conf - regexp: '^Listen 443 https' - replace: 'Listen 4443 https' - backup: yes - - name: restart httpd service: name: httpd diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index 4b66144a..08c648b6 100644 --- a/roles/set_firewall/tasks/main.yaml +++ 
b/roles/set_firewall/tasks/main.yaml @@ -28,6 +28,13 @@ permanent: yes state: enabled +- name: Allow all access to tcp port 6443 + tags: firewall,bastion + ansible.posix.firewalld: + port: 6443/tcp + permanent: yes + state: enabled + - name: Permit traffic in default zone for http tags: firewall,bastion ansible.posix.firewalld: @@ -42,6 +49,22 @@ permanent: yes state: enabled +- name: Ensure the default Apache port is 8080 + tags: httpconf,bastion + lineinfile: + path: /etc/httpd/conf/httpd.conf + search_string: 'Listen 80' + line: 'Listen 8080' + backup: yes + +- name: Ensure the SSL default port is 4443 + tags: httpconf,bastion + replace: + path: /etc/httpd/conf.d/ssl.conf + regexp: '^Listen 443 https' + replace: 'Listen 4443 https' + backup: yes + - name: restart httpd service: name: httpd diff --git a/scripts-naranja/2-generate-bin-tree.sh b/scripts-naranja/2-generate-bin-tree.sh deleted file mode 100644 index 5fa1edac..00000000 --- a/scripts-naranja/2-generate-bin-tree.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -. 
./env - -# create ocp and ignitions directories in http server -mkdir -p /var/www/html/ocp/ignitions - -# copy binaries to http server directory -cp /root/ocpbin/rhcos-live-initramfs.s390x.img /var/www/html/${CLUSTER_NAME}/${INITRAMFS} -cp /root/ocpbin/rhcos-live-kernel-s390x /var/www/html/${CLUSTER_NAME}/${KERNEL} -cp /root/ocpbin/rhcos-live-rootfs.s390x.img /var/www/html/${CLUSTER_NAME}/${ROOTFS} - -#generating .treeinfo file to be read by --location parameter -cat << EOF >> /var/www/html/${CLUSTER_NAME}/.treeinfo -[general] -arch = ${ARCHITECTURE} -family = Red Hat CoreOS -platforms = ${ARCHITECTURE} -version = ${OCP_RELEASE} -[images-${ARCHITECTURE}] -initrd = ${INITRAMFS} -kernel = ${KERNEL} -EOF diff --git a/scripts-naranja/3-generate-ignitions.sh b/scripts-naranja/3-generate-ignitions.sh deleted file mode 100644 index 3319a9ec..00000000 --- a/scripts-naranja/3-generate-ignitions.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -rm -rf ignitions -mkdir -p ignitions -cp install-config.yaml ignitions - -# create kubernetes manifests -openshift-install create manifests --dir=./ignitions - -# ensure masters are not schedulable -sed -i 's/mastersSchedulable: true/mastersSchedulable: false/g' ignitions/manifests/cluster-scheduler-02-config.yml - -# create ignition config files -openshift-install create ignition-configs --dir=./ignitions - -# copy ign files to http server directory -cp ./ignitions/*.ign /var/www/html/ocp/ignitions - -# setting permissions in http server directory for binaries and ignitions -chmod -R 777 /var/www/html/ocp diff --git a/scripts-naranja/4-make-bootstrap-vm.sh b/scripts-naranja/4-make-bootstrap-vm.sh deleted file mode 100644 index ce239e82..00000000 --- a/scripts-naranja/4-make-bootstrap-vm.sh +++ /dev/null @@ -1,12 +0,0 @@ -#/bin/bash - -. 
./env - -echo "using LOCATION: ${LOCATION}" - - virt-install --name bootstrap \ - --disk ${VIRT_IMAGE_DIR}/bootstrap.qcow2 --ram 16000 --cpu host --vcpus 4 \ - --os-type linux --os-variant rhel8.0 \ - --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ - --location ${LOCATION} \ - --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${BOOTSTRAP_IP}::${DEFAULT_GW}:${SUBNET_MASK}:bootstrap::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/bootstrap.ign" diff --git a/scripts-naranja/4-make-master-vms.sh b/scripts-naranja/4-make-master-vms.sh deleted file mode 100644 index 72dedc27..00000000 --- a/scripts-naranja/4-make-master-vms.sh +++ /dev/null @@ -1,26 +0,0 @@ -#/bin/bash - -. ./env - -echo "LOCATION: ${LOCATION}" - - virt-install --name master1 \ - --disk ${VIRT_IMAGE_DIR}/master1.qcow2 --ram 16000 --cpu host --vcpus 4 \ - --os-type linux --os-variant rhel8.0 \ - --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ - --location ${LOCATION} \ - --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${MASTER1_IP}::${DEFAULT_GW}:${SUBNET_MASK}:master1::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/master.ign" - - virt-install --name master2 \ - --disk ${VIRT_IMAGE_DIR}/master2.qcow2 --ram 16000 --cpu host --vcpus 4 \ - --os-type linux --os-variant rhel8.0 \ - --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ - --location ${LOCATION} \ - --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${MASTER2_IP}::${DEFAULT_GW}:${SUBNET_MASK}:master2::none:1500 
nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/master.ign" - - virt-install --name master3 \ - --disk ${VIRT_IMAGE_DIR}/master3.qcow2 --ram 16000 --cpu host --vcpus 4 \ - --os-type linux --os-variant rhel8.0 \ - --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ - --location ${LOCATION} \ - --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${MASTER3_IP}::${DEFAULT_GW}:${SUBNET_MASK}:master3::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/master.ign" diff --git a/scripts-naranja/4-make-worker-vms.sh b/scripts-naranja/4-make-worker-vms.sh deleted file mode 100644 index 2556b4d5..00000000 --- a/scripts-naranja/4-make-worker-vms.sh +++ /dev/null @@ -1,26 +0,0 @@ -#/bin/bash - -. ./env - -echo "LOCATION: ${LOCATION}" - - virt-install --name worker1 \ - --disk ${VIRT_IMAGE_DIR}/worker1.qcow2 --ram 32000 --cpu host --vcpus 8 \ - --os-type linux --os-variant rhel8.0 \ - --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ - --location ${LOCATION} \ - --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${WORKER1_IP}::${DEFAULT_GW}:${SUBNET_MASK}:worker1::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/worker.ign" - - virt-install --name worker2 \ - --disk ${VIRT_IMAGE_DIR}/worker2.qcow2 --ram 32000 --cpu host --vcpus 8 \ - --os-type linux --os-variant rhel8.0 \ - --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ - --location ${LOCATION} \ - --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} 
ip=${WORKER2_IP}::${DEFAULT_GW}:${SUBNET_MASK}:worker2::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/worker.ign" - - virt-install --name worker3 \ - --disk ${VIRT_IMAGE_DIR}/worker3.qcow2 --ram 32000 --cpu host --vcpus 8 \ - --os-type linux --os-variant rhel8.0 \ - --network network=${VIR_NET} --noreboot --wait -1 --graphics none --console pty,target_type=serial \ - --location ${LOCATION} \ - --extra-args "nomodeset console=ttyS0,115200n8 rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=${LOCATION}/${ROOTFS} ip=${WORKER3_IP}::${DEFAULT_GW}:${SUBNET_MASK}:worker3::none:1500 nameserver=${NAMESERVER} coreos.inst.ignition_url=${LOCATION}/ignitions/worker.ign" diff --git a/scripts-naranja/env b/scripts-naranja/env deleted file mode 100644 index 2ffafcf9..00000000 --- a/scripts-naranja/env +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -OCP_RELEASE="4.7.13" -ARCHITECTURE="s390x" - -# VIRT_IMAGE_DIR is the diretory where the KVM guest virtual disks will be stored -VIRT_IMAGE_DIR=/var/lib/libvirt/images - -# CLUSTER_NAME will be used as a prefix to name the KVM guests, amoung oter things -CLUSTER_NAME="ocp" - -# HOST_IP and WEB_PORT together will be the HTTP server where the images can be downloaded from -HOST_IP="10.10.195.89" -WEB_PORT="8080" -LOCATION="http://${HOST_IP}:${WEB_PORT}/${CLUSTER_NAME}" - -# VIR_NET defines the KVM network to which the KVM guests will be configured to connect -VIR_NET="macvtap" - -# The names of the files to be retrieved from LOCATION -KERNEL=vmlinuz -INITRAMFS=initramfs.img -ROOTFS=rootfs - -# Static IP addresses and other network configuration details for the KVM guests -BOOTSTRAP_IP="10.10.195.88" -MASTER1_IP="10.10.195.80" -MASTER2_IP="10.10.195.81" -MASTER3_IP="10.10.195.82" -WORKER1_IP="10.10.195.83" -WORKER2_IP="10.10.195.84" -WORKER3_IP="10.10.195.85" -DEFAULT_GW="10.10.195.1" -SUBNET_MASK="24" -NAMESERVER="10.10.195.89" diff --git 
a/scripts-naranja/install-config.yaml b/scripts-naranja/install-config.yaml deleted file mode 100644 index 021e7719..00000000 --- a/scripts-naranja/install-config.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -baseDomain: naranja.local -compute: -- hyperthreading: Enabled - name: worker - replicas: 0 - architecture: s390x -controlPlane: - hyperthreading: Enabled - name: master - replicas: 3 - architecture: s390x -metadata: - name: ocp -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - networkType: OpenShiftSDN - serviceNetwork: - - 172.30.0.0/16 -platform: - none: {} -fips: false -pullSecret: '' -sshKey: '' diff --git a/setup-mgmt-user.yaml b/setup-mgmt-user.yaml deleted file mode 100644 index 54dcba13..00000000 --- a/setup-mgmt-user.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- - - -- hosts: all - become: true - tasks: - - - name: create zcts user - tags: always - user: - name: zcts - groups: root - - - name: add ssh key for zcts user - tags: always - authorized_key: - user: zcts - key: "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKTf6OEBNCzusceF3/dTWK9rIACxOw009HMkH//AuE8h zcts default" - - - name: add sudoers file for zcts user - tags: always - copy: - src: sudoers_zcts - dest: /etc/sudoers.d/zcts - owner: root - group: root - mode: 0440 - From aa911c567d0053ae1c6c136bfbd50281e023bb93 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 24 Aug 2021 17:40:26 -0500 Subject: [PATCH 357/885] Fixed error in set_firewall and changed permissions for ansible-setup-linux.sh --- ansible-setup-linux.sh | 0 roles/set_firewall/tasks/main.yaml | 9 ++++++++- 2 files changed, 8 insertions(+), 1 deletion(-) mode change 100644 => 100755 ansible-setup-linux.sh diff --git a/ansible-setup-linux.sh b/ansible-setup-linux.sh old mode 100644 new mode 100755 diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index 08c648b6..17a0c27d 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -3,7 +3,7 @@ - 
name: Allow all access to tcp port 8080 tags: firewall,bastion ansible.posix.firewalld: - port: 8081/tcp + port: 8080/tcp permanent: yes state: enabled @@ -35,6 +35,13 @@ permanent: yes state: enabled +- name: Allow all access to tcp port 22623 + tags: firewall,bastion + ansible.posix.firewalld: + port: 22623/tcp + permanent: yes + state: enabled + - name: Permit traffic in default zone for http tags: firewall,bastion ansible.posix.firewalld: From 8c1aad24d4902a4adc7f67026cd3a363100961a9 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 24 Aug 2021 17:40:26 -0500 Subject: [PATCH 358/885] Fixed error in set_firewall and changed permissions for ansible-setup-linux.sh --- ansible-setup-linux.sh | 0 roles/set_firewall/tasks/main.yaml | 9 ++++++++- 2 files changed, 8 insertions(+), 1 deletion(-) mode change 100644 => 100755 ansible-setup-linux.sh diff --git a/ansible-setup-linux.sh b/ansible-setup-linux.sh old mode 100644 new mode 100755 diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index 08c648b6..17a0c27d 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -3,7 +3,7 @@ - name: Allow all access to tcp port 8080 tags: firewall,bastion ansible.posix.firewalld: - port: 8081/tcp + port: 8080/tcp permanent: yes state: enabled @@ -35,6 +35,13 @@ permanent: yes state: enabled +- name: Allow all access to tcp port 22623 + tags: firewall,bastion + ansible.posix.firewalld: + port: 22623/tcp + permanent: yes + state: enabled + - name: Permit traffic in default zone for http tags: firewall,bastion ansible.posix.firewalld: From 96c5e26163610afd45f6e56529b4f8006b5c287d Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 25 Aug 2021 11:02:28 -0500 Subject: [PATCH 359/885] Bug fixing for the implementation of variables --- .vscode/settings.json | 3 ++ group_vars/all/main.yaml | 43 +++++++++++++++++++++++++++ main.yaml | 2 +- roles/get-ocp/tasks/main.yaml | 2 +- roles/get-ocp/vars/main.yaml | 0 
roles/ssh-ocp-key-gen/tasks/main.yaml | 38 ++++++++++++++--------- 6 files changed, 72 insertions(+), 16 deletions(-) create mode 100644 .vscode/settings.json create mode 100644 group_vars/all/main.yaml create mode 100644 roles/get-ocp/vars/main.yaml diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..106f856c --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "files.autoSave": "onFocusChange" +} \ No newline at end of file diff --git a/group_vars/all/main.yaml b/group_vars/all/main.yaml new file mode 100644 index 00000000..2e750815 --- /dev/null +++ b/group_vars/all/main.yaml @@ -0,0 +1,43 @@ +# BEGIN ANSIBLE MANAGED BLOCK + +# to populate install_config +env_baseDomain: ocpz.wsclab.endicott.ibm.com +env_compute_arch: s390x #default to s390x +env_control_count: 3 #default 3 +env_control_arch: s390x #default s390x +env_metadata_name: distribution +env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now +env_host_prefix: 23 #default 23 for now +env_network_type: OpenShiftSDN #set default OpenShiftSDN +env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 +env_fips: false #true or false, set default false + +env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' + +# to fill inventory +env_ip_kvm_host: 9.60.87.132 +env_ip_bastion: 9.60.87.139 +env_ip_bootstrap: 9.60.87.133 +env_ip_control_0: 9.60.87.136 +env_ip_control_1: 9.60.87.137 +env_ip_control_2: 9.60.87.138 +env_ip_compute_0: 9.60.87.134 +env_ip_compute_1: 9.60.87.135 + +# ssh +env_ssh_username: jacob #Username to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. +env_ssh_pass: ibmzrocks #Password to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. + +# Ansible passwordless ssh +env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible +env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. 
default=""(no password) + +# OpenShift cluster's ssh key pair filename +env_ssh_ocp_name: ocp +env_ssh_ocp_pass: ibmzrocks + +# networking +dns_nameserver: 9.60.87.139 +default_gateway: 9.60.86.1 +netmask: 255.255.254.0 +# END ANSIBLE MANAGED BLOCK diff --git a/main.yaml b/main.yaml index e219c222..a751c67e 100644 --- a/main.yaml +++ b/main.yaml @@ -28,7 +28,7 @@ - enable_packages - macvtap - mount_rhel - #- create_bastion + - create_bastion - hosts: localhost tags: localhost,bastion diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index d19d900d..75da4625 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Load in variables from env.yaml - tags: setup + tags: setup,getocp,bastion include_vars: env.yaml - name: create directory bin for mirrors diff --git a/roles/get-ocp/vars/main.yaml b/roles/get-ocp/vars/main.yaml new file mode 100644 index 00000000..e69de29b diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml index 9c118cc9..0f4a2814 100644 --- a/roles/ssh-ocp-key-gen/tasks/main.yaml +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -1,13 +1,13 @@ --- - name: Load in variables from env.yaml - tags: setup + tags: keymastr, getocp include_vars: env.yaml - name: Check to see if local .ssh directory exists - tags: keymastr + tags: keymastr, getocp stat: - path: "~/.ssh" + path: ~/.ssh register: ssh_directory_exists_check - name: Print results of .ssh directory check @@ -16,9 +16,9 @@ var: ssh_directory_exists_check - name: Create .ssh local directory if it doesn't already exist - tags: keymastr + tags: keymastr, getocp file: - path: "~/.ssh" + path: ~/.ssh state: directory mode: "0700" register: ssh_directory_creation @@ -30,35 +30,45 @@ var: ssh_directory_creation - name: Check .ssh key pair files exist + tags: keymastr,getocp stat: - path: "~/.ssh/{{item}}" + path: ~/.ssh/{{item}} register: ssh_key_file_exists_check with_items: - 
"{{env_ssh_ocp_name}}" - - "{{env_ssh_ocp_name}}.pub" + - "'{{env_ssh_ocp_name}}'.pub" - name: Print results of ssh key pair files check - tags: keymastr + tags: keymastr,getocp debug: var: ssh_key_file_exists_check.results[1].stat.exists - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key - tags: keymastr + tags: keymastr,getocp community.crypto.openssh_keypair: - path: ~/.ssh/"{{ env_ssh_ocp_name }}" + path: ~/.ssh/{{ env_ssh_ocp_name }} passphrase: "{{ env_ssh_ocp_pass }}" backend: opensshbin + owner: root + comment: "bastion root authority" register: ssh_ocp when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false - name: Print results of ssh key generation - tags: keymastr + tags: keymastr,getocp debug: - var: ssh_ocp + var: ssh_ocp.public_key when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false - name: set public key fact + tags: keymastr,getocp set_fact: - env_ssh_key_ocp: "{{ ssh_ocp.public_key }}" + env_ssh_key_ocp: ssh_ocp.public_key cacheable: yes - when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false \ No newline at end of file + when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false + +- name: fill host_vars/bastion/main.yaml file with generated ssh key + tags: keymastr, getocp + lineinfile: + path: roles/get-ocp/vars/main.yaml + line: "env_ssh_key_ocp: '{{ ssh_ocp.public_key }}'" \ No newline at end of file From 71041e0c3202f7eed8a68b4833b9d74b085a56e6 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 25 Aug 2021 11:02:28 -0500 Subject: [PATCH 360/885] Bug 
fixing for the implementation of variables --- .vscode/settings.json | 3 ++ group_vars/all/main.yaml | 43 +++++++++++++++++++++++++++ main.yaml | 2 +- roles/get-ocp/tasks/main.yaml | 2 +- roles/get-ocp/vars/main.yaml | 0 roles/ssh-ocp-key-gen/tasks/main.yaml | 38 ++++++++++++++--------- 6 files changed, 72 insertions(+), 16 deletions(-) create mode 100644 .vscode/settings.json create mode 100644 group_vars/all/main.yaml create mode 100644 roles/get-ocp/vars/main.yaml diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..106f856c --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,3 @@ +{ + "files.autoSave": "onFocusChange" +} \ No newline at end of file diff --git a/group_vars/all/main.yaml b/group_vars/all/main.yaml new file mode 100644 index 00000000..2e750815 --- /dev/null +++ b/group_vars/all/main.yaml @@ -0,0 +1,43 @@ +# BEGIN ANSIBLE MANAGED BLOCK + +# to populate install_config +env_baseDomain: ocpz.wsclab.endicott.ibm.com +env_compute_arch: s390x #default to s390x +env_control_count: 3 #default 3 +env_control_arch: s390x #default s390x +env_metadata_name: distribution +env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now +env_host_prefix: 23 #default 23 for now +env_network_type: OpenShiftSDN #set default OpenShiftSDN +env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 +env_fips: false #true or false, set default false + +env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' + +# to fill inventory +env_ip_kvm_host: 9.60.87.132 +env_ip_bastion: 9.60.87.139 +env_ip_bootstrap: 9.60.87.133 +env_ip_control_0: 9.60.87.136 +env_ip_control_1: 9.60.87.137 +env_ip_control_2: 9.60.87.138 +env_ip_compute_0: 9.60.87.134 +env_ip_compute_1: 9.60.87.135 + +# ssh +env_ssh_username: jacob #Username to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. +env_ssh_pass: ibmzrocks #Password to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. + +# Ansible passwordless ssh +env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible +env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. 
default=""(no password) + +# OpenShift cluster's ssh key pair filename +env_ssh_ocp_name: ocp +env_ssh_ocp_pass: ibmzrocks + +# networking +dns_nameserver: 9.60.87.139 +default_gateway: 9.60.86.1 +netmask: 255.255.254.0 +# END ANSIBLE MANAGED BLOCK diff --git a/main.yaml b/main.yaml index e219c222..a751c67e 100644 --- a/main.yaml +++ b/main.yaml @@ -28,7 +28,7 @@ - enable_packages - macvtap - mount_rhel - #- create_bastion + - create_bastion - hosts: localhost tags: localhost,bastion diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index d19d900d..75da4625 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Load in variables from env.yaml - tags: setup + tags: setup,getocp,bastion include_vars: env.yaml - name: create directory bin for mirrors diff --git a/roles/get-ocp/vars/main.yaml b/roles/get-ocp/vars/main.yaml new file mode 100644 index 00000000..e69de29b diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml index 9c118cc9..0f4a2814 100644 --- a/roles/ssh-ocp-key-gen/tasks/main.yaml +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -1,13 +1,13 @@ --- - name: Load in variables from env.yaml - tags: setup + tags: keymastr, getocp include_vars: env.yaml - name: Check to see if local .ssh directory exists - tags: keymastr + tags: keymastr, getocp stat: - path: "~/.ssh" + path: ~/.ssh register: ssh_directory_exists_check - name: Print results of .ssh directory check @@ -16,9 +16,9 @@ var: ssh_directory_exists_check - name: Create .ssh local directory if it doesn't already exist - tags: keymastr + tags: keymastr, getocp file: - path: "~/.ssh" + path: ~/.ssh state: directory mode: "0700" register: ssh_directory_creation @@ -30,35 +30,45 @@ var: ssh_directory_creation - name: Check .ssh key pair files exist + tags: keymastr,getocp stat: - path: "~/.ssh/{{item}}" + path: ~/.ssh/{{item}} register: ssh_key_file_exists_check with_items: - 
"{{env_ssh_ocp_name}}" - - "{{env_ssh_ocp_name}}.pub" + - "'{{env_ssh_ocp_name}}'.pub" - name: Print results of ssh key pair files check - tags: keymastr + tags: keymastr,getocp debug: var: ssh_key_file_exists_check.results[1].stat.exists - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key - tags: keymastr + tags: keymastr,getocp community.crypto.openssh_keypair: - path: ~/.ssh/"{{ env_ssh_ocp_name }}" + path: ~/.ssh/{{ env_ssh_ocp_name }} passphrase: "{{ env_ssh_ocp_pass }}" backend: opensshbin + owner: root + comment: "bastion root authority" register: ssh_ocp when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false - name: Print results of ssh key generation - tags: keymastr + tags: keymastr,getocp debug: - var: ssh_ocp + var: ssh_ocp.public_key when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false - name: set public key fact + tags: keymastr,getocp set_fact: - env_ssh_key_ocp: "{{ ssh_ocp.public_key }}" + env_ssh_key_ocp: ssh_ocp.public_key cacheable: yes - when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false \ No newline at end of file + when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false + +- name: fill host_vars/bastion/main.yaml file with generated ssh key + tags: keymastr, getocp + lineinfile: + path: roles/get-ocp/vars/main.yaml + line: "env_ssh_key_ocp: '{{ ssh_ocp.public_key }}'" \ No newline at end of file From c6a5b6abf217f6d1ae03639cac848fe94de6ab6b Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 25 Aug 2021 12:35:19 -0500 Subject: [PATCH 361/885] Bug 
fixing for the implementation of variables --- README.md | 9 ++--- roles/create_bastion/tasks/main.yaml | 16 ++++++++- roles/create_bootstrap/tasks/main.yaml | 16 ++++++++- roles/create_compute_nodes/tasks/main.yaml | 30 +++++++++++++++- roles/create_control_nodes/tasks/main.yaml | 42 ++++++++++++++++++++++ roles/ssh-ocp-key-gen/tasks/main.yaml | 2 -- 6 files changed, 104 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 8d695eb0..c1137109 100644 --- a/README.md +++ b/README.md @@ -26,12 +26,9 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing ## When you are ready: * Step 1: Download this Git repository to a folder on your local computer -* Step 2: Go to to download your local command line tools (oc and kubectl) and to copy the following OpenShift links to use in the next step: - * pull secret - * RHCOS initramfs - * RHCOS kernel - * RHCOS rootfs - * QCOW2 image +* Step 2: Go to to: + * download your local command line tools (oc and kubectl) + * OpenShift pull secret (for inputting it into env.yaml) * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. * Step 4: Run ansible-setup-linux/mac.sh shell script in the main directory (run ./ansible-setup-linux/mac.sh in terminal) depending on your operating system to download required Ansible modules and programs. 
First change permissions by running "chmod 755 " (choose one of linux or mac) diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 742a47b7..c2a74f80 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -22,11 +22,25 @@ # - name: virtualize bastion server # command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G +- name: check if bastion already exists + tags: kvm_host, bastionvm + community.libvirt.virt: + name: bastion + command: status + register: bastion_check + +- name: print status of bastion + tags: kvm_host, bastionvm + debug: + var: bastion_check + - name: start bastion install tags: kvm_host, bastionvm command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + when: bastion_check.failed == true - name: README - Pausing for 60 minutes for you to complete the bastion installation of rhel OS with your specific installation's requirements. Please go to your kvm host at to complete installation. Once you see the login prompt on the bastion's terminal, come back here and press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. 
tags: kvm_host, bastionvm pause: - minutes: 60 \ No newline at end of file + minutes: 60 + when: bastion_check.failed == true \ No newline at end of file diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 495c3a54..24c8fa3f 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -1,5 +1,17 @@ --- +- name: check if bootstrap already exists + tags: bootstrap + community.libvirt.virt: + name: bootstrap + command: status + register: bootstrap_check + +- name: print status of bootstrap + tags: bootstrap + debug: + var: bootstrap_check + - name: boot bootstrap tags: bootstrap command: | @@ -10,7 +22,9 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.133::9.60.86.1:255.255.254.0:bootstrap:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign" --noautoconsole + when: bastion_check.failed == true - name: Pause 15 minutes for installation. Once you see the login prompt on the bootstrap's terminal. Press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. 
pause: - minutes: 15 \ No newline at end of file + minutes: 15 + when: bastion_check.failed == true \ No newline at end of file diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index 6fe941f8..f4632189 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -1,5 +1,29 @@ --- +- name: check if compute-0 already exists + tags: compute + community.libvirt.virt: + name: compute-0 + command: status + register: compute-0_check + +- name: print status of compute-0 + tags: compute + debug: + var: compute-0_check + +- name: check if compute-1 already exists + tags: compute + community.libvirt.virt: + name: compute-1 + command: status + register: compute-1_check + +- name: print status of compute-1 + tags: compute + debug: + var: compute-1_check + - name: install CoreOS on compute-0 node tags: compute command: | @@ -10,11 +34,13 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.135::9.60.86.1:255.255.254.0:compute-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" --noautoconsole + when: compute-0_check.failed == true - name: pause 15 minutes tags: compute pause: minutes: 15 + when: compute-0_check.failed == true - name: install CoreOS on compute-1 node tags: compute @@ -26,8 +52,10 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.134::9.60.86.1:255.255.254.0:compute-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" 
--noautoconsole + when: compute-1_check.failed == true - name: pause 15 minutes tags: compute pause: - minutes: 15 \ No newline at end of file + minutes: 15 + when: compute-1_check.failed == true \ No newline at end of file diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index 6f94ce6c..c0bb49e3 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -1,5 +1,41 @@ --- +- name: check if control-0 already exists + tags: control + community.libvirt.virt: + name: control-0 + command: status + register: control-0_check + +- name: print status of control-0 + tags: control + debug: + var: control-0_check + +- name: check if control-1 already exists + tags: control + community.libvirt.virt: + name: control-1 + command: status + register: control-1_check + +- name: print status of control-1 + tags: control + debug: + var: control-1_check + +- name: check if control-2 already exists + tags: control + community.libvirt.virt: + name: control-2 + command: status + register: control-2_check + +- name: print status of control-2 + tags: control + debug: + var: control-2_check + - name: install CoreOS on control-0 node tags: control command: | @@ -10,11 +46,13 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.138::9.60.86.1:255.255.254.0:control-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" --noautoconsole + when: control-0_check.failed == true - name: pause 15 minutes tags: control pause: minutes: 15 + when: control-0_check.failed == true - name: install CoreOS on control-1 node tags: control @@ -26,11 +64,13 @@ --location 
/var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.137::9.60.86.1:255.255.254.0:control-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" --noautoconsole + when: control-1_check.failed == true - name: pause 15 minutes tags: control pause: minutes: 15 + when: control-1_check.failed == true - name: install CoreOS on control-2 node tags: control @@ -42,8 +82,10 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.136::9.60.86.1:255.255.254.0:control-2:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" --noautoconsole + when: control-2_check.failed == true - name: pause 15 minutes tags: control pause: minutes: 15 + when: control-2_check.failed == true diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml index 0f4a2814..ed6b8345 100644 --- a/roles/ssh-ocp-key-gen/tasks/main.yaml +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -49,8 +49,6 @@ path: ~/.ssh/{{ env_ssh_ocp_name }} passphrase: "{{ env_ssh_ocp_pass }}" backend: opensshbin - owner: root - comment: "bastion root authority" register: ssh_ocp when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false From 11cbc2c0aeb11e7b0cd775448bc695309447b4f4 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 25 Aug 2021 12:35:19 -0500 Subject: [PATCH 362/885] Bug fixing for the implementation of variables --- README.md | 9 ++--- 
roles/create_bastion/tasks/main.yaml | 16 ++++++++- roles/create_bootstrap/tasks/main.yaml | 16 ++++++++- roles/create_compute_nodes/tasks/main.yaml | 30 +++++++++++++++- roles/create_control_nodes/tasks/main.yaml | 42 ++++++++++++++++++++++ roles/ssh-ocp-key-gen/tasks/main.yaml | 2 -- 6 files changed, 104 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 8d695eb0..c1137109 100644 --- a/README.md +++ b/README.md @@ -26,12 +26,9 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing ## When you are ready: * Step 1: Download this Git repository to a folder on your local computer -* Step 2: Go to to download your local command line tools (oc and kubectl) and to copy the following OpenShift links to use in the next step: - * pull secret - * RHCOS initramfs - * RHCOS kernel - * RHCOS rootfs - * QCOW2 image +* Step 2: Go to to: + * download your local command line tools (oc and kubectl) + * OpenShift pull secret (for inputting it into env.yaml) * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. * Step 4: Run ansible-setup-linux/mac.sh shell script in the main directory (run ./ansible-setup-linux/mac.sh in terminal) depending on your operating system to download required Ansible modules and programs. 
First change permissions by running "chmod 755 " (choose one of linux or mac) diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 742a47b7..c2a74f80 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -22,11 +22,25 @@ # - name: virtualize bastion server # command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G +- name: check if bastion already exists + tags: kvm_host, bastionvm + community.libvirt.virt: + name: bastion + command: status + register: bastion_check + +- name: print status of bastion + tags: kvm_host, bastionvm + debug: + var: bastion_check + - name: start bastion install tags: kvm_host, bastionvm command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + when: bastion_check.failed == true - name: README - Pausing for 60 minutes for you to complete the bastion installation of rhel OS with your specific installation's requirements. Please go to your kvm host at to complete installation. Once you see the login prompt on the bastion's terminal, come back here and press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. 
tags: kvm_host, bastionvm pause: - minutes: 60 \ No newline at end of file + minutes: 60 + when: bastion_check.failed == true \ No newline at end of file diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 495c3a54..24c8fa3f 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -1,5 +1,17 @@ --- +- name: check if bootstrap already exists + tags: bootstrap + community.libvirt.virt: + name: bootstrap + command: status + register: bootstrap_check + +- name: print status of bootstrap + tags: bootstrap + debug: + var: bootstrap_check + - name: boot bootstrap tags: bootstrap command: | @@ -10,7 +22,9 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.133::9.60.86.1:255.255.254.0:bootstrap:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign" --noautoconsole + when: bastion_check.failed == true - name: Pause 15 minutes for installation. Once you see the login prompt on the bootstrap's terminal. Press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. 
pause: - minutes: 15 \ No newline at end of file + minutes: 15 + when: bastion_check.failed == true \ No newline at end of file diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index 6fe941f8..f4632189 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -1,5 +1,29 @@ --- +- name: check if compute-0 already exists + tags: compute + community.libvirt.virt: + name: compute-0 + command: status + register: compute-0_check + +- name: print status of compute-0 + tags: compute + debug: + var: compute-0_check + +- name: check if compute-1 already exists + tags: compute + community.libvirt.virt: + name: compute-1 + command: status + register: compute-1_check + +- name: print status of compute-1 + tags: compute + debug: + var: compute-1_check + - name: install CoreOS on compute-0 node tags: compute command: | @@ -10,11 +34,13 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.135::9.60.86.1:255.255.254.0:compute-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" --noautoconsole + when: compute-0_check.failed == true - name: pause 15 minutes tags: compute pause: minutes: 15 + when: compute-0_check.failed == true - name: install CoreOS on compute-1 node tags: compute @@ -26,8 +52,10 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.134::9.60.86.1:255.255.254.0:compute-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" 
--noautoconsole + when: compute-1_check.failed == true - name: pause 15 minutes tags: compute pause: - minutes: 15 \ No newline at end of file + minutes: 15 + when: compute-1_check.failed == true \ No newline at end of file diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index 6f94ce6c..c0bb49e3 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -1,5 +1,41 @@ --- +- name: check if control-0 already exists + tags: control + community.libvirt.virt: + name: control-0 + command: status + register: control-0_check + +- name: print status of control-0 + tags: control + debug: + var: control-0_check + +- name: check if control-1 already exists + tags: control + community.libvirt.virt: + name: control-1 + command: status + register: control-1_check + +- name: print status of control-1 + tags: control + debug: + var: control-1_check + +- name: check if control-2 already exists + tags: control + community.libvirt.virt: + name: control-2 + command: status + register: control-2_check + +- name: print status of control-2 + tags: control + debug: + var: control-2_check + - name: install CoreOS on control-0 node tags: control command: | @@ -10,11 +46,13 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.138::9.60.86.1:255.255.254.0:control-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" --noautoconsole + when: control-0_check.failed == true - name: pause 15 minutes tags: control pause: minutes: 15 + when: control-0_check.failed == true - name: install CoreOS on control-1 node tags: control @@ -26,11 +64,13 @@ --location 
/var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.137::9.60.86.1:255.255.254.0:control-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" --noautoconsole + when: control-1_check.failed == true - name: pause 15 minutes tags: control pause: minutes: 15 + when: control-1_check.failed == true - name: install CoreOS on control-2 node tags: control @@ -42,8 +82,10 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.136::9.60.86.1:255.255.254.0:control-2:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" --noautoconsole + when: control-2_check.failed == true - name: pause 15 minutes tags: control pause: minutes: 15 + when: control-2_check.failed == true diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml index 0f4a2814..ed6b8345 100644 --- a/roles/ssh-ocp-key-gen/tasks/main.yaml +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -49,8 +49,6 @@ path: ~/.ssh/{{ env_ssh_ocp_name }} passphrase: "{{ env_ssh_ocp_pass }}" backend: opensshbin - owner: root - comment: "bastion root authority" register: ssh_ocp when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false From 60273f67b745fc027dda11f93a9883e6d8c6cdb9 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 25 Aug 2021 16:06:45 -0500 Subject: [PATCH 363/885] Fully idempotent ssh key pair generation implemented --- roles/get-ocp/tasks/main.yaml | 4 ++++ 
roles/get-ocp/vars/main.yaml | 1 + roles/ssh-ocp-key-gen/tasks/main.yaml | 28 +++++++++++++++++++++------ 3 files changed, 27 insertions(+), 6 deletions(-) diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index 75da4625..ff29e585 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -87,12 +87,16 @@ template: src: install-config.yaml.j2 dest: roles/get-ocp/files/install-config.yaml + force: yes + backup: yes - name: Copy install-config.yaml to ocp install directory tags: getocp,bastion copy: src: install-config.yaml dest: /ocpinst/install-config.yaml + backup: yes + force: yes - name: Create Manifests tags: getocp,bastion diff --git a/roles/get-ocp/vars/main.yaml b/roles/get-ocp/vars/main.yaml index e69de29b..19a4bece 100644 --- a/roles/get-ocp/vars/main.yaml +++ b/roles/get-ocp/vars/main.yaml @@ -0,0 +1 @@ +env_ssh_key_ocp: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC0mdzK01/Vtc4y0sUnU6ZtbVwHfICQpaPc/PAjSmefjnJU2IjhgSi7AF4UYiezUIELiD34m/ZhwY1be7qemHCjkZH+umCCsJ1T2M858mqPXpchoErY+BXCrVP55XV8POnNn6fvFfkKOwp6wio/5Vjgv+6sW399XMAmrQIeEOE4sRTXbU7dNmQvhy63ekSclxvSSUQIgcwFNSfj2D0y2XLDGRqa0J18KCSiDL76MOVhdtD+0wP/Zo51aorwreF4tcEAt1mokkFpBkzI9v/A5W3akaXeyfaKjFTi91ALoNVXALkQ8uRi1isLwpJ282k3Hbblpm4eUaO3LByQSSgPRsMu6mOlSSqLSouVEgzPAXIFgejG53iyzYioKwF0vtjQ7cxz8jPj23BK+4Bnw+gntm2j97ozm95nK14tGb+pnNTz3gySu/h/gkDdpPi3hwJcNVCCycq4DYJ0CXz9cFWw8pYcqkrkILY7MWkjOruKBBDR4j3ubKPMAMXeyankWNHVRtcyCHry4tFPT7FVs1CMTno2x0yQXSzR3yxulmgvx6Nibue89x0+iFyeHlxobMkfyE2auREjcQw4N5acmyb3VBQiGBLxx+qng5WMwo75NXZNsJBfJ1aWuLPOk9L0wbUQpes9EAdUnqldRQHsrONcCJ22ksKMc5QvWlcJyDfefpx5aQ== diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml index ed6b8345..b02c3799 100644 --- a/roles/ssh-ocp-key-gen/tasks/main.yaml +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -43,30 +43,46 @@ debug: var: ssh_key_file_exists_check.results[1].stat.exists -- name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted 
private key +- name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key if it doesn't exist already tags: keymastr,getocp community.crypto.openssh_keypair: path: ~/.ssh/{{ env_ssh_ocp_name }} passphrase: "{{ env_ssh_ocp_pass }}" backend: opensshbin register: ssh_ocp - when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false + when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false - name: Print results of ssh key generation tags: keymastr,getocp debug: var: ssh_ocp.public_key - when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false + when: ssh_ocp.changed == true -- name: set public key fact +- name: set public key fact if generated just now tags: keymastr,getocp set_fact: env_ssh_key_ocp: ssh_ocp.public_key cacheable: yes - when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false + when: ssh_ocp.changed == true + +- name: delete the public key in host_vars/bastion/main.yaml if it is already there + tags: keymastr, getocp + lineinfile: + path: roles/get-ocp/vars/main.yaml + state: absent + regexp: "^env_ssh_key_ocp" - name: fill host_vars/bastion/main.yaml file with generated ssh key tags: keymastr, getocp lineinfile: path: roles/get-ocp/vars/main.yaml - line: "env_ssh_key_ocp: '{{ ssh_ocp.public_key }}'" \ No newline at end of file + line: "env_ssh_key_ocp: '{{ ssh_ocp.public_key }}'" + when: ssh_ocp.changed == true + +- name: copy public key from .ssh folder if it was there already and move the contents to roles/get-ocp/vars/main.yaml + tags: keymastr, getocp + lineinfile: + path: 
roles/get-ocp/vars/main.yaml + regexp: "^{{ lookup('file', 'roles/get-ocp/vars/main.yaml') }}" + line: "env_ssh_key_ocp: {{ lookup('file', '~/.ssh/{{ env_ssh_ocp_name }}.pub') }}{{ lookup('file', 'roles/get-ocp/vars/main.yaml') }}" + when: ssh_ocp.changed == false \ No newline at end of file From fe187ba5ae117d4ad30e1a15d35d643008c9ad5c Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 25 Aug 2021 16:06:45 -0500 Subject: [PATCH 364/885] Fully idempotent ssh key pair generation implemented --- roles/get-ocp/tasks/main.yaml | 4 ++++ roles/get-ocp/vars/main.yaml | 1 + roles/ssh-ocp-key-gen/tasks/main.yaml | 28 +++++++++++++++++++++------ 3 files changed, 27 insertions(+), 6 deletions(-) diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index 75da4625..ff29e585 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -87,12 +87,16 @@ template: src: install-config.yaml.j2 dest: roles/get-ocp/files/install-config.yaml + force: yes + backup: yes - name: Copy install-config.yaml to ocp install directory tags: getocp,bastion copy: src: install-config.yaml dest: /ocpinst/install-config.yaml + backup: yes + force: yes - name: Create Manifests tags: getocp,bastion diff --git a/roles/get-ocp/vars/main.yaml b/roles/get-ocp/vars/main.yaml index e69de29b..19a4bece 100644 --- a/roles/get-ocp/vars/main.yaml +++ b/roles/get-ocp/vars/main.yaml @@ -0,0 +1 @@ +env_ssh_key_ocp: ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC0mdzK01/Vtc4y0sUnU6ZtbVwHfICQpaPc/PAjSmefjnJU2IjhgSi7AF4UYiezUIELiD34m/ZhwY1be7qemHCjkZH+umCCsJ1T2M858mqPXpchoErY+BXCrVP55XV8POnNn6fvFfkKOwp6wio/5Vjgv+6sW399XMAmrQIeEOE4sRTXbU7dNmQvhy63ekSclxvSSUQIgcwFNSfj2D0y2XLDGRqa0J18KCSiDL76MOVhdtD+0wP/Zo51aorwreF4tcEAt1mokkFpBkzI9v/A5W3akaXeyfaKjFTi91ALoNVXALkQ8uRi1isLwpJ282k3Hbblpm4eUaO3LByQSSgPRsMu6mOlSSqLSouVEgzPAXIFgejG53iyzYioKwF0vtjQ7cxz8jPj23BK+4Bnw+gntm2j97ozm95nK14tGb+pnNTz3gySu/h/gkDdpPi3hwJcNVCCycq4DYJ0CXz9cFWw8pYcqkrkILY7MWkjOruKBBDR4j3ubKPMAMXeyankWNHVRtcyCHry4tFPT7FVs1CMTno2x0yQXSzR3yxulmgvx6Nibue89x0+iFyeHlxobMkfyE2auREjcQw4N5acmyb3VBQiGBLxx+qng5WMwo75NXZNsJBfJ1aWuLPOk9L0wbUQpes9EAdUnqldRQHsrONcCJ22ksKMc5QvWlcJyDfefpx5aQ== diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml index ed6b8345..b02c3799 100644 --- a/roles/ssh-ocp-key-gen/tasks/main.yaml +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -43,30 +43,46 @@ debug: var: ssh_key_file_exists_check.results[1].stat.exists -- name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key +- name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key if it doesn't exist already tags: keymastr,getocp community.crypto.openssh_keypair: path: ~/.ssh/{{ env_ssh_ocp_name }} passphrase: "{{ env_ssh_ocp_pass }}" backend: opensshbin register: ssh_ocp - when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false + when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false - name: Print results of ssh key generation tags: keymastr,getocp debug: var: ssh_ocp.public_key - when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and 
ssh_key_file_exists_check.results[1].stat.exists == false + when: ssh_ocp.changed == true -- name: set public key fact +- name: set public key fact if generated just now tags: keymastr,getocp set_fact: env_ssh_key_ocp: ssh_ocp.public_key cacheable: yes - when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false + when: ssh_ocp.changed == true + +- name: delete the public key in host_vars/bastion/main.yaml if it is already there + tags: keymastr, getocp + lineinfile: + path: roles/get-ocp/vars/main.yaml + state: absent + regexp: "^env_ssh_key_ocp" - name: fill host_vars/bastion/main.yaml file with generated ssh key tags: keymastr, getocp lineinfile: path: roles/get-ocp/vars/main.yaml - line: "env_ssh_key_ocp: '{{ ssh_ocp.public_key }}'" \ No newline at end of file + line: "env_ssh_key_ocp: '{{ ssh_ocp.public_key }}'" + when: ssh_ocp.changed == true + +- name: copy public key from .ssh folder if it was there already and move the contents to roles/get-ocp/vars/main.yaml + tags: keymastr, getocp + lineinfile: + path: roles/get-ocp/vars/main.yaml + regexp: "^{{ lookup('file', 'roles/get-ocp/vars/main.yaml') }}" + line: "env_ssh_key_ocp: {{ lookup('file', '~/.ssh/{{ env_ssh_ocp_name }}.pub') }}{{ lookup('file', 'roles/get-ocp/vars/main.yaml') }}" + when: ssh_ocp.changed == false \ No newline at end of file From 8c44e96ef3ece8bf41b18e674ead18bb3e439e20 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 25 Aug 2021 20:08:05 -0500 Subject: [PATCH 365/885] debugging variables run --- env.yaml | 6 ++- group_vars/all/main.yaml | 6 ++- main.yaml | 2 +- roles/create_bootstrap/tasks/main.yaml | 6 ++- roles/dns/tasks/main.yaml | 4 +- roles/get-ocp/tasks/main.yaml | 14 ++----- .../get-ocp/templates/install-config.yaml.j2 | 40 +++++++++---------- roles/get-ocp/vars/main.yaml | 2 +- roles/prep_kvm_guests/tasks/main.yaml | 2 +- 9 files changed, 40 insertions(+), 42 
deletions(-) diff --git a/env.yaml b/env.yaml index 4acb8125..1b9be361 100644 --- a/env.yaml +++ b/env.yaml @@ -9,9 +9,11 @@ env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now env_host_prefix: 23 #default 23 for now env_network_type: OpenShiftSDN #set default OpenShiftSDN env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 -env_fips: false #true or false, set default false +env_fips: "false" # "true" or "false" (include quotes) + +env_pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"registry.connect.redhat.com":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejlaUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuS
ElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"},"registry.redhat.io":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejlaUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"}}}' #paste it into these single quotes here + -env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' # to fill inventory env_ip_kvm_host: 9.60.87.132 diff --git a/group_vars/all/main.yaml b/group_vars/all/main.yaml index 2e750815..65ec7ec5 100644 --- a/group_vars/all/main.yaml +++ b/group_vars/all/main.yaml @@ -10,9 +10,11 @@ env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now env_host_prefix: 23 #default 23 for now env_network_type: OpenShiftSDN #set default OpenShiftSDN env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 -env_fips: false #true or false, set default false +env_fips: "false" # "true" or "false" (include quotes) + +env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"registry.connect.redhat.com":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejlaUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"},"registry.redhat.io":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejl
aUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"}}}' #paste it into these single quotes here + -env_pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJ
TDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' # to fill inventory env_ip_kvm_host: 9.60.87.132 diff --git a/main.yaml b/main.yaml index a751c67e..e8c45e03 100644 --- a/main.yaml +++ b/main.yaml @@ -51,8 +51,8 @@ roles: - check_ssh #- install_packages - - set_firewall - set_selinux_permissive + - set_firewall - dns - haproxy - httpd diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 24c8fa3f..60106987 100644 --- 
a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -6,6 +6,7 @@ name: bootstrap command: status register: bootstrap_check + ignore_errors: yes - name: print status of bootstrap tags: bootstrap @@ -22,9 +23,10 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.133::9.60.86.1:255.255.254.0:bootstrap:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign" --noautoconsole - when: bastion_check.failed == true + when: bootstrap_check.failed == true - name: Pause 15 minutes for installation. Once you see the login prompt on the bootstrap's terminal. Press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. + tags: bootstrap pause: minutes: 15 - when: bastion_check.failed == true \ No newline at end of file + when: bootstrap_check.failed == true \ No newline at end of file diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml index b2e959b4..e3dadf2c 100644 --- a/roles/dns/tasks/main.yaml +++ b/roles/dns/tasks/main.yaml @@ -22,7 +22,7 @@ mode: '0755' backup: yes -- name: Copy distribution.db file to bastion +- name: Copy DNS .db file to bastion tags: dns,bastion ansible.builtin.copy: src: distribution.db @@ -32,7 +32,7 @@ mode: '0755' backup: yes -- name: Copy distribution.rev file to bastion +- name: Copy DNS .rev file to bastion tags: dns,bastion ansible.builtin.copy: src: distribution.rev diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index ff29e585..f6acf193 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -14,7 +14,7 @@ - name: get ocp kernel tags: getocp,bastion get_url: - url: 
https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/www/html/bin mode: '0755' @@ -83,20 +83,12 @@ mode: '0755' - name: Use template file to create install-config - tags: setup,get-ocp + tags: setup,getocp template: src: install-config.yaml.j2 - dest: roles/get-ocp/files/install-config.yaml - force: yes - backup: yes - -- name: Copy install-config.yaml to ocp install directory - tags: getocp,bastion - copy: - src: install-config.yaml dest: /ocpinst/install-config.yaml - backup: yes force: yes + backup: yes - name: Create Manifests tags: getocp,bastion diff --git a/roles/get-ocp/templates/install-config.yaml.j2 b/roles/get-ocp/templates/install-config.yaml.j2 index 1e933d6d..3a24f18d 100644 --- a/roles/get-ocp/templates/install-config.yaml.j2 +++ b/roles/get-ocp/templates/install-config.yaml.j2 @@ -1,26 +1,26 @@ apiVersion: v1 -baseDomain: "{{ env_baseDomain }}" -"compute\:" +baseDomain: {{ env_baseDomain }} +compute: - hyperthreading: Enabled name: worker replicas: 2 - architecture: "{{ env_compute_arch | default(s390x) }}" -"controlPlane\:" + architecture: {{ env_compute_arch | default(s390x) }} +controlPlane: hyperthreading: Enabled name: master - replicas: "{{ env_control_count | default(3) }}" - architecture: "{{ env_control_arch | default(s390x) }}" -"metadata\:" - name: "{{ env_metadata_name }}" -"networking\:" - "clusterNetwork\:" - - cidr: "{{ env_cidr | default("10.128.0.0/14") }}" - hostPrefix: "{{ env_host_prefix | default(23) }}" - networkType: "{{ env_network_type | default(OpenShiftSDN) }}" - "serviceNetwork\:" - - "{{ env_service_network | default("172.30.0.0/16") }}" -"platform/:" - "none\: {}" -fips: "{{ env_fips | default(false) }}" -pullSecret: "{{ env_pullSecret }}" -sshKey: "{{ env_ssh_key_ocp }}" \ No newline at end of file + replicas: {{ env_control_count | 
default(3) }} + architecture: {{ env_control_arch | default(s390x) }} +metadata: + name: {{ env_metadata_name }} +networking: + clusterNetwork: + - cidr: {{ env_cidr | default("10.128.0.0/14") }} + hostPrefix: {{ env_host_prefix | default(23) }} + networkType: {{ env_network_type | default(OpenShiftSDN) }} + serviceNetwork: + - {{ env_service_network | default("172.30.0.0/16") }} +platform: + none: {} +fips: {{ env_fips | default(false) }} +pullSecret: '{{ env_pullSecret }}' +sshKey: {{ env_ssh_key_ocp }} \ No newline at end of file diff --git a/roles/get-ocp/vars/main.yaml b/roles/get-ocp/vars/main.yaml index 19a4bece..4d5fd6e5 100644 --- a/roles/get-ocp/vars/main.yaml +++ b/roles/get-ocp/vars/main.yaml @@ -1 +1 @@ -env_ssh_key_ocp: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC0mdzK01/Vtc4y0sUnU6ZtbVwHfICQpaPc/PAjSmefjnJU2IjhgSi7AF4UYiezUIELiD34m/ZhwY1be7qemHCjkZH+umCCsJ1T2M858mqPXpchoErY+BXCrVP55XV8POnNn6fvFfkKOwp6wio/5Vjgv+6sW399XMAmrQIeEOE4sRTXbU7dNmQvhy63ekSclxvSSUQIgcwFNSfj2D0y2XLDGRqa0J18KCSiDL76MOVhdtD+0wP/Zo51aorwreF4tcEAt1mokkFpBkzI9v/A5W3akaXeyfaKjFTi91ALoNVXALkQ8uRi1isLwpJ282k3Hbblpm4eUaO3LByQSSgPRsMu6mOlSSqLSouVEgzPAXIFgejG53iyzYioKwF0vtjQ7cxz8jPj23BK+4Bnw+gntm2j97ozm95nK14tGb+pnNTz3gySu/h/gkDdpPi3hwJcNVCCycq4DYJ0CXz9cFWw8pYcqkrkILY7MWkjOruKBBDR4j3ubKPMAMXeyankWNHVRtcyCHry4tFPT7FVs1CMTno2x0yQXSzR3yxulmgvx6Nibue89x0+iFyeHlxobMkfyE2auREjcQw4N5acmyb3VBQiGBLxx+qng5WMwo75NXZNsJBfJ1aWuLPOk9L0wbUQpes9EAdUnqldRQHsrONcCJ22ksKMc5QvWlcJyDfefpx5aQ== +env_ssh_key_ocp: ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC5pKQNbo5MT+szVl6LvfgJVbvTwiRpAXmgPsCPesUTmQ6J4gqrQZz42GQeWdwWfLDo4K/NdvVSOWofE7Jhk9pC0JqYzp7OoZaqwSHABqA+yPAx8tdHX+qt7c6tylfzwTvd+a8MlZ3Xj1uhHdZc2dSmiQbXobI9bccEGwCQ7M8+CHWOJsrl8bWqx/nRbd/wcad7D+oN8/8uY8c8AO/xHmxJII4idTazS52ggS91GpMzPewfwWnx7A9ck65vLvjTFhniQjQsM0J3kgSzowQfKPpWSVfHXWaoDbVUbOiTk5awai0zhyaEB51pWuplZTqMrS5sJVzvflkwGx+tRfsakp7zu8OenvjZOGQYzrwlVX18a9r99V4IKc8AbV8wILQvBeCwZbwa+T/uWFdQcT2TDJkFrytagzgR2hv6pv3LNM4s/ybaVZkAX/VGQuXSc4V5al+x1UX74NFV4VDvnngo63NHHI1A2IDffRb+ijiAaKXn97+cC2CIKEHoot5Km7J9wCQW2JSaZycXRqdKWAsSL5hn0Io0MUnB1ReIEcYhn8MalbA12QUy0SDTYtDmYyA6g5JB6bulH/jWfHayZEANNjcP7FpuMa3lr/L3RnnYM1csE3xlvTQnaBmN+Qu4qQTUJ5TlmMvSmMCgsimAF1xWkstfm0S+OWVrST/A/3yP659OnQ== diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index 9e824095..687b21e9 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -10,7 +10,7 @@ - name: Unzip rhcos qcow2 files tags: kvm_host - command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz + command: gunzip -f /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz - name: get rhcos initramfs image tags: kvm_host From 5aff8a5311181111159f5c8a6c5d65b7f5850b6f Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 25 Aug 2021 20:08:05 -0500 Subject: [PATCH 366/885] debugging variables run --- group_vars/all/main.yaml | 6 ++- main.yaml | 2 +- roles/create_bootstrap/tasks/main.yaml | 6 ++- roles/dns/tasks/main.yaml | 4 +- roles/get-ocp/tasks/main.yaml | 14 ++----- .../get-ocp/templates/install-config.yaml.j2 | 40 +++++++++---------- roles/get-ocp/vars/main.yaml | 2 +- roles/prep_kvm_guests/tasks/main.yaml | 2 +- 8 files changed, 36 insertions(+), 40 deletions(-) diff --git a/group_vars/all/main.yaml b/group_vars/all/main.yaml index 2e750815..65ec7ec5 100644 --- a/group_vars/all/main.yaml +++ b/group_vars/all/main.yaml @@ -10,9 +10,11 @@ env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now env_host_prefix: 23 #default 23 
for now env_network_type: OpenShiftSDN #set default OpenShiftSDN env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 -env_fips: false #true or false, set default false +env_fips: "false" # "true" or "false" (include quotes) + +env_pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"registry.connect.redhat.com":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejlaUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"},"registry.redhat.io":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS
5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejlaUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"}}}' #paste it into these single quotes here + -env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' # to fill inventory env_ip_kvm_host: 9.60.87.132 diff --git a/main.yaml b/main.yaml index a751c67e..e8c45e03 100644 --- a/main.yaml +++ b/main.yaml @@ -51,8 +51,8 @@ roles: - check_ssh #- install_packages - - set_firewall - set_selinux_permissive + - set_firewall - dns - haproxy - httpd diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 24c8fa3f..60106987 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -6,6 +6,7 @@ name: bootstrap command: status register: bootstrap_check + ignore_errors: yes - name: print status of bootstrap tags: bootstrap @@ -22,9 +23,10 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.133::9.60.86.1:255.255.254.0:bootstrap:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign" --noautoconsole - when: bastion_check.failed == true + when: bootstrap_check.failed == true - name: Pause 15 minutes for installation. 
Once you see the login prompt on the bootstrap's terminal. Press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. + tags: bootstrap pause: minutes: 15 - when: bastion_check.failed == true \ No newline at end of file + when: bootstrap_check.failed == true \ No newline at end of file diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml index b2e959b4..e3dadf2c 100644 --- a/roles/dns/tasks/main.yaml +++ b/roles/dns/tasks/main.yaml @@ -22,7 +22,7 @@ mode: '0755' backup: yes -- name: Copy distribution.db file to bastion +- name: Copy DNS .db file to bastion tags: dns,bastion ansible.builtin.copy: src: distribution.db @@ -32,7 +32,7 @@ mode: '0755' backup: yes -- name: Copy distribution.rev file to bastion +- name: Copy DNS .rev file to bastion tags: dns,bastion ansible.builtin.copy: src: distribution.rev diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index ff29e585..f6acf193 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -14,7 +14,7 @@ - name: get ocp kernel tags: getocp,bastion get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img + url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/www/html/bin mode: '0755' @@ -83,20 +83,12 @@ mode: '0755' - name: Use template file to create install-config - tags: setup,get-ocp + tags: setup,getocp template: src: install-config.yaml.j2 - dest: roles/get-ocp/files/install-config.yaml - force: yes - backup: yes - -- name: Copy install-config.yaml to ocp install directory - tags: getocp,bastion - copy: - src: install-config.yaml dest: /ocpinst/install-config.yaml - backup: yes force: yes + backup: yes - name: Create Manifests tags: getocp,bastion diff --git a/roles/get-ocp/templates/install-config.yaml.j2 b/roles/get-ocp/templates/install-config.yaml.j2 index 1e933d6d..3a24f18d 
100644 --- a/roles/get-ocp/templates/install-config.yaml.j2 +++ b/roles/get-ocp/templates/install-config.yaml.j2 @@ -1,26 +1,26 @@ apiVersion: v1 -baseDomain: "{{ env_baseDomain }}" -"compute\:" +baseDomain: {{ env_baseDomain }} +compute: - hyperthreading: Enabled name: worker replicas: 2 - architecture: "{{ env_compute_arch | default(s390x) }}" -"controlPlane\:" + architecture: {{ env_compute_arch | default(s390x) }} +controlPlane: hyperthreading: Enabled name: master - replicas: "{{ env_control_count | default(3) }}" - architecture: "{{ env_control_arch | default(s390x) }}" -"metadata\:" - name: "{{ env_metadata_name }}" -"networking\:" - "clusterNetwork\:" - - cidr: "{{ env_cidr | default("10.128.0.0/14") }}" - hostPrefix: "{{ env_host_prefix | default(23) }}" - networkType: "{{ env_network_type | default(OpenShiftSDN) }}" - "serviceNetwork\:" - - "{{ env_service_network | default("172.30.0.0/16") }}" -"platform/:" - "none\: {}" -fips: "{{ env_fips | default(false) }}" -pullSecret: "{{ env_pullSecret }}" -sshKey: "{{ env_ssh_key_ocp }}" \ No newline at end of file + replicas: {{ env_control_count | default(3) }} + architecture: {{ env_control_arch | default(s390x) }} +metadata: + name: {{ env_metadata_name }} +networking: + clusterNetwork: + - cidr: {{ env_cidr | default("10.128.0.0/14") }} + hostPrefix: {{ env_host_prefix | default(23) }} + networkType: {{ env_network_type | default(OpenShiftSDN) }} + serviceNetwork: + - {{ env_service_network | default("172.30.0.0/16") }} +platform: + none: {} +fips: {{ env_fips | default(false) }} +pullSecret: '{{ env_pullSecret }}' +sshKey: {{ env_ssh_key_ocp }} \ No newline at end of file diff --git a/roles/get-ocp/vars/main.yaml b/roles/get-ocp/vars/main.yaml index 19a4bece..4d5fd6e5 100644 --- a/roles/get-ocp/vars/main.yaml +++ b/roles/get-ocp/vars/main.yaml @@ -1 +1 @@ -env_ssh_key_ocp: ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC0mdzK01/Vtc4y0sUnU6ZtbVwHfICQpaPc/PAjSmefjnJU2IjhgSi7AF4UYiezUIELiD34m/ZhwY1be7qemHCjkZH+umCCsJ1T2M858mqPXpchoErY+BXCrVP55XV8POnNn6fvFfkKOwp6wio/5Vjgv+6sW399XMAmrQIeEOE4sRTXbU7dNmQvhy63ekSclxvSSUQIgcwFNSfj2D0y2XLDGRqa0J18KCSiDL76MOVhdtD+0wP/Zo51aorwreF4tcEAt1mokkFpBkzI9v/A5W3akaXeyfaKjFTi91ALoNVXALkQ8uRi1isLwpJ282k3Hbblpm4eUaO3LByQSSgPRsMu6mOlSSqLSouVEgzPAXIFgejG53iyzYioKwF0vtjQ7cxz8jPj23BK+4Bnw+gntm2j97ozm95nK14tGb+pnNTz3gySu/h/gkDdpPi3hwJcNVCCycq4DYJ0CXz9cFWw8pYcqkrkILY7MWkjOruKBBDR4j3ubKPMAMXeyankWNHVRtcyCHry4tFPT7FVs1CMTno2x0yQXSzR3yxulmgvx6Nibue89x0+iFyeHlxobMkfyE2auREjcQw4N5acmyb3VBQiGBLxx+qng5WMwo75NXZNsJBfJ1aWuLPOk9L0wbUQpes9EAdUnqldRQHsrONcCJ22ksKMc5QvWlcJyDfefpx5aQ== +env_ssh_key_ocp: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC5pKQNbo5MT+szVl6LvfgJVbvTwiRpAXmgPsCPesUTmQ6J4gqrQZz42GQeWdwWfLDo4K/NdvVSOWofE7Jhk9pC0JqYzp7OoZaqwSHABqA+yPAx8tdHX+qt7c6tylfzwTvd+a8MlZ3Xj1uhHdZc2dSmiQbXobI9bccEGwCQ7M8+CHWOJsrl8bWqx/nRbd/wcad7D+oN8/8uY8c8AO/xHmxJII4idTazS52ggS91GpMzPewfwWnx7A9ck65vLvjTFhniQjQsM0J3kgSzowQfKPpWSVfHXWaoDbVUbOiTk5awai0zhyaEB51pWuplZTqMrS5sJVzvflkwGx+tRfsakp7zu8OenvjZOGQYzrwlVX18a9r99V4IKc8AbV8wILQvBeCwZbwa+T/uWFdQcT2TDJkFrytagzgR2hv6pv3LNM4s/ybaVZkAX/VGQuXSc4V5al+x1UX74NFV4VDvnngo63NHHI1A2IDffRb+ijiAaKXn97+cC2CIKEHoot5Km7J9wCQW2JSaZycXRqdKWAsSL5hn0Io0MUnB1ReIEcYhn8MalbA12QUy0SDTYtDmYyA6g5JB6bulH/jWfHayZEANNjcP7FpuMa3lr/L3RnnYM1csE3xlvTQnaBmN+Qu4qQTUJ5TlmMvSmMCgsimAF1xWkstfm0S+OWVrST/A/3yP659OnQ== diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index 9e824095..687b21e9 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -10,7 +10,7 @@ - name: Unzip rhcos qcow2 files tags: kvm_host - command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz + command: gunzip -f /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz - name: get rhcos initramfs image tags: kvm_host From ba8f7262387c89b0029fa8e01ef777fd218a7ea3 Mon Sep 17 00:00:00 2001 
From: jacobemery Date: Sat, 28 Aug 2021 10:35:35 -0500 Subject: [PATCH 367/885] Troubleshooting the implementation of variables. Successful run. --- README.md | 5 +- env.yaml | 5 +- main.yaml | 24 ++++-- roles/ansible_setup/tasks/main.yaml | 6 +- roles/connect_cluster/tasks/main.yaml | 19 +++++ roles/create_compute_nodes/tasks/main.yaml | 18 +++-- roles/create_control_nodes/tasks/main.yaml | 31 ++++---- roles/get-ocp/files/ocp_ssh_pub | 1 + roles/get-ocp/tasks/main.yaml | 13 ++++ .../get-ocp/templates/install-config.yaml.j2 | 2 +- roles/get-ocp/vars/main.yaml | 1 - roles/install_ansible/tasks/main.yaml | 10 +++ roles/set_firewall/tasks/main.yaml | 74 +++++++------------ roles/ssh-ocp-key-gen/tasks/main.yaml | 50 +++---------- roles/wait_for_bootkube/tasks/main.yaml | 2 + teardown.yaml | 7 ++ 16 files changed, 144 insertions(+), 124 deletions(-) create mode 100644 roles/connect_cluster/tasks/main.yaml create mode 100644 roles/get-ocp/files/ocp_ssh_pub delete mode 100644 roles/get-ocp/vars/main.yaml create mode 100644 roles/install_ansible/tasks/main.yaml create mode 100644 teardown.yaml diff --git a/README.md b/README.md index c1137109..a73874b4 100644 --- a/README.md +++ b/README.md @@ -49,6 +49,7 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing ## Tags: * setup = first-time setup of ansible +* prep = run all setup playbooks * bastion = configuration of bastion for OCP * keymastr = ssh key configuration and testing * bastionvm = creation of Bastion KVM guest @@ -60,7 +61,9 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing * getocp = download of OCP installer and http server configuration * haproxy = configuration of haproxy on bastion kvm guest * httpconf = configuration of httpd server on bastion kvm guest -* kvmhost = tasks to apply to KVM host for OCP cluster +* kvm_host = tasks to apply to KVM host for OCP cluster +* kvm_prep = tasks from the first set of kvm plays +* create_nodes = tasks from 
the second set of kvm plays * localhost = for tasks that apply to the local machine running Ansible * firewall = for tasks related to firewall settings * selinux = for tasks related to SELinux settings diff --git a/env.yaml b/env.yaml index 1b9be361..a139e7df 100644 --- a/env.yaml +++ b/env.yaml @@ -33,9 +33,8 @@ env_ssh_pass: ibmzrocks #Password to use for ssh into kvm and bastion for first- env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) -# OpenShift cluster's ssh key pair filename -env_ssh_ocp_name: ocp -env_ssh_ocp_pass: ibmzrocks +# OpenShift cluster's ssh key comment +env_ssh_ocp_comm: "ocpz_distribution" # networking dns_nameserver: 9.60.87.139 diff --git a/main.yaml b/main.yaml index e8c45e03..71840c5b 100644 --- a/main.yaml +++ b/main.yaml @@ -1,7 +1,7 @@ --- - hosts: localhost - tags: localhost + tags: localhost, prep connection: local become: false gather_facts: no @@ -15,7 +15,7 @@ - ssh_copy_id - hosts: kvm_host - tags: kvm_host + tags: kvm_host,kvm_prep become: true vars_files: - env.yaml @@ -39,7 +39,6 @@ - ssh_target_ip: "{{ env_ip_bastion }}" roles: - ssh_copy_id # to connect to bastion - - ssh-ocp-key-gen # for bastion to connect to nodes - hosts: bastion tags: bastion @@ -47,10 +46,12 @@ vars_files: - env.yaml vars: - - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] + - packages: [ 'ansible', 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: - check_ssh #- install_packages + #- install_ansible + - ssh-ocp-key-gen # for bastion to connect to nodes - set_selinux_permissive - set_firewall - dns @@ -59,7 +60,7 @@ - get-ocp - hosts: kvm_host - tags: kvm_host + tags: kvm_host,create_nodes become: true gather_facts: no roles: @@ -75,6 +76,19 @@ #roles: #- ssh_config_jump +#- hosts: bastion + #become: true + #gather_facts: no + #roles: + #- wait_for_bootstrap + +#- hosts: 
bastion + #tags: bastion,cluster + #become: true + #gather_facts: no + #roles: + #- connect_cluster + #- hosts: bastion #become: true #gather_facts: no diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index c2a1700e..89dc1d1d 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -33,8 +33,7 @@ - env_ssh_pass - env_ssh_ans_name - env_ssh_ans_pass - - env_ssh_ocp_name - - env_ssh_ocp_pass + - env_ssh_ocp_comm - dns_nameserver - default_gateway - netmask @@ -65,8 +64,7 @@ env_ssh_pass: "{{ env_ssh_pass }}" env_ssh_ans_name: "{{ env_ssh_ans_name }}" env_ssh_ans_pass: "{{ env_ssh_ans_pass }}" - env_ssh_ocp_name: "{{ env_ssh_ocp_name }}" - env_ssh_ocp_pass: "{{ env_ssh_ocp_pass }}" + env_ssh_ocp_comm: "{{ env_ssh_ocp_comm }}" dns_nameserver: "{{ dns_nameserver }}" default_gateway: "{{ default_gateway }}" netmask: "{{ netmask }}" diff --git a/roles/connect_cluster/tasks/main.yaml b/roles/connect_cluster/tasks/main.yaml new file mode 100644 index 00000000..cd6cc5be --- /dev/null +++ b/roles/connect_cluster/tasks/main.yaml @@ -0,0 +1,19 @@ +--- + +- name: export kube config file + tags: cluster + command: export KUBECONFIG=/ocpinst/auth/kubeconfig + +- name: check if system admin + command: oc whoami + register: whoami_check + failed_when: whoami_check.stdout != system:admin + +- name: get csr info + tags: cluster + command: oc get csr + register: csr + +- name: print csr info to terminal + debug: + var: csr.stdout \ No newline at end of file diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index f4632189..0514be05 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -5,24 +5,26 @@ community.libvirt.virt: name: compute-0 command: status - register: compute-0_check + register: compute_0_check + ignore_errors: yes - name: print status of compute-0 tags: compute debug: - var: compute-0_check + 
var: compute_0_check - name: check if compute-1 already exists tags: compute community.libvirt.virt: name: compute-1 command: status - register: compute-1_check + register: compute_1_check + ignore_errors: yes - name: print status of compute-1 tags: compute debug: - var: compute-1_check + var: compute_1_check - name: install CoreOS on compute-0 node tags: compute @@ -34,13 +36,13 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.135::9.60.86.1:255.255.254.0:compute-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" --noautoconsole - when: compute-0_check.failed == true + when: compute_0_check.failed == true - name: pause 15 minutes tags: compute pause: minutes: 15 - when: compute-0_check.failed == true + when: compute_0_check.failed == true - name: install CoreOS on compute-1 node tags: compute @@ -52,10 +54,10 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.134::9.60.86.1:255.255.254.0:compute-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" --noautoconsole - when: compute-1_check.failed == true + when: compute_1_check.failed == true - name: pause 15 minutes tags: compute pause: minutes: 15 - when: compute-1_check.failed == true \ No newline at end of file + when: compute_1_check.failed == true \ No newline at end of file diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index c0bb49e3..9d04ef68 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ 
b/roles/create_control_nodes/tasks/main.yaml @@ -5,36 +5,39 @@ community.libvirt.virt: name: control-0 command: status - register: control-0_check + register: control_0_check + ignore_errors: yes - name: print status of control-0 tags: control debug: - var: control-0_check + var: control_0_check - name: check if control-1 already exists tags: control community.libvirt.virt: name: control-1 command: status - register: control-1_check + register: control_1_check + ignore_errors: yes - name: print status of control-1 tags: control debug: - var: control-1_check + var: control_1_check - name: check if control-2 already exists tags: control community.libvirt.virt: name: control-2 command: status - register: control-2_check + register: control_2_check + ignore_errors: yes - name: print status of control-2 tags: control debug: - var: control-2_check + var: control_2_check - name: install CoreOS on control-0 node tags: control @@ -46,13 +49,13 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.138::9.60.86.1:255.255.254.0:control-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" --noautoconsole - when: control-0_check.failed == true + when: control_0_check.failed == true - name: pause 15 minutes tags: control pause: minutes: 15 - when: control-0_check.failed == true + when: control_0_check.failed == true - name: install CoreOS on control-1 node tags: control @@ -64,13 +67,13 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.137::9.60.86.1:255.255.254.0:control-1:enc1:none:1500 
nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" --noautoconsole - when: control-1_check.failed == true + when: control_1_check.failed == true - name: pause 15 minutes tags: control pause: minutes: 15 - when: control-1_check.failed == true + when: control_1_check.failed == true - name: install CoreOS on control-2 node tags: control @@ -82,10 +85,10 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.136::9.60.86.1:255.255.254.0:control-2:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" --noautoconsole - when: control-2_check.failed == true + when: control_2_check.failed == true -- name: pause 15 minutes +- name: pause 45 minutes tags: control pause: - minutes: 15 - when: control-2_check.failed == true + minutes: 45 + when: control_2_check.failed == true diff --git a/roles/get-ocp/files/ocp_ssh_pub b/roles/get-ocp/files/ocp_ssh_pub new file mode 100644 index 00000000..e76e1d5b --- /dev/null +++ b/roles/get-ocp/files/ocp_ssh_pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8A2DcT4nRh8AT04kpIwCWGSSKRqAo9/M0Om2leuw2IDTCjDInCgdQ4kFuE11D/Z/bFiDgqQ92zAc/7MUA3qEgGWdX7/N1LfzIOv+RvHZ05Y5EH9TXsU+JLrc1Yk8ZOOOzn1a5B9hpimaxHeDMBkLxWrJToGdlzsL5livjUdk86xLzCcq3EjIX3mYv3gx6+/dg/Iz+z9reGIIN6+lbDt6d5ZpQ6kr1OfqUL3hNtn/cHwb9FzyHGRk1PpaQv6c3+pskWuc2RfZX88nTET+crDIzgCxK3yoB/jZi8d7DsB00ou4AxVCd14scNbqZyEfQbPBv39FSE02RfDY001Xcrlr9s2OMiXKY17KbiMUcFyRld3C40w7zT8Mp/jOQUL3Vpj4B85hu73azzV/TSOsXe0i5fthaokspaHGXGhdaR6GXbATU0u1bNVCeqdMjGDyQtIi4pLGopWgohsEl+/nYqy889tMo9zo1AyVzYMv2XKlkmzBBtW66EQHvG5jy2H7S5asBk7MH+ARzhmB7avfe8FdHFF6/O4YgMEeqOKXMW4Ffm6PchOqqL3EkSxvnq8WMUopOBSYS2ejV83dgf2c6/xqE2FwK9VfLiKm1CIZiGIfYAAR1FTGVo+Um6gO8zaKJLetHtkDGduzrAGBVWu67jBPYWY5cCmYu6g04GF7GuGz2ew== 
ocpz_distribution diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index f6acf193..17959d8d 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -17,6 +17,7 @@ url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/www/html/bin mode: '0755' + force: yes - name: get ocp initramfs tags: getocp,bastion @@ -24,6 +25,7 @@ url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/www/html/bin mode: '0755' + force: yes - name: get ocp rootfs tags: getocp,bastion @@ -31,6 +33,7 @@ url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin mode: '0755' + force: yes - name: create OCP download landing directory tags: getocp,bastion @@ -82,6 +85,13 @@ group: root mode: '0755' +- name: Fetch ssh key from bastion for use in install-config + tags: getocp,bastion + ansible.builtin.fetch: + src: ~/.ssh/id_rsa.pub + dest: roles/get-ocp/files/ocp_ssh_pub + flat: yes + - name: Use template file to create install-config tags: setup,getocp template: @@ -119,6 +129,7 @@ src: /ocpinst/bootstrap.ign dest: /var/www/html/ignition remote_src: yes + mode: '775' - name: Copy control plane Ignition file to web server tags: getocp,bastion @@ -126,6 +137,7 @@ src: /ocpinst/master.ign dest: /var/www/html/ignition remote_src: yes + mode: '775' - name: Copy worker Ignition file to web server tags: getocp,bastion @@ -133,3 +145,4 @@ src: /ocpinst/worker.ign dest: /var/www/html/ignition remote_src: yes + mode: '775' diff --git a/roles/get-ocp/templates/install-config.yaml.j2 b/roles/get-ocp/templates/install-config.yaml.j2 index 3a24f18d..29b0b930 100644 --- a/roles/get-ocp/templates/install-config.yaml.j2 +++ b/roles/get-ocp/templates/install-config.yaml.j2 @@ -23,4 +23,4 @@ platform: none: {} fips: {{ env_fips | 
default(false) }} pullSecret: '{{ env_pullSecret }}' -sshKey: {{ env_ssh_key_ocp }} \ No newline at end of file +sshKey: '{{ lookup('file', 'roles/get-ocp/files/ocp_ssh_pub') }}' \ No newline at end of file diff --git a/roles/get-ocp/vars/main.yaml b/roles/get-ocp/vars/main.yaml deleted file mode 100644 index 4d5fd6e5..00000000 --- a/roles/get-ocp/vars/main.yaml +++ /dev/null @@ -1 +0,0 @@ -env_ssh_key_ocp: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC5pKQNbo5MT+szVl6LvfgJVbvTwiRpAXmgPsCPesUTmQ6J4gqrQZz42GQeWdwWfLDo4K/NdvVSOWofE7Jhk9pC0JqYzp7OoZaqwSHABqA+yPAx8tdHX+qt7c6tylfzwTvd+a8MlZ3Xj1uhHdZc2dSmiQbXobI9bccEGwCQ7M8+CHWOJsrl8bWqx/nRbd/wcad7D+oN8/8uY8c8AO/xHmxJII4idTazS52ggS91GpMzPewfwWnx7A9ck65vLvjTFhniQjQsM0J3kgSzowQfKPpWSVfHXWaoDbVUbOiTk5awai0zhyaEB51pWuplZTqMrS5sJVzvflkwGx+tRfsakp7zu8OenvjZOGQYzrwlVX18a9r99V4IKc8AbV8wILQvBeCwZbwa+T/uWFdQcT2TDJkFrytagzgR2hv6pv3LNM4s/ybaVZkAX/VGQuXSc4V5al+x1UX74NFV4VDvnngo63NHHI1A2IDffRb+ijiAaKXn97+cC2CIKEHoot5Km7J9wCQW2JSaZycXRqdKWAsSL5hn0Io0MUnB1ReIEcYhn8MalbA12QUy0SDTYtDmYyA6g5JB6bulH/jWfHayZEANNjcP7FpuMa3lr/L3RnnYM1csE3xlvTQnaBmN+Qu4qQTUJ5TlmMvSmMCgsimAF1xWkstfm0S+OWVrST/A/3yP659OnQ== diff --git a/roles/install_ansible/tasks/main.yaml b/roles/install_ansible/tasks/main.yaml new file mode 100644 index 00000000..d9cdb4b6 --- /dev/null +++ b/roles/install_ansible/tasks/main.yaml @@ -0,0 +1,10 @@ +--- + +- name: install ansible dependencies on bastion + tags: bastion, ansible + command: "{{ item }}" + loop: + - subscription-manager repos --enable "codeready-builder-for-rhel-8-$(arch)-rpms" + - sudo yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + - ansible-galaxy collection install community.crypto + - ansible-galaxy collection install community.general \ No newline at end of file diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index 17a0c27d..650bf417 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -1,63 +1,32 @@ --- -- 
name: Allow all access to tcp port 8080 +- name: Add ports to firewalld tags: firewall,bastion - ansible.posix.firewalld: - port: 8080/tcp - permanent: yes - state: enabled - -- name: Allow all access to tcp port 80 - tags: firewall,bastion - ansible.posix.firewalld: - port: 80/tcp - permanent: yes - state: enabled - -- name: Allow all access to tcp port 443 - tags: firewall,bastion - ansible.posix.firewalld: - port: 443/tcp - permanent: yes - state: enabled - -- name: Allow all access to tcp port 4443 - tags: firewall,bastion - ansible.posix.firewalld: - port: 4443/tcp - permanent: yes - state: enabled - -- name: Allow all access to tcp port 6443 - tags: firewall,bastion - ansible.posix.firewalld: - port: 6443/tcp - permanent: yes - state: enabled - -- name: Allow all access to tcp port 22623 - tags: firewall,bastion - ansible.posix.firewalld: - port: 22623/tcp + firewalld: + port: "{{ item }}" permanent: yes state: enabled + loop: + - 8080/tcp + - 80/tcp + - 443/tcp + - 4443/tcp + - 6443/tcp + - 22623/tcp + - 53/udp -- name: Permit traffic in default zone for http +- name: Permit traffic in default zone for http and https tags: firewall,bastion ansible.posix.firewalld: - service: http - permanent: yes - state: enabled - -- name: Permit traffic in default zone for https - tags: firewall,bastion - ansible.posix.firewalld: - service: https + service: "{{ item }}" permanent: yes state: enabled + loop: + - http + - https - name: Ensure the default Apache port is 8080 - tags: httpconf,bastion + tags: httpconf,bastion.firewall lineinfile: path: /etc/httpd/conf/httpd.conf search_string: 'Listen 80' @@ -65,14 +34,21 @@ backup: yes - name: Ensure the SSL default port is 4443 - tags: httpconf,bastion + tags: httpconf,bastion,firewall replace: path: /etc/httpd/conf.d/ssl.conf regexp: '^Listen 443 https' replace: 'Listen 4443 https' backup: yes +- name: reload firewalld to reflect changes + tags: firewall,bastion + systemd: + name: firewalld + state: reloaded + - name: 
restart httpd + tags: firewall,bastion service: name: httpd state: restarted \ No newline at end of file diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml index b02c3799..916b0c63 100644 --- a/roles/ssh-ocp-key-gen/tasks/main.yaml +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Load in variables from env.yaml - tags: keymastr, getocp + tags: keymastr,getocp include_vars: env.yaml - name: Check to see if local .ssh directory exists @@ -11,12 +11,12 @@ register: ssh_directory_exists_check - name: Print results of .ssh directory check - tags: keymastr + tags: keymastr,bastion,getocp debug: var: ssh_directory_exists_check - name: Create .ssh local directory if it doesn't already exist - tags: keymastr, getocp + tags: keymastr,getocp file: path: ~/.ssh state: directory @@ -25,7 +25,7 @@ when: ssh_directory_exists_check is defined and ssh_directory_exists_check.stat.exists == false - name: Print results of ssh directory creation - tags: keymastr + tags: keymastr,getocp debug: var: ssh_directory_creation @@ -35,8 +35,8 @@ path: ~/.ssh/{{item}} register: ssh_key_file_exists_check with_items: - - "{{env_ssh_ocp_name}}" - - "'{{env_ssh_ocp_name}}'.pub" + - "id_rsa" + - "id_rsa.pub" - name: Print results of ssh key pair files check tags: keymastr,getocp @@ -46,9 +46,12 @@ - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key if it doesn't exist already tags: keymastr,getocp community.crypto.openssh_keypair: - path: ~/.ssh/{{ env_ssh_ocp_name }} - passphrase: "{{ env_ssh_ocp_pass }}" + path: ~/.ssh/id_rsa backend: opensshbin + owner: root + passphrase: "" + comment: "{{ env_ssh_ocp_comm }}" + regenerate: full_idempotence register: ssh_ocp when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false @@ -56,33 +59,4 @@ tags: keymastr,getocp debug: var: 
ssh_ocp.public_key - when: ssh_ocp.changed == true - -- name: set public key fact if generated just now - tags: keymastr,getocp - set_fact: - env_ssh_key_ocp: ssh_ocp.public_key - cacheable: yes - when: ssh_ocp.changed == true - -- name: delete the public key in host_vars/bastion/main.yaml if it is already there - tags: keymastr, getocp - lineinfile: - path: roles/get-ocp/vars/main.yaml - state: absent - regexp: "^env_ssh_key_ocp" - -- name: fill host_vars/bastion/main.yaml file with generated ssh key - tags: keymastr, getocp - lineinfile: - path: roles/get-ocp/vars/main.yaml - line: "env_ssh_key_ocp: '{{ ssh_ocp.public_key }}'" - when: ssh_ocp.changed == true - -- name: copy public key from .ssh folder if it was there already and move the contents to roles/get-ocp/vars/main.yaml - tags: keymastr, getocp - lineinfile: - path: roles/get-ocp/vars/main.yaml - regexp: "^{{ lookup('file', 'roles/get-ocp/vars/main.yaml') }}" - line: "env_ssh_key_ocp: {{ lookup('file', '~/.ssh/{{ env_ssh_ocp_name }}.pub') }}{{ lookup('file', 'roles/get-ocp/vars/main.yaml') }}" - when: ssh_ocp.changed == false \ No newline at end of file + when: ssh_ocp.changed == true \ No newline at end of file diff --git a/roles/wait_for_bootkube/tasks/main.yaml b/roles/wait_for_bootkube/tasks/main.yaml index 0899e363..c4555da4 100644 --- a/roles/wait_for_bootkube/tasks/main.yaml +++ b/roles/wait_for_bootkube/tasks/main.yaml @@ -2,6 +2,8 @@ --- +#when waiting for bootstrap to come up for the first time, wait for "github.com/openshift/cluster-bootstrap/pkg/start/status.go:66:" + - name: ssh to bootstrap from bastion command: ssh core@9.60.87.133 diff --git a/teardown.yaml b/teardown.yaml new file mode 100644 index 00000000..5359091c --- /dev/null +++ b/teardown.yaml @@ -0,0 +1,7 @@ +--- + +#needed: +#- shutdown and destroy bootstrap, control, and compute nodes +#- delete /ocpinst/ +#- delete ssh keys from known hosts in bastion and delete ssh keys from ~/.ssh/id_rsa* +#- delete ssh key from 
roles/get-ocp/files/ocp_ssh_pub \ No newline at end of file From d81f883dc033f424d3bb0e87c823fd99c6ba451a Mon Sep 17 00:00:00 2001 From: jacobemery Date: Sat, 28 Aug 2021 10:35:35 -0500 Subject: [PATCH 368/885] Troubleshooting the implementation of variables. Successful run. --- README.md | 5 +- main.yaml | 24 ++++-- roles/ansible_setup/tasks/main.yaml | 6 +- roles/connect_cluster/tasks/main.yaml | 19 +++++ roles/create_compute_nodes/tasks/main.yaml | 18 +++-- roles/create_control_nodes/tasks/main.yaml | 31 ++++---- roles/get-ocp/files/ocp_ssh_pub | 1 + roles/get-ocp/tasks/main.yaml | 13 ++++ .../get-ocp/templates/install-config.yaml.j2 | 2 +- roles/get-ocp/vars/main.yaml | 1 - roles/install_ansible/tasks/main.yaml | 10 +++ roles/set_firewall/tasks/main.yaml | 74 +++++++------------ roles/ssh-ocp-key-gen/tasks/main.yaml | 50 +++---------- roles/wait_for_bootkube/tasks/main.yaml | 2 + teardown.yaml | 7 ++ 15 files changed, 142 insertions(+), 121 deletions(-) create mode 100644 roles/connect_cluster/tasks/main.yaml create mode 100644 roles/get-ocp/files/ocp_ssh_pub delete mode 100644 roles/get-ocp/vars/main.yaml create mode 100644 roles/install_ansible/tasks/main.yaml create mode 100644 teardown.yaml diff --git a/README.md b/README.md index c1137109..a73874b4 100644 --- a/README.md +++ b/README.md @@ -49,6 +49,7 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing ## Tags: * setup = first-time setup of ansible +* prep = run all setup playbooks * bastion = configuration of bastion for OCP * keymastr = ssh key configuration and testing * bastionvm = creation of Bastion KVM guest @@ -60,7 +61,9 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing * getocp = download of OCP installer and http server configuration * haproxy = configuration of haproxy on bastion kvm guest * httpconf = configuration of httpd server on bastion kvm guest -* kvmhost = tasks to apply to KVM host for OCP cluster +* kvm_host = tasks to 
apply to KVM host for OCP cluster +* kvm_prep = tasks from the first set of kvm plays +* create_nodes = tasks from the second set of kvm plays * localhost = for tasks that apply to the local machine running Ansible * firewall = for tasks related to firewall settings * selinux = for tasks related to SELinux settings diff --git a/main.yaml b/main.yaml index e8c45e03..71840c5b 100644 --- a/main.yaml +++ b/main.yaml @@ -1,7 +1,7 @@ --- - hosts: localhost - tags: localhost + tags: localhost, prep connection: local become: false gather_facts: no @@ -15,7 +15,7 @@ - ssh_copy_id - hosts: kvm_host - tags: kvm_host + tags: kvm_host,kvm_prep become: true vars_files: - env.yaml @@ -39,7 +39,6 @@ - ssh_target_ip: "{{ env_ip_bastion }}" roles: - ssh_copy_id # to connect to bastion - - ssh-ocp-key-gen # for bastion to connect to nodes - hosts: bastion tags: bastion @@ -47,10 +46,12 @@ vars_files: - env.yaml vars: - - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] + - packages: [ 'ansible', 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: - check_ssh #- install_packages + #- install_ansible + - ssh-ocp-key-gen # for bastion to connect to nodes - set_selinux_permissive - set_firewall - dns @@ -59,7 +60,7 @@ - get-ocp - hosts: kvm_host - tags: kvm_host + tags: kvm_host,create_nodes become: true gather_facts: no roles: @@ -75,6 +76,19 @@ #roles: #- ssh_config_jump +#- hosts: bastion + #become: true + #gather_facts: no + #roles: + #- wait_for_bootstrap + +#- hosts: bastion + #tags: bastion,cluster + #become: true + #gather_facts: no + #roles: + #- connect_cluster + #- hosts: bastion #become: true #gather_facts: no diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index c2a1700e..89dc1d1d 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -33,8 +33,7 @@ - env_ssh_pass - env_ssh_ans_name - env_ssh_ans_pass - - env_ssh_ocp_name - - env_ssh_ocp_pass + - 
env_ssh_ocp_comm - dns_nameserver - default_gateway - netmask @@ -65,8 +64,7 @@ env_ssh_pass: "{{ env_ssh_pass }}" env_ssh_ans_name: "{{ env_ssh_ans_name }}" env_ssh_ans_pass: "{{ env_ssh_ans_pass }}" - env_ssh_ocp_name: "{{ env_ssh_ocp_name }}" - env_ssh_ocp_pass: "{{ env_ssh_ocp_pass }}" + env_ssh_ocp_comm: "{{ env_ssh_ocp_comm }}" dns_nameserver: "{{ dns_nameserver }}" default_gateway: "{{ default_gateway }}" netmask: "{{ netmask }}" diff --git a/roles/connect_cluster/tasks/main.yaml b/roles/connect_cluster/tasks/main.yaml new file mode 100644 index 00000000..cd6cc5be --- /dev/null +++ b/roles/connect_cluster/tasks/main.yaml @@ -0,0 +1,19 @@ +--- + +- name: export kube config file + tags: cluster + command: export KUBECONFIG=/ocpinst/auth/kubeconfig + +- name: check if system admin + command: oc whoami + register: whoami_check + failed_when: whoami_check.stdout != system:admin + +- name: get csr info + tags: cluster + command: oc get csr + register: csr + +- name: print csr info to terminal + debug: + var: csr.stdout \ No newline at end of file diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index f4632189..0514be05 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -5,24 +5,26 @@ community.libvirt.virt: name: compute-0 command: status - register: compute-0_check + register: compute_0_check + ignore_errors: yes - name: print status of compute-0 tags: compute debug: - var: compute-0_check + var: compute_0_check - name: check if compute-1 already exists tags: compute community.libvirt.virt: name: compute-1 command: status - register: compute-1_check + register: compute_1_check + ignore_errors: yes - name: print status of compute-1 tags: compute debug: - var: compute-1_check + var: compute_1_check - name: install CoreOS on compute-0 node tags: compute @@ -34,13 +36,13 @@ --location 
/var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.135::9.60.86.1:255.255.254.0:compute-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" --noautoconsole - when: compute-0_check.failed == true + when: compute_0_check.failed == true - name: pause 15 minutes tags: compute pause: minutes: 15 - when: compute-0_check.failed == true + when: compute_0_check.failed == true - name: install CoreOS on compute-1 node tags: compute @@ -52,10 +54,10 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.134::9.60.86.1:255.255.254.0:compute-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" --noautoconsole - when: compute-1_check.failed == true + when: compute_1_check.failed == true - name: pause 15 minutes tags: compute pause: minutes: 15 - when: compute-1_check.failed == true \ No newline at end of file + when: compute_1_check.failed == true \ No newline at end of file diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index c0bb49e3..9d04ef68 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -5,36 +5,39 @@ community.libvirt.virt: name: control-0 command: status - register: control-0_check + register: control_0_check + ignore_errors: yes - name: print status of control-0 tags: control debug: - var: control-0_check + var: control_0_check - name: check if control-1 already exists tags: control community.libvirt.virt: name: control-1 command: status 
- register: control-1_check + register: control_1_check + ignore_errors: yes - name: print status of control-1 tags: control debug: - var: control-1_check + var: control_1_check - name: check if control-2 already exists tags: control community.libvirt.virt: name: control-2 command: status - register: control-2_check + register: control_2_check + ignore_errors: yes - name: print status of control-2 tags: control debug: - var: control-2_check + var: control_2_check - name: install CoreOS on control-0 node tags: control @@ -46,13 +49,13 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.138::9.60.86.1:255.255.254.0:control-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" --noautoconsole - when: control-0_check.failed == true + when: control_0_check.failed == true - name: pause 15 minutes tags: control pause: minutes: 15 - when: control-0_check.failed == true + when: control_0_check.failed == true - name: install CoreOS on control-1 node tags: control @@ -64,13 +67,13 @@ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.137::9.60.86.1:255.255.254.0:control-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" --noautoconsole - when: control-1_check.failed == true + when: control_1_check.failed == true - name: pause 15 minutes tags: control pause: minutes: 15 - when: control-1_check.failed == true + when: control_1_check.failed == true - name: install CoreOS on control-2 node tags: control @@ -82,10 +85,10 @@ --location 
/var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.136::9.60.86.1:255.255.254.0:control-2:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" --noautoconsole - when: control-2_check.failed == true + when: control_2_check.failed == true -- name: pause 15 minutes +- name: pause 45 minutes tags: control pause: - minutes: 15 - when: control-2_check.failed == true + minutes: 45 + when: control_2_check.failed == true diff --git a/roles/get-ocp/files/ocp_ssh_pub b/roles/get-ocp/files/ocp_ssh_pub new file mode 100644 index 00000000..e76e1d5b --- /dev/null +++ b/roles/get-ocp/files/ocp_ssh_pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8A2DcT4nRh8AT04kpIwCWGSSKRqAo9/M0Om2leuw2IDTCjDInCgdQ4kFuE11D/Z/bFiDgqQ92zAc/7MUA3qEgGWdX7/N1LfzIOv+RvHZ05Y5EH9TXsU+JLrc1Yk8ZOOOzn1a5B9hpimaxHeDMBkLxWrJToGdlzsL5livjUdk86xLzCcq3EjIX3mYv3gx6+/dg/Iz+z9reGIIN6+lbDt6d5ZpQ6kr1OfqUL3hNtn/cHwb9FzyHGRk1PpaQv6c3+pskWuc2RfZX88nTET+crDIzgCxK3yoB/jZi8d7DsB00ou4AxVCd14scNbqZyEfQbPBv39FSE02RfDY001Xcrlr9s2OMiXKY17KbiMUcFyRld3C40w7zT8Mp/jOQUL3Vpj4B85hu73azzV/TSOsXe0i5fthaokspaHGXGhdaR6GXbATU0u1bNVCeqdMjGDyQtIi4pLGopWgohsEl+/nYqy889tMo9zo1AyVzYMv2XKlkmzBBtW66EQHvG5jy2H7S5asBk7MH+ARzhmB7avfe8FdHFF6/O4YgMEeqOKXMW4Ffm6PchOqqL3EkSxvnq8WMUopOBSYS2ejV83dgf2c6/xqE2FwK9VfLiKm1CIZiGIfYAAR1FTGVo+Um6gO8zaKJLetHtkDGduzrAGBVWu67jBPYWY5cCmYu6g04GF7GuGz2ew== ocpz_distribution diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index f6acf193..17959d8d 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -17,6 +17,7 @@ url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x dest: /var/www/html/bin mode: '0755' + force: yes - name: get ocp initramfs 
tags: getocp,bastion @@ -24,6 +25,7 @@ url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img dest: /var/www/html/bin mode: '0755' + force: yes - name: get ocp rootfs tags: getocp,bastion @@ -31,6 +33,7 @@ url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img dest: /var/www/html/bin mode: '0755' + force: yes - name: create OCP download landing directory tags: getocp,bastion @@ -82,6 +85,13 @@ group: root mode: '0755' +- name: Fetch ssh key from bastion for use in install-config + tags: getocp,bastion + ansible.builtin.fetch: + src: ~/.ssh/id_rsa.pub + dest: roles/get-ocp/files/ocp_ssh_pub + flat: yes + - name: Use template file to create install-config tags: setup,getocp template: @@ -119,6 +129,7 @@ src: /ocpinst/bootstrap.ign dest: /var/www/html/ignition remote_src: yes + mode: '775' - name: Copy control plane Ignition file to web server tags: getocp,bastion @@ -126,6 +137,7 @@ src: /ocpinst/master.ign dest: /var/www/html/ignition remote_src: yes + mode: '775' - name: Copy worker Ignition file to web server tags: getocp,bastion @@ -133,3 +145,4 @@ src: /ocpinst/worker.ign dest: /var/www/html/ignition remote_src: yes + mode: '775' diff --git a/roles/get-ocp/templates/install-config.yaml.j2 b/roles/get-ocp/templates/install-config.yaml.j2 index 3a24f18d..29b0b930 100644 --- a/roles/get-ocp/templates/install-config.yaml.j2 +++ b/roles/get-ocp/templates/install-config.yaml.j2 @@ -23,4 +23,4 @@ platform: none: {} fips: {{ env_fips | default(false) }} pullSecret: '{{ env_pullSecret }}' -sshKey: {{ env_ssh_key_ocp }} \ No newline at end of file +sshKey: '{{ lookup('file', 'roles/get-ocp/files/ocp_ssh_pub') }}' \ No newline at end of file diff --git a/roles/get-ocp/vars/main.yaml b/roles/get-ocp/vars/main.yaml deleted file mode 100644 index 4d5fd6e5..00000000 --- a/roles/get-ocp/vars/main.yaml +++ /dev/null @@ -1 +0,0 @@ -env_ssh_key_ocp: 
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC5pKQNbo5MT+szVl6LvfgJVbvTwiRpAXmgPsCPesUTmQ6J4gqrQZz42GQeWdwWfLDo4K/NdvVSOWofE7Jhk9pC0JqYzp7OoZaqwSHABqA+yPAx8tdHX+qt7c6tylfzwTvd+a8MlZ3Xj1uhHdZc2dSmiQbXobI9bccEGwCQ7M8+CHWOJsrl8bWqx/nRbd/wcad7D+oN8/8uY8c8AO/xHmxJII4idTazS52ggS91GpMzPewfwWnx7A9ck65vLvjTFhniQjQsM0J3kgSzowQfKPpWSVfHXWaoDbVUbOiTk5awai0zhyaEB51pWuplZTqMrS5sJVzvflkwGx+tRfsakp7zu8OenvjZOGQYzrwlVX18a9r99V4IKc8AbV8wILQvBeCwZbwa+T/uWFdQcT2TDJkFrytagzgR2hv6pv3LNM4s/ybaVZkAX/VGQuXSc4V5al+x1UX74NFV4VDvnngo63NHHI1A2IDffRb+ijiAaKXn97+cC2CIKEHoot5Km7J9wCQW2JSaZycXRqdKWAsSL5hn0Io0MUnB1ReIEcYhn8MalbA12QUy0SDTYtDmYyA6g5JB6bulH/jWfHayZEANNjcP7FpuMa3lr/L3RnnYM1csE3xlvTQnaBmN+Qu4qQTUJ5TlmMvSmMCgsimAF1xWkstfm0S+OWVrST/A/3yP659OnQ== diff --git a/roles/install_ansible/tasks/main.yaml b/roles/install_ansible/tasks/main.yaml new file mode 100644 index 00000000..d9cdb4b6 --- /dev/null +++ b/roles/install_ansible/tasks/main.yaml @@ -0,0 +1,10 @@ +--- + +- name: install ansible dependencies on bastion + tags: bastion, ansible + command: "{{ item }}" + loop: + - subscription-manager repos --enable "codeready-builder-for-rhel-8-$(arch)-rpms" + - sudo yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + - ansible-galaxy collection install community.crypto + - ansible-galaxy collection install community.general \ No newline at end of file diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index 17a0c27d..650bf417 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -1,63 +1,32 @@ --- -- name: Allow all access to tcp port 8080 +- name: Add ports to firewalld tags: firewall,bastion - ansible.posix.firewalld: - port: 8080/tcp - permanent: yes - state: enabled - -- name: Allow all access to tcp port 80 - tags: firewall,bastion - ansible.posix.firewalld: - port: 80/tcp - permanent: yes - state: enabled - -- name: Allow all access to tcp port 443 - tags: firewall,bastion - 
ansible.posix.firewalld: - port: 443/tcp - permanent: yes - state: enabled - -- name: Allow all access to tcp port 4443 - tags: firewall,bastion - ansible.posix.firewalld: - port: 4443/tcp - permanent: yes - state: enabled - -- name: Allow all access to tcp port 6443 - tags: firewall,bastion - ansible.posix.firewalld: - port: 6443/tcp - permanent: yes - state: enabled - -- name: Allow all access to tcp port 22623 - tags: firewall,bastion - ansible.posix.firewalld: - port: 22623/tcp + firewalld: + port: "{{ item }}" permanent: yes state: enabled + loop: + - 8080/tcp + - 80/tcp + - 443/tcp + - 4443/tcp + - 6443/tcp + - 22623/tcp + - 53/udp -- name: Permit traffic in default zone for http +- name: Permit traffic in default zone for http and https tags: firewall,bastion ansible.posix.firewalld: - service: http - permanent: yes - state: enabled - -- name: Permit traffic in default zone for https - tags: firewall,bastion - ansible.posix.firewalld: - service: https + service: "{{ item }}" permanent: yes state: enabled + loop: + - http + - https - name: Ensure the default Apache port is 8080 - tags: httpconf,bastion + tags: httpconf,bastion.firewall lineinfile: path: /etc/httpd/conf/httpd.conf search_string: 'Listen 80' @@ -65,14 +34,21 @@ backup: yes - name: Ensure the SSL default port is 4443 - tags: httpconf,bastion + tags: httpconf,bastion,firewall replace: path: /etc/httpd/conf.d/ssl.conf regexp: '^Listen 443 https' replace: 'Listen 4443 https' backup: yes +- name: reload firewalld to reflect changes + tags: firewall,bastion + systemd: + name: firewalld + state: reloaded + - name: restart httpd + tags: firewall,bastion service: name: httpd state: restarted \ No newline at end of file diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh-ocp-key-gen/tasks/main.yaml index b02c3799..916b0c63 100644 --- a/roles/ssh-ocp-key-gen/tasks/main.yaml +++ b/roles/ssh-ocp-key-gen/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Load in variables from env.yaml - tags: keymastr, 
getocp + tags: keymastr,getocp include_vars: env.yaml - name: Check to see if local .ssh directory exists @@ -11,12 +11,12 @@ register: ssh_directory_exists_check - name: Print results of .ssh directory check - tags: keymastr + tags: keymastr,bastion,getocp debug: var: ssh_directory_exists_check - name: Create .ssh local directory if it doesn't already exist - tags: keymastr, getocp + tags: keymastr,getocp file: path: ~/.ssh state: directory @@ -25,7 +25,7 @@ when: ssh_directory_exists_check is defined and ssh_directory_exists_check.stat.exists == false - name: Print results of ssh directory creation - tags: keymastr + tags: keymastr,getocp debug: var: ssh_directory_creation @@ -35,8 +35,8 @@ path: ~/.ssh/{{item}} register: ssh_key_file_exists_check with_items: - - "{{env_ssh_ocp_name}}" - - "'{{env_ssh_ocp_name}}'.pub" + - "id_rsa" + - "id_rsa.pub" - name: Print results of ssh key pair files check tags: keymastr,getocp @@ -46,9 +46,12 @@ - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key if it doesn't exist already tags: keymastr,getocp community.crypto.openssh_keypair: - path: ~/.ssh/{{ env_ssh_ocp_name }} - passphrase: "{{ env_ssh_ocp_pass }}" + path: ~/.ssh/id_rsa backend: opensshbin + owner: root + passphrase: "" + comment: "{{ env_ssh_ocp_comm }}" + regenerate: full_idempotence register: ssh_ocp when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false @@ -56,33 +59,4 @@ tags: keymastr,getocp debug: var: ssh_ocp.public_key - when: ssh_ocp.changed == true - -- name: set public key fact if generated just now - tags: keymastr,getocp - set_fact: - env_ssh_key_ocp: ssh_ocp.public_key - cacheable: yes - when: ssh_ocp.changed == true - -- name: delete the public key in host_vars/bastion/main.yaml if it is already there - tags: keymastr, getocp - lineinfile: - path: roles/get-ocp/vars/main.yaml - state: 
absent - regexp: "^env_ssh_key_ocp" - -- name: fill host_vars/bastion/main.yaml file with generated ssh key - tags: keymastr, getocp - lineinfile: - path: roles/get-ocp/vars/main.yaml - line: "env_ssh_key_ocp: '{{ ssh_ocp.public_key }}'" - when: ssh_ocp.changed == true - -- name: copy public key from .ssh folder if it was there already and move the contents to roles/get-ocp/vars/main.yaml - tags: keymastr, getocp - lineinfile: - path: roles/get-ocp/vars/main.yaml - regexp: "^{{ lookup('file', 'roles/get-ocp/vars/main.yaml') }}" - line: "env_ssh_key_ocp: {{ lookup('file', '~/.ssh/{{ env_ssh_ocp_name }}.pub') }}{{ lookup('file', 'roles/get-ocp/vars/main.yaml') }}" - when: ssh_ocp.changed == false \ No newline at end of file + when: ssh_ocp.changed == true \ No newline at end of file diff --git a/roles/wait_for_bootkube/tasks/main.yaml b/roles/wait_for_bootkube/tasks/main.yaml index 0899e363..c4555da4 100644 --- a/roles/wait_for_bootkube/tasks/main.yaml +++ b/roles/wait_for_bootkube/tasks/main.yaml @@ -2,6 +2,8 @@ --- +#when waiting for bootstrap to come up for the first time, wait for "github.com/openshift/cluster-bootstrap/pkg/start/status.go:66:" + - name: ssh to bootstrap from bastion command: ssh core@9.60.87.133 diff --git a/teardown.yaml b/teardown.yaml new file mode 100644 index 00000000..5359091c --- /dev/null +++ b/teardown.yaml @@ -0,0 +1,7 @@ +--- + +#needed: +#- shutdown and destroy bootstrap, control, and compute nodes +#- delete /ocpinst/ +#- delete ssh keys from known hosts in bastion and delete ssh keys from ~/.ssh/id_rsa* +#- delete ssh key from roles/get-ocp/files/ocp_ssh_pub \ No newline at end of file From e7bd89e3aafba77d49b6277f24caa06ee8ff37a2 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Sat, 28 Aug 2021 11:19:35 -0500 Subject: [PATCH 369/885] Updated steps in readme to include steps after the main playbook runs. 
--- README.md | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index a73874b4..65fe25f9 100644 --- a/README.md +++ b/README.md @@ -31,20 +31,22 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing * OpenShift pull secret (for inputting it into env.yaml) * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. -* Step 4: Run ansible-setup-linux/mac.sh shell script in the main directory (run ./ansible-setup-linux/mac.sh in terminal) depending on your operating system to download required Ansible modules and programs. First change permissions by running "chmod 755 " (choose one of linux or mac) -* Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: - "ansible-playbook main.yaml --ask-become-pass" -* Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. -* Step 6: When create_bastion playbook runs, open cockpit at < URL > and complete installation with these options: +* Step 4: Run the appropriate Ansible setup shell script, which can be found in the main directory. While in the main directory, run "./ansible-setup-mac.sh" or "./ansible-setup-linux.sh" depending on your operating system to download the required Ansible modules and packages. +* Step 5: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass". Watch Ansible as it completes the installation, correcting errors if they arise. If the process fails in error, you should be able to run the same shell command to start the process from the top. 
Alternatively, use tags to run only the tasks that have that tag. See list of tags below for reference. +* Step 6: Once the create_bastion playbook runs, open the cockpit at :9090>, go to the "Virtual Machines" tab, and complete the bastion's installation with these options: - list options here - list options here - list options here -* Step 7: When the playbooks for creating nodes run, watch them on the cockpit. When you see "< node-name > login" press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to abort. -* Step 8: approve certs... need more detail here -* Step 9: Shutdown and destroy bootstrap (or optionally convert bootstrap to worker node) -* Step 8: Verify installation by running: - "./openshift-install --dir=/ocpinst wait-for install-complete" -* Step 9: ./openshift-install create cluster +* Step 7: When the playbooks for creating nodes run, watch them on the cockpit at "https://:9090". Go to the "Virtual Machines" tab and click on the VM you created. Once the operating system installs, it will power down. Click the blue "Run" button to start it back up. It will then run some more setup. Then, when you see " login" come back to the terminal here where you ran ansible and press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to stop the process. +* Step 8: Repeat Step 7 with the Bootstrap and Control nodes. Then, SSH into the bastion (run "ssh " in the terminal). From there, change to root user (run "su root"). Then ssh into the bootstrap ("ssh core@") and run "journalctl -u bootkube.service" to watch the bootstrap connect to the control nodes (hold space to get to the bottom of the log). Expect lots of errors, as all the nodes may not be entirely up yet. Once all control nodes are connected, the bootkube log will read "bootkube.service complete". +* Step 9: Repeat Step 7 with the Compute nodes. 
+* Step 10: Once all the Compute nodes up and prompting login, log in to the bastion and run "export KUBECONFIG=/ocpinst/auth/kubeconfig". Then run "oc get csr". It will bring up a list of certificates that need approval. For each cert that is "Pending", run "oc adm certiciate approve ". The csr names will be something like "csr-v8qqv". Once you approve all the certificates, double check that there are not more that have appeared by running "oc get csr" again. Once all certs are "Approved, Issued". You're ready for the next step. +* Step 11: From the bastion, run "oc get nodes". Once all nodes are "Ready", run "oc get clusteroperators". Wait for them to all read "True" under the "Available" column. This may take hours. +* Step 8: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" +* Step 9: Running the command in the above step will give you some information about how to log into the OpenShift cluster's dashboard. Copy the URL into a web browser and use the provided "kubeadmin" login and password for first time sign-on. +* Step 10: Celebrate! Your OpenShift cluster installation is complete. + +* Optional: Leave the bootstrap running as is, shut it down and destroy it, or convert it into a compute node. 
## Tags: @@ -54,8 +56,8 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing * keymastr = ssh key configuration and testing * bastionvm = creation of Bastion KVM guest * boostrap = creation of Boostrap KVM guest -* compute = creation of the Compute nodes KVM guests (minimum 2) -* control = creation of the Control nodes KVM guests (minimum 3) +* compute = creation of the Compute nodes KVM guests +* control = creation of the Control nodes KVM guests * ssh-copy-id = for copying ssh id * dns = configuration of dns server on bastion * getocp = download of OCP installer and http server configuration From 0cfc7bfa91c012feeb96f90063f9a32c9b226c04 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Sat, 28 Aug 2021 11:19:35 -0500 Subject: [PATCH 370/885] Updated steps in readme to include steps after the main playbook runs. --- README.md | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index a73874b4..65fe25f9 100644 --- a/README.md +++ b/README.md @@ -31,20 +31,22 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing * OpenShift pull secret (for inputting it into env.yaml) * Step 2: Fill out the required variables for your specific installation in the env.yaml file * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. -* Step 4: Run ansible-setup-linux/mac.sh shell script in the main directory (run ./ansible-setup-linux/mac.sh in terminal) depending on your operating system to download required Ansible modules and programs. 
First change permissions by running "chmod 755 " (choose one of linux or mac) -* Step 4: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: - "ansible-playbook main.yaml --ask-become-pass" -* Step 5: Watch Ansible as it completes the installation, correcting errors if they arise. -* Step 6: When create_bastion playbook runs, open cockpit at < URL > and complete installation with these options: +* Step 4: Run the appropriate Ansible setup shell script, which can be found in the main directory. While in the main directory, run "./ansible-setup-mac.sh" or "./ansible-setup-linux.sh" depending on your operating system to download the required Ansible modules and packages. +* Step 5: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass". Watch Ansible as it completes the installation, correcting errors if they arise. If the process fails in error, you should be able to run the same shell command to start the process from the top. Alternatively, use tags to run only the tasks that have that tag. See list of tags below for reference. +* Step 6: Once the create_bastion playbook runs, open the cockpit at :9090>, go to the "Virtual Machines" tab, and complete the bastion's installation with these options: - list options here - list options here - list options here -* Step 7: When the playbooks for creating nodes run, watch them on the cockpit. When you see "< node-name > login" press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to abort. -* Step 8: approve certs... 
need more detail here -* Step 9: Shutdown and destroy bootstrap (or optionally convert bootstrap to worker node) -* Step 8: Verify installation by running: - "./openshift-install --dir=/ocpinst wait-for install-complete" -* Step 9: ./openshift-install create cluster +* Step 7: When the playbooks for creating nodes run, watch them on the cockpit at "https://:9090". Go to the "Virtual Machines" tab and click on the VM you created. Once the operating system installs, it will power down. Click the blue "Run" button to start it back up. It will then run some more setup. Then, when you see " login" come back to the terminal here where you ran ansible and press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to stop the process. +* Step 8: Repeat Step 7 with the Bootstrap and Control nodes. Then, SSH into the bastion (run "ssh " in the terminal). From there, change to root user (run "su root"). Then ssh into the bootstrap ("ssh core@") and run "journalctl -u bootkube.service" to watch the bootstrap connect to the control nodes (hold space to get to the bottom of the log). Expect lots of errors, as all the nodes may not be entirely up yet. Once all control nodes are connected, the bootkube log will read "bootkube.service complete". +* Step 9: Repeat Step 7 with the Compute nodes. +* Step 10: Once all the Compute nodes up and prompting login, log in to the bastion and run "export KUBECONFIG=/ocpinst/auth/kubeconfig". Then run "oc get csr". It will bring up a list of certificates that need approval. For each cert that is "Pending", run "oc adm certiciate approve ". The csr names will be something like "csr-v8qqv". Once you approve all the certificates, double check that there are not more that have appeared by running "oc get csr" again. Once all certs are "Approved, Issued". You're ready for the next step. +* Step 11: From the bastion, run "oc get nodes". Once all nodes are "Ready", run "oc get clusteroperators". 
Wait for them to all read "True" under the "Available" column. This may take hours. +* Step 8: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" +* Step 9: Running the command in the above step will give you some information about how to log into the OpenShift cluster's dashboard. Copy the URL into a web browser and use the provided "kubeadmin" login and password for first time sign-on. +* Step 10: Celebrate! Your OpenShift cluster installation is complete. + +* Optional: Leave the bootstrap running as is, shut it down and destroy it, or convert it into a compute node. ## Tags: @@ -54,8 +56,8 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing * keymastr = ssh key configuration and testing * bastionvm = creation of Bastion KVM guest * boostrap = creation of Boostrap KVM guest -* compute = creation of the Compute nodes KVM guests (minimum 2) -* control = creation of the Control nodes KVM guests (minimum 3) +* compute = creation of the Compute nodes KVM guests +* control = creation of the Control nodes KVM guests * ssh-copy-id = for copying ssh id * dns = configuration of dns server on bastion * getocp = download of OCP installer and http server configuration From 2f6d49a06b87b10fbebda629975d6ebfa7f7f640 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Sat, 28 Aug 2021 11:27:12 -0500 Subject: [PATCH 371/885] Updated readme --- README.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 65fe25f9..544ad3c7 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ ## Scope -The goal of this playbook is to setup and deploy an OpenShift cluster utilizing KVM as the virtualization method. +The goal of this playbook is to setup and deploy an OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. 
## Supported operating systems for the localhost (the starting workstation) are: * Linux (RedHat and Debian families) @@ -10,12 +10,12 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing ## Pre-requisites: -* Python3 intalled on your local computer [how-to:] (https://realpython.com/installing-python/) -* Ansible installed on your local computer [how-to:] (https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) -* If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: - * homebrew package manager installed ( how-to: https://brew.sh/ ) - * Updated software for command line tools ( run "softwareupdate --all --install" in your terminal ) -* A logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: +* Python3 intalled on your local computer (how-to: https://realpython.com/installing-python/) +* Ansible installed on your local computer (how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) +* If you are using Mac OS X for your localhost workstation to run Ansible, you also need to have: + * homebrew package manager installed (how-to: https://brew.sh/) + * Updated software for command line tools (run "softwareupdate --all --install" in your terminal) +* Access to a logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: * 6 Integrated Facilities for Linux (IFLs) * 75 GB of RAM * 1 TB of disk space @@ -25,12 +25,12 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing ## When you are ready: -* Step 1: Download this Git repository to a folder on your local computer +* Step 1: Clone this Git repository to a folder on your local computer. 
* Step 2: Go to to: * download your local command line tools (oc and kubectl) - * OpenShift pull secret (for inputting it into env.yaml) -* Step 2: Fill out the required variables for your specific installation in the env.yaml file -* Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. + * copy the OpenShift pull secret (for inputting it into env.yaml) in the next step +* Step 2: Fill out all of the required variables for your specific installation in the env.yaml file +* Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf) or have them pre-defined by your networking team. And place them in the roles/dns/files folder. * Step 4: Run the appropriate Ansible setup shell script, which can be found in the main directory. While in the main directory, run "./ansible-setup-mac.sh" or "./ansible-setup-linux.sh" depending on your operating system to download the required Ansible modules and packages. * Step 5: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass". Watch Ansible as it completes the installation, correcting errors if they arise. If the process fails in error, you should be able to run the same shell command to start the process from the top. Alternatively, use tags to run only the tasks that have that tag. See list of tags below for reference. 
* Step 6: Once the create_bastion playbook runs, open the cockpit at :9090>, go to the "Virtual Machines" tab, and complete the bastion's installation with these options: From f3974dc78717cb5c4e3b0c62e6e9f186405b204e Mon Sep 17 00:00:00 2001 From: jacobemery Date: Sat, 28 Aug 2021 11:27:12 -0500 Subject: [PATCH 372/885] Updated readme --- README.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 65fe25f9..544ad3c7 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ ## Scope -The goal of this playbook is to setup and deploy an OpenShift cluster utilizing KVM as the virtualization method. +The goal of this playbook is to setup and deploy an OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. ## Supported operating systems for the localhost (the starting workstation) are: * Linux (RedHat and Debian families) @@ -10,12 +10,12 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing ## Pre-requisites: -* Python3 intalled on your local computer [how-to:] (https://realpython.com/installing-python/) -* Ansible installed on your local computer [how-to:] (https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) -* If you are using Mac OS X for your localhost workstation to run Ansible, you need to have: - * homebrew package manager installed ( how-to: https://brew.sh/ ) - * Updated software for command line tools ( run "softwareupdate --all --install" in your terminal ) -* A logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: +* Python3 intalled on your local computer (how-to: https://realpython.com/installing-python/) +* Ansible installed on your local computer (how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) +* If you are using Mac OS X for your localhost workstation to run Ansible, you also need to have: + * homebrew package manager 
installed (how-to: https://brew.sh/) + * Updated software for command line tools (run "softwareupdate --all --install" in your terminal) +* Access to a logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: * 6 Integrated Facilities for Linux (IFLs) * 75 GB of RAM * 1 TB of disk space @@ -25,12 +25,12 @@ The goal of this playbook is to setup and deploy an OpenShift cluster utilizing ## When you are ready: -* Step 1: Download this Git repository to a folder on your local computer +* Step 1: Clone this Git repository to a folder on your local computer. * Step 2: Go to to: * download your local command line tools (oc and kubectl) - * OpenShift pull secret (for inputting it into env.yaml) -* Step 2: Fill out the required variables for your specific installation in the env.yaml file -* Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf configuration files) or have it pre-defined by your networking team. And place them in the roles/dns/files folder. + * copy the OpenShift pull secret (for inputting it into env.yaml) in the next step +* Step 2: Fill out all of the required variables for your specific installation in the env.yaml file +* Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf) or have them pre-defined by your networking team. And place them in the roles/dns/files folder. * Step 4: Run the appropriate Ansible setup shell script, which can be found in the main directory. While in the main directory, run "./ansible-setup-mac.sh" or "./ansible-setup-linux.sh" depending on your operating system to download the required Ansible modules and packages. * Step 5: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass". Watch Ansible as it completes the installation, correcting errors if they arise. 
If the process fails in error, you should be able to run the same shell command to start the process from the top. Alternatively, use tags to run only the tasks that have that tag. See list of tags below for reference. * Step 6: Once the create_bastion playbook runs, open the cockpit at :9090>, go to the "Virtual Machines" tab, and complete the bastion's installation with these options: From e979463a16f85d0fe32c394b550ab1df08dc829b Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Sun, 29 Aug 2021 12:22:23 -0500 Subject: [PATCH 373/885] Added UPI to scope in README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 544ad3c7..aecd9157 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ ## Scope -The goal of this playbook is to setup and deploy an OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. +The goal of this playbook is to setup and deploy a User Provisioned Infrastructure (UPI) OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. ## Supported operating systems for the localhost (the starting workstation) are: * Linux (RedHat and Debian families) From ac76949256c0af403474a5d42deb2657d2ae4e91 Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Sun, 29 Aug 2021 12:22:23 -0500 Subject: [PATCH 374/885] Added UPI to scope in README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 544ad3c7..aecd9157 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ ## Scope -The goal of this playbook is to setup and deploy an OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. +The goal of this playbook is to setup and deploy a User Provisioned Infrastructure (UPI) OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. 
## Supported operating systems for the localhost (the starting workstation) are: * Linux (RedHat and Debian families) From 8f0ab056453d639eb9a52e89ef8e45f347a06218 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 12:44:47 -0500 Subject: [PATCH 375/885] Added a teardown playbook and updated the readme with information on how to use it. --- README.md | 35 +++++++++++++++++++++++------- main.yaml | 2 +- roles/mount_rhel/tasks/main.yaml | 4 ++-- roles/reset_files/tasks/main.yaml | 0 roles/teardown_vms/tasks/main.yaml | 0 teardown.yaml | 28 +++++++++++++++++++++++- 6 files changed, 57 insertions(+), 12 deletions(-) create mode 100644 roles/reset_files/tasks/main.yaml create mode 100644 roles/teardown_vms/tasks/main.yaml diff --git a/README.md b/README.md index aecd9157..10dc842f 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,14 @@ The goal of this playbook is to setup and deploy a User Provisioned Infrastructu * 6 Integrated Facilities for Linux (IFLs) * 75 GB of RAM * 1 TB of disk space -* On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed +* On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed with the following options enabled: + * server + * hardware monitoring utilities + * networking file system client + * remote management for linux + * virtualization hypervisor + * headless management + * system tools * On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses * Fully Qualified Domain Names (FQDN) names for all IPv4 addresses @@ -33,21 +40,31 @@ The goal of this playbook is to setup and deploy a User Provisioned Infrastructu * Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf) or have them pre-defined by your networking team. And place them in the roles/dns/files folder. * Step 4: Run the appropriate Ansible setup shell script, which can be found in the main directory. 
While in the main directory, run "./ansible-setup-mac.sh" or "./ansible-setup-linux.sh" depending on your operating system to download the required Ansible modules and packages. * Step 5: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass". Watch Ansible as it completes the installation, correcting errors if they arise. If the process fails in error, you should be able to run the same shell command to start the process from the top. Alternatively, use tags to run only the tasks that have that tag. See list of tags below for reference. -* Step 6: Once the create_bastion playbook runs, open the cockpit at :9090>, go to the "Virtual Machines" tab, and complete the bastion's installation with these options: - - list options here - - list options here - - list options here +* Step 6: Once the create_bastion playbook runs, open the cockpit at :9090>, go to the "Virtual Machines" tab, and complete the bastion's installation with these options enabled: + * server + * hardware monitoring utilities + * networking file system client + * remote management for linux + * headless mgmt + * system tools + * basic web server + * network servers * Step 7: When the playbooks for creating nodes run, watch them on the cockpit at "https://:9090". Go to the "Virtual Machines" tab and click on the VM you created. Once the operating system installs, it will power down. Click the blue "Run" button to start it back up. It will then run some more setup. Then, when you see " login" come back to the terminal here where you ran ansible and press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to stop the process. * Step 8: Repeat Step 7 with the Bootstrap and Control nodes. Then, SSH into the bastion (run "ssh " in the terminal). From there, change to root user (run "su root"). 
Then ssh into the bootstrap ("ssh core@") and run "journalctl -u bootkube.service" to watch the bootstrap connect to the control nodes (hold space to get to the bottom of the log). Expect lots of errors, as all the nodes may not be entirely up yet. Once all control nodes are connected, the bootkube log will read "bootkube.service complete". * Step 9: Repeat Step 7 with the Compute nodes. * Step 10: Once all the Compute nodes up and prompting login, log in to the bastion and run "export KUBECONFIG=/ocpinst/auth/kubeconfig". Then run "oc get csr". It will bring up a list of certificates that need approval. For each cert that is "Pending", run "oc adm certiciate approve ". The csr names will be something like "csr-v8qqv". Once you approve all the certificates, double check that there are not more that have appeared by running "oc get csr" again. Once all certs are "Approved, Issued". You're ready for the next step. * Step 11: From the bastion, run "oc get nodes". Once all nodes are "Ready", run "oc get clusteroperators". Wait for them to all read "True" under the "Available" column. This may take hours. -* Step 8: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" -* Step 9: Running the command in the above step will give you some information about how to log into the OpenShift cluster's dashboard. Copy the URL into a web browser and use the provided "kubeadmin" login and password for first time sign-on. -* Step 10: Celebrate! Your OpenShift cluster installation is complete. +* Step 12: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" +* Step 13: Running the command in the above step will give you some information about how to log into the OpenShift cluster's dashboard. Copy the URL into a web browser and use the provided "kubeadmin" login and password for first time sign-on. +* Step 14: Celebrate! Your OpenShift cluster installation is complete. 
* Optional: Leave the bootstrap running as is, shut it down and destroy it, or convert it into a compute node. +## Teardown: +* If you would like to teardown your VMs, run "ansible-playbook teardown.yaml --ask-become-pass --tags "partial/full". Choose either the partial or full tag. +* Use the "full" tag to teardown all VMs running on your KVM host. Once you run the full teardown, to start the main.yaml playbook back from that point, run with tags "bastionvm,bastion,create_nodes". +* Use the "partial" tag to teardown to the point where nothing except the bastion is running on your KVM host. Once you run the partial teardown, to start the main.yaml playbook back from that point, run with tags "bastion,create_nodes". + ## Tags: * setup = first-time setup of ansible @@ -69,3 +86,5 @@ The goal of this playbook is to setup and deploy a User Provisioned Infrastructu * localhost = for tasks that apply to the local machine running Ansible * firewall = for tasks related to firewall settings * selinux = for tasks related to SELinux settings +* partial = for use with teardown.yaml to bring down VMs except bastion +* full = for use with teardown.yaml to bring down all VMs diff --git a/main.yaml b/main.yaml index 71840c5b..d577c31f 100644 --- a/main.yaml +++ b/main.yaml @@ -51,7 +51,7 @@ - check_ssh #- install_packages #- install_ansible - - ssh-ocp-key-gen # for bastion to connect to nodes + - ssh-ocp-key-gen # SSH key for bastion to connect to nodes - set_selinux_permissive - set_firewall - dns diff --git a/roles/mount_rhel/tasks/main.yaml b/roles/mount_rhel/tasks/main.yaml index b86b3454..62bb95c4 100644 --- a/roles/mount_rhel/tasks/main.yaml +++ b/roles/mount_rhel/tasks/main.yaml @@ -1,12 +1,12 @@ --- -- name: Check to see if local .ssh directory exists +- name: Check to see if rhcos core install directory already exists tags: keymastr stat: path: "/rhcos-install/" register: rhcos_mount -- name: Print results of .ssh directory check +- name: Print results of rhcos core 
install directory check tags: keymastr debug: var: rhcos_mount diff --git a/roles/reset_files/tasks/main.yaml b/roles/reset_files/tasks/main.yaml new file mode 100644 index 00000000..e69de29b diff --git a/roles/teardown_vms/tasks/main.yaml b/roles/teardown_vms/tasks/main.yaml new file mode 100644 index 00000000..e69de29b diff --git a/teardown.yaml b/teardown.yaml index 5359091c..2dc6a1ba 100644 --- a/teardown.yaml +++ b/teardown.yaml @@ -4,4 +4,30 @@ #- shutdown and destroy bootstrap, control, and compute nodes #- delete /ocpinst/ #- delete ssh keys from known hosts in bastion and delete ssh keys from ~/.ssh/id_rsa* -#- delete ssh key from roles/get-ocp/files/ocp_ssh_pub \ No newline at end of file +#- delete ssh key from roles/get-ocp/files/ocp_ssh_pub + +# Use the "full" tag when running this playbook to reset your installation back to the point where no VMs were running on the KVM host. After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastionvm,bastion,create_nodes'" +- hosts: kvm_host + tags: full + become: true + gather_facts: no + vars: + - vms: ['bastion', 'bootstrap', 'control-0', 'control-1', 'control-2', 'compute-0', 'compute-1'] + roles: + - teardown_vms + +# Use the "partial" tag when running this playbook to reset your installation back to the point where just the bastion node was running on the KVM host. 
After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastion,create_nodes'" +- hosts: bastion + tags: partial + become: true + gather_facts: no + - reset_files + +- hosts: kvm_host + tags: partial + become: true + gather_facts: no + vars: + - vms: ['bootstrap', 'control-0', 'control-1', 'control-2', 'compute-0', 'compute-1'] + roles: + - teardown_vms From 8313ecc414678856966d65ec6313c4dd6d4cef04 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 12:44:47 -0500 Subject: [PATCH 376/885] Added a teardown playbook and updated the readme with information on how to use it. --- README.md | 35 +++++++++++++++++++++++------- main.yaml | 2 +- roles/mount_rhel/tasks/main.yaml | 4 ++-- roles/reset_files/tasks/main.yaml | 0 roles/teardown_vms/tasks/main.yaml | 0 teardown.yaml | 28 +++++++++++++++++++++++- 6 files changed, 57 insertions(+), 12 deletions(-) create mode 100644 roles/reset_files/tasks/main.yaml create mode 100644 roles/teardown_vms/tasks/main.yaml diff --git a/README.md b/README.md index aecd9157..10dc842f 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,14 @@ The goal of this playbook is to setup and deploy a User Provisioned Infrastructu * 6 Integrated Facilities for Linux (IFLs) * 75 GB of RAM * 1 TB of disk space -* On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed +* On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed with the following options enabled: + * server + * hardware monitoring utilities + * networking file system client + * remote management for linux + * virtualization hypervisor + * headless management + * system tools * On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses * Fully Qualified Domain Names (FQDN) names for all IPv4 addresses @@ -33,21 +40,31 @@ The goal of this playbook is to setup and deploy a User Provisioned Infrastructu * Step 
3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf) or have them pre-defined by your networking team. And place them in the roles/dns/files folder. * Step 4: Run the appropriate Ansible setup shell script, which can be found in the main directory. While in the main directory, run "./ansible-setup-mac.sh" or "./ansible-setup-linux.sh" depending on your operating system to download the required Ansible modules and packages. * Step 5: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass". Watch Ansible as it completes the installation, correcting errors if they arise. If the process fails in error, you should be able to run the same shell command to start the process from the top. Alternatively, use tags to run only the tasks that have that tag. See list of tags below for reference. -* Step 6: Once the create_bastion playbook runs, open the cockpit at :9090>, go to the "Virtual Machines" tab, and complete the bastion's installation with these options: - - list options here - - list options here - - list options here +* Step 6: Once the create_bastion playbook runs, open the cockpit at :9090>, go to the "Virtual Machines" tab, and complete the bastion's installation with these options enabled: + * server + * hardware monitoring utilities + * networking file system client + * remote management for linux + * headless mgmt + * system tools + * basic web server + * network servers * Step 7: When the playbooks for creating nodes run, watch them on the cockpit at "https://:9090". Go to the "Virtual Machines" tab and click on the VM you created. Once the operating system installs, it will power down. Click the blue "Run" button to start it back up. It will then run some more setup. Then, when you see " login" come back to the terminal here where you ran ansible and press "ctrl-C" and then "C" to continue. 
If you do not see the login prompt, press "ctrl+C" and then "A" to stop the process. * Step 8: Repeat Step 7 with the Bootstrap and Control nodes. Then, SSH into the bastion (run "ssh " in the terminal). From there, change to root user (run "su root"). Then ssh into the bootstrap ("ssh core@") and run "journalctl -u bootkube.service" to watch the bootstrap connect to the control nodes (hold space to get to the bottom of the log). Expect lots of errors, as all the nodes may not be entirely up yet. Once all control nodes are connected, the bootkube log will read "bootkube.service complete". * Step 9: Repeat Step 7 with the Compute nodes. * Step 10: Once all the Compute nodes up and prompting login, log in to the bastion and run "export KUBECONFIG=/ocpinst/auth/kubeconfig". Then run "oc get csr". It will bring up a list of certificates that need approval. For each cert that is "Pending", run "oc adm certiciate approve ". The csr names will be something like "csr-v8qqv". Once you approve all the certificates, double check that there are not more that have appeared by running "oc get csr" again. Once all certs are "Approved, Issued". You're ready for the next step. * Step 11: From the bastion, run "oc get nodes". Once all nodes are "Ready", run "oc get clusteroperators". Wait for them to all read "True" under the "Available" column. This may take hours. -* Step 8: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" -* Step 9: Running the command in the above step will give you some information about how to log into the OpenShift cluster's dashboard. Copy the URL into a web browser and use the provided "kubeadmin" login and password for first time sign-on. -* Step 10: Celebrate! Your OpenShift cluster installation is complete. 
+* Step 12: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" +* Step 13: Running the command in the above step will give you some information about how to log into the OpenShift cluster's dashboard. Copy the URL into a web browser and use the provided "kubeadmin" login and password for first time sign-on. +* Step 14: Celebrate! Your OpenShift cluster installation is complete. * Optional: Leave the bootstrap running as is, shut it down and destroy it, or convert it into a compute node. +## Teardown: +* If you would like to teardown your VMs, run "ansible-playbook teardown.yaml --ask-become-pass --tags "partial/full". Choose either the partial or full tag. +* Use the "full" tag to teardown all VMs running on your KVM host. Once you run the full teardown, to start the main.yaml playbook back from that point, run with tags "bastionvm,bastion,create_nodes". +* Use the "partial" tag to teardown to the point where nothing except the bastion is running on your KVM host. Once you run the partial teardown, to start the main.yaml playbook back from that point, run with tags "bastion,create_nodes". 
+ ## Tags: * setup = first-time setup of ansible @@ -69,3 +86,5 @@ The goal of this playbook is to setup and deploy a User Provisioned Infrastructu * localhost = for tasks that apply to the local machine running Ansible * firewall = for tasks related to firewall settings * selinux = for tasks related to SELinux settings +* partial = for use with teardown.yaml to bring down VMs except bastion +* full = for use with teardown.yaml to bring down all VMs diff --git a/main.yaml b/main.yaml index 71840c5b..d577c31f 100644 --- a/main.yaml +++ b/main.yaml @@ -51,7 +51,7 @@ - check_ssh #- install_packages #- install_ansible - - ssh-ocp-key-gen # for bastion to connect to nodes + - ssh-ocp-key-gen # SSH key for bastion to connect to nodes - set_selinux_permissive - set_firewall - dns diff --git a/roles/mount_rhel/tasks/main.yaml b/roles/mount_rhel/tasks/main.yaml index b86b3454..62bb95c4 100644 --- a/roles/mount_rhel/tasks/main.yaml +++ b/roles/mount_rhel/tasks/main.yaml @@ -1,12 +1,12 @@ --- -- name: Check to see if local .ssh directory exists +- name: Check to see if rhcos core install directory already exists tags: keymastr stat: path: "/rhcos-install/" register: rhcos_mount -- name: Print results of .ssh directory check +- name: Print results of rhcos core install directory check tags: keymastr debug: var: rhcos_mount diff --git a/roles/reset_files/tasks/main.yaml b/roles/reset_files/tasks/main.yaml new file mode 100644 index 00000000..e69de29b diff --git a/roles/teardown_vms/tasks/main.yaml b/roles/teardown_vms/tasks/main.yaml new file mode 100644 index 00000000..e69de29b diff --git a/teardown.yaml b/teardown.yaml index 5359091c..2dc6a1ba 100644 --- a/teardown.yaml +++ b/teardown.yaml @@ -4,4 +4,30 @@ #- shutdown and destroy bootstrap, control, and compute nodes #- delete /ocpinst/ #- delete ssh keys from known hosts in bastion and delete ssh keys from ~/.ssh/id_rsa* -#- delete ssh key from roles/get-ocp/files/ocp_ssh_pub \ No newline at end of file +#- delete ssh key 
from roles/get-ocp/files/ocp_ssh_pub + +# Use the "full" tag when running this playbook to reset your installation back to the point where no VMs were running on the KVM host. After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastionvm,bastion,create_nodes'" +- hosts: kvm_host + tags: full + become: true + gather_facts: no + vars: + - vms: ['bastion', 'bootstrap', 'control-0', 'control-1', 'control-2', 'compute-0', 'compute-1'] + roles: + - teardown_vms + +# Use the "partial" tag when running this playbook to reset your installation back to the point where just the bastion node was running on the KVM host. After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastion,create_nodes'" +- hosts: bastion + tags: partial + become: true + gather_facts: no + - reset_files + +- hosts: kvm_host + tags: partial + become: true + gather_facts: no + vars: + - vms: ['bootstrap', 'control-0', 'control-1', 'control-2', 'compute-0', 'compute-1'] + roles: + - teardown_vms From 56031492961d1e0c7914d3fa759776626bff8817 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 17:46:55 -0500 Subject: [PATCH 377/885] Completed and tested teardown.yaml --- README.md | 1 + roles/get-ocp/tasks/main.yaml | 20 ++++++++++++++++++++ roles/reset_files/tasks/main.yaml | 25 +++++++++++++++++++++++++ roles/teardown_vms/tasks/main.yaml | 27 +++++++++++++++++++++++++++ teardown.yaml | 15 ++++++++------- 5 files changed, 81 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 10dc842f..ec1beeed 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,7 @@ The goal of this playbook is to setup and deploy a User Provisioned Infrastructu ## Teardown: * If you would like to teardown your VMs, run "ansible-playbook teardown.yaml --ask-become-pass --tags "partial/full". Choose either the partial or full tag. 
+* If you have provisioned more than the minimum number of nodes for your installation, add them to the respective list found in roles/teardown_vms/tasks/main.yaml. * Use the "full" tag to teardown all VMs running on your KVM host. Once you run the full teardown, to start the main.yaml playbook back from that point, run with tags "bastionvm,bastion,create_nodes". * Use the "partial" tag to teardown to the point where nothing except the bastion is running on your KVM host. Once you run the partial teardown, to start the main.yaml playbook back from that point, run with tags "bastion,create_nodes". diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index 17959d8d..db320964 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -85,6 +85,26 @@ group: root mode: '0755' +- name: check to see if ocp_ssh_pub file exists + tags: getocp,bastion + stat: + path: roles/get-ocp/files/ocp_ssh_pub + register: ocp_ssh_pub + +- name: delete ocp_ssh_pub file + tags: getocp, bastion + file: + state: absent + path: roles/get-ocp/files/ocp_ssh_pub + when: ocp_ssh_pub.stat.exists == true + register: ocp_ssh_pub_del + +- name: create ocp_ssh_pub if it needs to be + file: + path: roles/get-ocp/files/ocp_ssh_pub + mode: '0755' + when: ocp_ssh_pub_del.changed == true or ocp_ssh_pub.stat.exists == false + - name: Fetch ssh key from bastion for use in install-config tags: getocp,bastion ansible.builtin.fetch: diff --git a/roles/reset_files/tasks/main.yaml b/roles/reset_files/tasks/main.yaml index e69de29b..67fbd152 100644 --- a/roles/reset_files/tasks/main.yaml +++ b/roles/reset_files/tasks/main.yaml @@ -0,0 +1,25 @@ +--- + +- name: delete /ocpinst directory + file: + path: /ocpinst + state: absent + +- name: delete OCP SSH keys from bastion's ssh folder + file: + path: ~/.ssh/{{ item }} + state: absent + loop: + - id_rsa + - id_rsa.pub + +- name: delete bastion's ssh known hosts file to remove fingerprints + file: + path: ~/.ssh/known_hosts 
+ state: absent + +- name: create empty bastion ssh known hosts file + file: + path: ~/.ssh/known_hosts + state: touch + mode: '644' \ No newline at end of file diff --git a/roles/teardown_vms/tasks/main.yaml b/roles/teardown_vms/tasks/main.yaml index e69de29b..439d6344 100644 --- a/roles/teardown_vms/tasks/main.yaml +++ b/roles/teardown_vms/tasks/main.yaml @@ -0,0 +1,27 @@ +--- + +- name: print list of vms given from teardown.yaml + debug: + var: vms + +- name: print the number of vms given from teardown.yaml + debug: + var: "{{ vms | length }}" + +- name: shutdown vms + tags: test + community.libvirt.virt: + name: "{{ item }}" + command: shutdown + loop: "{{ vms }}" + +- name: wait 5 minute for VMs to shutdown gracefully + pause: + minutes: 5 + +- name: undefine vms + tags: test + community.libvirt.virt: + name: "{{ item }}" + command: undefine + loop: "{{ vms }}" \ No newline at end of file diff --git a/teardown.yaml b/teardown.yaml index 2dc6a1ba..5ce3da30 100644 --- a/teardown.yaml +++ b/teardown.yaml @@ -1,12 +1,9 @@ --- -#needed: -#- shutdown and destroy bootstrap, control, and compute nodes -#- delete /ocpinst/ -#- delete ssh keys from known hosts in bastion and delete ssh keys from ~/.ssh/id_rsa* -#- delete ssh key from roles/get-ocp/files/ocp_ssh_pub +# Use the "full" tag when running this playbook to reset your installation back to the point where no VMs were running on the KVM host. +# If you have more nodes than what is present in the "vms" list below, feel free to add more to the list. +# After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastionvm,bastion,create_nodes'" -# Use the "full" tag when running this playbook to reset your installation back to the point where no VMs were running on the KVM host. 
After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastionvm,bastion,create_nodes'" - hosts: kvm_host tags: full become: true @@ -16,11 +13,15 @@ roles: - teardown_vms -# Use the "partial" tag when running this playbook to reset your installation back to the point where just the bastion node was running on the KVM host. After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastion,create_nodes'" +# Use the "partial" tag when running this playbook to reset your installation back to the point where just the bastion node was running on the KVM host. +# If you have more nodes than what is present in the "vms" list below, feel free to add more to the list. +# After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastion,create_nodes'" + - hosts: bastion tags: partial become: true gather_facts: no + roles: - reset_files - hosts: kvm_host From cd929275cdb8384e8c91d706296601e54ce683ea Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 17:46:55 -0500 Subject: [PATCH 378/885] Completed and tested teardown.yaml --- README.md | 1 + roles/get-ocp/tasks/main.yaml | 20 ++++++++++++++++++++ roles/reset_files/tasks/main.yaml | 25 +++++++++++++++++++++++++ roles/teardown_vms/tasks/main.yaml | 27 +++++++++++++++++++++++++++ teardown.yaml | 15 ++++++++------- 5 files changed, 81 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 10dc842f..ec1beeed 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,7 @@ The goal of this playbook is to setup and deploy a User Provisioned Infrastructu ## Teardown: * If you would like to teardown your VMs, run "ansible-playbook teardown.yaml --ask-become-pass --tags "partial/full". Choose either the partial or full tag. 
+* If you have provisioned more than the minimum number of nodes for your installation, add them to the respective list found in roles/teardown_vms/tasks/main.yaml. * Use the "full" tag to teardown all VMs running on your KVM host. Once you run the full teardown, to start the main.yaml playbook back from that point, run with tags "bastionvm,bastion,create_nodes". * Use the "partial" tag to teardown to the point where nothing except the bastion is running on your KVM host. Once you run the partial teardown, to start the main.yaml playbook back from that point, run with tags "bastion,create_nodes". diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index 17959d8d..db320964 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -85,6 +85,26 @@ group: root mode: '0755' +- name: check to see if ocp_ssh_pub file exists + tags: getocp,bastion + stat: + path: roles/get-ocp/files/ocp_ssh_pub + register: ocp_ssh_pub + +- name: delete ocp_ssh_pub file + tags: getocp, bastion + file: + state: absent + path: roles/get-ocp/files/ocp_ssh_pub + when: ocp_ssh_pub.stat.exists == true + register: ocp_ssh_pub_del + +- name: create ocp_ssh_pub if it needs to be + file: + path: roles/get-ocp/files/ocp_ssh_pub + mode: '0755' + when: ocp_ssh_pub_del.changed == true or ocp_ssh_pub.stat.exists == false + - name: Fetch ssh key from bastion for use in install-config tags: getocp,bastion ansible.builtin.fetch: diff --git a/roles/reset_files/tasks/main.yaml b/roles/reset_files/tasks/main.yaml index e69de29b..67fbd152 100644 --- a/roles/reset_files/tasks/main.yaml +++ b/roles/reset_files/tasks/main.yaml @@ -0,0 +1,25 @@ +--- + +- name: delete /ocpinst directory + file: + path: /ocpinst + state: absent + +- name: delete OCP SSH keys from bastion's ssh folder + file: + path: ~/.ssh/{{ item }} + state: absent + loop: + - id_rsa + - id_rsa.pub + +- name: delete bastion's ssh known hosts file to remove fingerprints + file: + path: ~/.ssh/known_hosts 
+ state: absent + +- name: create empty bastion ssh known hosts file + file: + path: ~/.ssh/known_hosts + state: touch + mode: '644' \ No newline at end of file diff --git a/roles/teardown_vms/tasks/main.yaml b/roles/teardown_vms/tasks/main.yaml index e69de29b..439d6344 100644 --- a/roles/teardown_vms/tasks/main.yaml +++ b/roles/teardown_vms/tasks/main.yaml @@ -0,0 +1,27 @@ +--- + +- name: print list of vms given from teardown.yaml + debug: + var: vms + +- name: print the number of vms given from teardown.yaml + debug: + var: "{{ vms | length }}" + +- name: shutdown vms + tags: test + community.libvirt.virt: + name: "{{ item }}" + command: shutdown + loop: "{{ vms }}" + +- name: wait 5 minute for VMs to shutdown gracefully + pause: + minutes: 5 + +- name: undefine vms + tags: test + community.libvirt.virt: + name: "{{ item }}" + command: undefine + loop: "{{ vms }}" \ No newline at end of file diff --git a/teardown.yaml b/teardown.yaml index 2dc6a1ba..5ce3da30 100644 --- a/teardown.yaml +++ b/teardown.yaml @@ -1,12 +1,9 @@ --- -#needed: -#- shutdown and destroy bootstrap, control, and compute nodes -#- delete /ocpinst/ -#- delete ssh keys from known hosts in bastion and delete ssh keys from ~/.ssh/id_rsa* -#- delete ssh key from roles/get-ocp/files/ocp_ssh_pub +# Use the "full" tag when running this playbook to reset your installation back to the point where no VMs were running on the KVM host. +# If you have more nodes than what is present in the "vms" list below, feel free to add more to the list. +# After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastionvm,bastion,create_nodes'" -# Use the "full" tag when running this playbook to reset your installation back to the point where no VMs were running on the KVM host. 
After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastionvm,bastion,create_nodes'" - hosts: kvm_host tags: full become: true @@ -16,11 +13,15 @@ roles: - teardown_vms -# Use the "partial" tag when running this playbook to reset your installation back to the point where just the bastion node was running on the KVM host. After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastion,create_nodes'" +# Use the "partial" tag when running this playbook to reset your installation back to the point where just the bastion node was running on the KVM host. +# If you have more nodes than what is present in the "vms" list below, feel free to add more to the list. +# After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastion,create_nodes'" + - hosts: bastion tags: partial become: true gather_facts: no + roles: - reset_files - hosts: kvm_host From 6f77756bf746cd27f92021478b0440fd65beb103 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 18:12:20 -0500 Subject: [PATCH 379/885] Updated install_packages to be idempotent --- README.md | 1 + main.yaml | 6 +++--- roles/install_packages/tasks/main.yaml | 21 ++++++++++----------- roles/teardown_vms/tasks/main.yaml | 12 +++++------- 4 files changed, 19 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index ec1beeed..5b054508 100644 --- a/README.md +++ b/README.md @@ -70,6 +70,7 @@ The goal of this playbook is to setup and deploy a User Provisioned Infrastructu * setup = first-time setup of ansible * prep = run all setup playbooks +* pkg = install and update all packages * bastion = configuration of bastion for OCP * keymastr = ssh key configuration and testing * bastionvm = creation of Bastion KVM guest diff --git a/main.yaml b/main.yaml index d577c31f..771d200a 100644 --- a/main.yaml +++ b/main.yaml @@ -19,8 +19,8 @@ become: true vars_files: - env.yaml 
- vars: - - packages: [ 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img', 'libvirt' ] + vars: # feel free to add more packages as needed + - packages: [ 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img', 'libvirt' ] roles: - check_ssh - install_packages @@ -45,7 +45,7 @@ become: true vars_files: - env.yaml - vars: + vars: # feel free to add more packages as needed - packages: [ 'ansible', 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: - check_ssh diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index ce342bab..762a3a15 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,15 +1,14 @@ --- -- name: Installing required packages +- name: print the list of packages to be installed and updated + tags: pkg + debug: + var: packages + +- name: installing required packages + tags: pkg ansible.builtin.package: - name: - - "{{ packages[0] | default(omit) }}" - - "{{ packages[1] | default(omit) }}" - - "{{ packages[2] | default(omit) }}" - - "{{ packages[3] | default(omit) }}" - - "{{ packages[4] | default(omit) }}" - - "{{ packages[5] | default(omit) }}" - - "{{ packages[6] | default(omit) }}" - - "{{ packages[7] | default(omit) }}" + name: "{{ item }}" state: latest - update_cache: yes \ No newline at end of file + update_cache: yes + loop: "{{ packages }}" \ No newline at end of file diff --git a/roles/teardown_vms/tasks/main.yaml b/roles/teardown_vms/tasks/main.yaml index 439d6344..97ace6a3 100644 --- a/roles/teardown_vms/tasks/main.yaml +++ b/roles/teardown_vms/tasks/main.yaml @@ -1,26 +1,24 @@ --- -- name: print list of vms given from teardown.yaml +- name: print the list of VMs given from teardown.yaml debug: var: vms -- name: print the number of vms given from teardown.yaml +- name: print the 
number of VMs given from teardown.yaml debug: var: "{{ vms | length }}" -- name: shutdown vms - tags: test +- name: shutdown listed VMs community.libvirt.virt: name: "{{ item }}" command: shutdown loop: "{{ vms }}" -- name: wait 5 minute for VMs to shutdown gracefully +- name: wait up to 5 minute for VMs to shutdown gracefully pause: minutes: 5 -- name: undefine vms - tags: test +- name: undefine listed VMs community.libvirt.virt: name: "{{ item }}" command: undefine From 8b83e23343d735e272bc8d2dddee9405df3c4535 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 18:12:20 -0500 Subject: [PATCH 380/885] Updated install_packages to be idempotent --- README.md | 1 + main.yaml | 6 +++--- roles/install_packages/tasks/main.yaml | 21 ++++++++++----------- roles/teardown_vms/tasks/main.yaml | 12 +++++------- 4 files changed, 19 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index ec1beeed..5b054508 100644 --- a/README.md +++ b/README.md @@ -70,6 +70,7 @@ The goal of this playbook is to setup and deploy a User Provisioned Infrastructu * setup = first-time setup of ansible * prep = run all setup playbooks +* pkg = install and update all packages * bastion = configuration of bastion for OCP * keymastr = ssh key configuration and testing * bastionvm = creation of Bastion KVM guest diff --git a/main.yaml b/main.yaml index d577c31f..771d200a 100644 --- a/main.yaml +++ b/main.yaml @@ -19,8 +19,8 @@ become: true vars_files: - env.yaml - vars: - - packages: [ 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img', 'libvirt' ] + vars: # feel free to add more packages as needed + - packages: [ 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img', 'libvirt' ] roles: - check_ssh - install_packages @@ -45,7 +45,7 @@ become: true vars_files: - env.yaml - vars: + vars: # feel free to add more packages as 
needed - packages: [ 'ansible', 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: - check_ssh diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index ce342bab..762a3a15 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,15 +1,14 @@ --- -- name: Installing required packages +- name: print the list of packages to be installed and updated + tags: pkg + debug: + var: packages + +- name: installing required packages + tags: pkg ansible.builtin.package: - name: - - "{{ packages[0] | default(omit) }}" - - "{{ packages[1] | default(omit) }}" - - "{{ packages[2] | default(omit) }}" - - "{{ packages[3] | default(omit) }}" - - "{{ packages[4] | default(omit) }}" - - "{{ packages[5] | default(omit) }}" - - "{{ packages[6] | default(omit) }}" - - "{{ packages[7] | default(omit) }}" + name: "{{ item }}" state: latest - update_cache: yes \ No newline at end of file + update_cache: yes + loop: "{{ packages }}" \ No newline at end of file diff --git a/roles/teardown_vms/tasks/main.yaml b/roles/teardown_vms/tasks/main.yaml index 439d6344..97ace6a3 100644 --- a/roles/teardown_vms/tasks/main.yaml +++ b/roles/teardown_vms/tasks/main.yaml @@ -1,26 +1,24 @@ --- -- name: print list of vms given from teardown.yaml +- name: print the list of VMs given from teardown.yaml debug: var: vms -- name: print the number of vms given from teardown.yaml +- name: print the number of VMs given from teardown.yaml debug: var: "{{ vms | length }}" -- name: shutdown vms - tags: test +- name: shutdown listed VMs community.libvirt.virt: name: "{{ item }}" command: shutdown loop: "{{ vms }}" -- name: wait 5 minute for VMs to shutdown gracefully +- name: wait up to 5 minute for VMs to shutdown gracefully pause: minutes: 5 -- name: undefine vms - tags: test +- name: undefine listed VMs community.libvirt.virt: name: "{{ item }}" command: undefine From b4e14fa951c4b29fe2deef158501f2fabfacd009 Mon 
Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 18:27:38 -0500 Subject: [PATCH 381/885] Added a pause for Linux VM test to allow time to install packages on the KVM host --- main.yaml | 6 +++--- roles/check_ssh/tasks/main.yaml | 6 +++++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/main.yaml b/main.yaml index 771d200a..142a5260 100644 --- a/main.yaml +++ b/main.yaml @@ -23,7 +23,7 @@ - packages: [ 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img', 'libvirt' ] roles: - check_ssh - - install_packages + #- install_packages - set_selinux_permissive - enable_packages - macvtap @@ -49,8 +49,8 @@ - packages: [ 'ansible', 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: - check_ssh - #- install_packages - #- install_ansible + - install_packages + - install_ansible - ssh-ocp-key-gen # SSH key for bastion to connect to nodes - set_selinux_permissive - set_firewall diff --git a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml index ce7d56bf..f185a71a 100644 --- a/roles/check_ssh/tasks/main.yaml +++ b/roles/check_ssh/tasks/main.yaml @@ -9,4 +9,8 @@ - name: print the connectivity test results tags: keymastr debug: - var: ssh_connection_test.stdout_lines \ No newline at end of file + var: ssh_connection_test.stdout_lines + +- name: pause for you to do any necessary setup + pause: + minutes: 60 \ No newline at end of file From 51936a821662a73485a12ccbb3f8691161716355 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 18:27:38 -0500 Subject: [PATCH 382/885] Added a pause for Linux VM test to allow time to install packages on the KVM host --- main.yaml | 6 +++--- roles/check_ssh/tasks/main.yaml | 6 +++++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/main.yaml b/main.yaml index 771d200a..142a5260 100644 --- a/main.yaml +++ b/main.yaml @@ -23,7 +23,7 @@ - packages: [ 'libvirt-devel', 'libvirt-daemon-kvm', 
'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img', 'libvirt' ] roles: - check_ssh - - install_packages + #- install_packages - set_selinux_permissive - enable_packages - macvtap @@ -49,8 +49,8 @@ - packages: [ 'ansible', 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: - check_ssh - #- install_packages - #- install_ansible + - install_packages + - install_ansible - ssh-ocp-key-gen # SSH key for bastion to connect to nodes - set_selinux_permissive - set_firewall diff --git a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml index ce7d56bf..f185a71a 100644 --- a/roles/check_ssh/tasks/main.yaml +++ b/roles/check_ssh/tasks/main.yaml @@ -9,4 +9,8 @@ - name: print the connectivity test results tags: keymastr debug: - var: ssh_connection_test.stdout_lines \ No newline at end of file + var: ssh_connection_test.stdout_lines + +- name: pause for you to do any necessary setup + pause: + minutes: 60 \ No newline at end of file From 911865f91b7b05230c4c8c6ecedab05426d00797 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 18:51:41 -0500 Subject: [PATCH 383/885] Removed non-idempotent conditional in ssh_key_gen --- roles/ssh_key_gen/tasks/main.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index 562b7091..c60f4a70 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -44,7 +44,6 @@ path: ~/.ssh/{{ env_ssh_ans_name }} passphrase: "" register: ssh_key_creation - failed_when: ssh_key_creation.rc != 0 when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false - name: Print results of ssh key pair creation From bdabf9429b2d820f69fb7b25e787460700e35dd7 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 18:51:41 -0500 Subject: [PATCH 384/885] Removed 
non-idempotent conditional in ssh_key_gen --- roles/ssh_key_gen/tasks/main.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index 562b7091..c60f4a70 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -44,7 +44,6 @@ path: ~/.ssh/{{ env_ssh_ans_name }} passphrase: "" register: ssh_key_creation - failed_when: ssh_key_creation.rc != 0 when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false - name: Print results of ssh key pair creation From aa87e937ad6e7713027d767d2e8e63883ee4c1a7 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 18:53:49 -0500 Subject: [PATCH 385/885] Removed non-idempotent conditional in ssh_copy_id --- roles/ssh_copy_id/tasks/main.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index 7ad3e169..faee38c3 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -4,7 +4,6 @@ tags: ssh,ssh-copy-id shell: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" register: ssh_copy_id_execution - failed_when: ssh_copy_id_execution.rc != 0 - name: Print results of copying ssh id to remote host. 
tags: ssh,ssh-copy-id From 1beaf0f92880e68c5072f7997fbef1758ef0531e Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 18:53:49 -0500 Subject: [PATCH 386/885] Removed non-idempotent conditional in ssh_copy_id --- roles/ssh_copy_id/tasks/main.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index 7ad3e169..faee38c3 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -4,7 +4,6 @@ tags: ssh,ssh-copy-id shell: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" register: ssh_copy_id_execution - failed_when: ssh_copy_id_execution.rc != 0 - name: Print results of copying ssh id to remote host. tags: ssh,ssh-copy-id From 3f6b4b6414c7200f8edee0237790d639ae9ee824 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 19:02:07 -0500 Subject: [PATCH 387/885] New method for ssh-copy-id --- roles/ssh_copy_id/tasks/main.yaml | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index faee38c3..beaca344 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,11 +1,19 @@ --- -- name: distribute the ssh key to a remote host - tags: ssh,ssh-copy-id - shell: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" - register: ssh_copy_id_execution +#- name: distribute the ssh key to a remote host +# tags: ssh,ssh-copy-id +# shell: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" +# register: ssh_copy_id_execution -- name: Print results of copying ssh id to remote host. 
- tags: ssh,ssh-copy-id - debug: - var: ssh_copy_id_execution +#- name: Print results of copying ssh id to remote host. +# tags: ssh,ssh-copy-id +# debug: +# var: ssh_copy_id_execution + +- name: copy ssh key to remote host + tags: ssh-copy-id + copy: + src: ~/.ssh/{{ env_ssh_ans_name }}.pub + dest: ~/.ssh/{{ env_ssh_ans_name }}.pub + mode: '644' + owner: root \ No newline at end of file From ae8d564c67a54f8940e3bd1e2b953059f25bacea Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 19:02:07 -0500 Subject: [PATCH 388/885] New method for ssh-copy-id --- roles/ssh_copy_id/tasks/main.yaml | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index faee38c3..beaca344 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,11 +1,19 @@ --- -- name: distribute the ssh key to a remote host - tags: ssh,ssh-copy-id - shell: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" - register: ssh_copy_id_execution +#- name: distribute the ssh key to a remote host +# tags: ssh,ssh-copy-id +# shell: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" +# register: ssh_copy_id_execution -- name: Print results of copying ssh id to remote host. - tags: ssh,ssh-copy-id - debug: - var: ssh_copy_id_execution +#- name: Print results of copying ssh id to remote host. 
+# tags: ssh,ssh-copy-id +# debug: +# var: ssh_copy_id_execution + +- name: copy ssh key to remote host + tags: ssh-copy-id + copy: + src: ~/.ssh/{{ env_ssh_ans_name }}.pub + dest: ~/.ssh/{{ env_ssh_ans_name }}.pub + mode: '644' + owner: root \ No newline at end of file From e8a0adc512e09e216a89149e38799ee768f65b28 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 19:09:34 -0500 Subject: [PATCH 389/885] Deleted deprecated roles directories --- roles/bastion_server/.DS_Store | Bin 6148 -> 0 bytes .../bastion_server/tasks/connect_cluster.yaml | 12 - .../tasks/fill_install_config.yaml | 44 ---- roles/bastion_server/tasks/fix-sched.yaml | 37 --- roles/bastion_server/tasks/get-images.yaml | 30 --- roles/bastion_server/tasks/get-ocp.yaml | 94 ------- roles/bastion_server/tasks/http_setup.yaml | 101 -------- roles/bastion_server/tasks/load_balancer.yaml | 28 --- roles/bastion_server/tasks/main.yaml | 236 ------------------ .../templates/install-config.yaml | 26 -- roles/bootstrap_server/.DS_Store | Bin 6148 -> 0 bytes .../tasks/bootstrap_verify.yaml | 11 - roles/control_nodes/.DS_Store | Bin 6148 -> 0 bytes roles/kvm_host/tasks/create_bastion.yaml | 11 - roles/kvm_host/tasks/create_bootstrap.yaml | 15 -- roles/kvm_host/tasks/create_nodes.yaml | 61 ----- roles/kvm_host/tasks/define_macvtap.yaml | 24 -- roles/kvm_host/tasks/main.yaml | 20 -- roles/kvm_host/tasks/prep_kvm_guests.yaml | 14 -- roles/kvm_host/templates/macvtap.xml.j2 | 6 - roles/ssh_copy_id/tasks/main.yaml | 24 +- roles/worker_nodes/.DS_Store | Bin 6148 -> 0 bytes 22 files changed, 8 insertions(+), 786 deletions(-) delete mode 100644 roles/bastion_server/.DS_Store delete mode 100644 roles/bastion_server/tasks/connect_cluster.yaml delete mode 100644 roles/bastion_server/tasks/fill_install_config.yaml delete mode 100644 roles/bastion_server/tasks/fix-sched.yaml delete mode 100644 roles/bastion_server/tasks/get-images.yaml delete mode 100644 roles/bastion_server/tasks/get-ocp.yaml delete mode 100644 
roles/bastion_server/tasks/http_setup.yaml delete mode 100644 roles/bastion_server/tasks/load_balancer.yaml delete mode 100644 roles/bastion_server/tasks/main.yaml delete mode 100644 roles/bastion_server/templates/install-config.yaml delete mode 100644 roles/bootstrap_server/.DS_Store delete mode 100644 roles/bootstrap_server/tasks/bootstrap_verify.yaml delete mode 100644 roles/control_nodes/.DS_Store delete mode 100644 roles/kvm_host/tasks/create_bastion.yaml delete mode 100644 roles/kvm_host/tasks/create_bootstrap.yaml delete mode 100644 roles/kvm_host/tasks/create_nodes.yaml delete mode 100644 roles/kvm_host/tasks/define_macvtap.yaml delete mode 100644 roles/kvm_host/tasks/main.yaml delete mode 100644 roles/kvm_host/tasks/prep_kvm_guests.yaml delete mode 100644 roles/kvm_host/templates/macvtap.xml.j2 delete mode 100644 roles/worker_nodes/.DS_Store diff --git a/roles/bastion_server/.DS_Store b/roles/bastion_server/.DS_Store deleted file mode 100644 index 1fbd689229062191b69aea2e8fd10736d076ce3d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHK%}T>S5T4Z*f*$PAi;yP}`UbJYgCJh|0=11=F*U_1UOePA1o0Jo0FOR{UVW50 zf3%zKsyC4`12f-dcV>6y3)#&Afa~2AU4RY%B&dX~Ei@6Kane9Gv}Xs=@f@?GvKkG` zY8d!hhcRFb{67Zh+ik-+TtW$F;D5igB^;pc&sjKi&$GPf_D9uZ%Kc)z(Mgr(x(_6; zCm)Z4<2RbC;ZgkJF&zghBrt|kxQ2qWy$SMt!8QDR1OG_Yfsdbi@%jF&xkvn#75wBE zsiO_ISjP;mRx5COD;0R2k=(seKkJA4`O|{&#Y&TEnQz=9lBv8|v+KwJp4lYHwxC&K zz!)$F77Wn-kfIW1inSnb9ccIpfY`!r7Hrdx1I46@nPM%7BPdQn5hc{=6~jq5_NVHX zDb|7#4o)v0PFHq%Lvg%1&Y!Y7xJ=NjF<=Zd8Q7G^4xRsp-{1e6LH1+}7z1m?fNSOb zyoV*}*%}l_XKji)KqVo+T98A~=yA*qI*NBuS#T_+24bdI3t|t7{Rl7(W{iP9W#9|1 CY>%%1 diff --git a/roles/bastion_server/tasks/connect_cluster.yaml b/roles/bastion_server/tasks/connect_cluster.yaml deleted file mode 100644 index 82c0ca2f..00000000 --- a/roles/bastion_server/tasks/connect_cluster.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - - - name: create csr-name variable - command: 
oc get csr - register: csr-name - - - name: approve all certificate signing requests - command: oc adm certificate approve {{ csr-name }} diff --git a/roles/bastion_server/tasks/fill_install_config.yaml b/roles/bastion_server/tasks/fill_install_config.yaml deleted file mode 100644 index 55e817d6..00000000 --- a/roles/bastion_server/tasks/fill_install_config.yaml +++ /dev/null @@ -1,44 +0,0 @@ -##Needs variables from host_vars for baseDomain, cluster_name, pullsecret, and ssh-public-key. -##I think it also needs cidr (pod's IP range) and service network IP range. -##Ensure PATHs are correct ---- - -- hosts: bastion_server - become: true - tasks: - - - name: create install-config.yaml - file: - path: "~/files/install-config.yaml" - state: touch - - - name: Fill contents of install-config.yaml file - copy: - dest: "~/files/install-config.yaml" - content: | - apiVersion: v1 - baseDomain: ocpz.wsclab.endicott.ibm.com - compute: - - architecture: s390x - hyperthreading: Enabled - name: worker - replicas: 0 - controlPlane: - architecture: s390x - hyperthreading: Enabled - name: master - replicas: 3 - metadata: - name: distribution - networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - networkType: OpenShiftSDN - serviceNetwork: - - 172.30.0.0/16 - platform: - none: {} - fips: false - pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' - sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDudjhbUkA7ekvuzZe7WpkkF2KOoLGwWU05Ch6Ty0OWDW0vdbtrG7TIgy8FYOvBnArnIJcy1AP6tpH6BcbmHRIosVFbZ3GrLkKEOroBD+8+6qGzN+fIXBzss20iRvMvDdlDaVJ1BqmTvNnkbeWdgCg1MbICQ1OYOExm+xgvGWHOjoJkUzkw3oBnAnpZHHTkvM2dY0SNtzV9i5FcSvdBM2kindvBpvDZ9PjTGkcFMnbVRpBXUV+axNg4yuX0YWFDWOFlQ4DjvasdN6jMQ2MrWBYtYF1RAklsOhSXTPKAYaxZZsfeEBnMVUFshaDYy7PlbCXcUEVEFEkMlTAbe//bBXx5l+2fVFtOUyhfws28bLg+rlJLivINoLW4EB+ERViYBTCEx5njl9mK9EcPpCJlLTR82kr8vEwE/sJ9ro3dH/aEUQcTkVYqlOCw2lsFZpkZtthArknZiHk91OtWgXj4GsAjihehfBvEL/GbEuO0HTzvl+QiTnC+mL4KcObl31K9LdlAbKA53l6G68py0u7plUervFN8H7rIFbv4ndUTAo2rODty/QoYH8jBu460LLgLyaRfaDFrGitnAdSiCS/8dEr8iAVxj5X5sgcTQlsJYyvMv61YCzItmlipDpbf+M/mJyJuxyuYWqudItOIWchFktstcpp3dx1iM/pr0o4HitpeQ== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' diff --git a/roles/bastion_server/tasks/fix-sched.yaml b/roles/bastion_server/tasks/fix-sched.yaml deleted file mode 100644 index 17cfef11..00000000 --- a/roles/bastion_server/tasks/fix-sched.yaml +++ /dev/null @@ -1,37 +0,0 @@ -- hosts: bastion_server - become: true - tasks: - -# - name: Set mastersSchedulable parameter to False -# replace: -# path: /ocpinst/manifests/cluster-scheduler-02-config.yml -# regexp: ': true' -# replace: ': false' - - - name: Create Ignition 
files - ansible.builtin.command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ - become: yes - - - name: create Ignition directory on webserver - file: - path: /var/www/html/ignition - state: directory - - - name: Copy bootstrap Ignition file to web server - copy: - src: /ocpinst/bootstrap.ign - dest: /var/www/html/ignition - remote_src: yes - - - name: Copy control plane Ignition file to web server - copy: - src: /ocpinst/master.ign - dest: /var/www/html/ignition - remote_src: yes - - - name: Copy worker Ignition file to web server - copy: - src: /ocpinst/worker.ign - dest: /var/www/html/ignition - remote_src: yes - diff --git a/roles/bastion_server/tasks/get-images.yaml b/roles/bastion_server/tasks/get-images.yaml deleted file mode 100644 index 9563e17b..00000000 --- a/roles/bastion_server/tasks/get-images.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- - -hosts: kvm_host -become: true -tasks: - -- name: download RHCOS initramfs - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img - dest: /var/lib/libvirt/images - mode: 0755 - -- name: download RHCOS kernel - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x - dest: /var/lib/libvirt/images - mode: 0755 - -- name: download RHCOS rootfs - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/lib/libvirt/images - mode: 0755 - -- name: download QCOW2 image - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz - dest: /var/lib/libvirt/images - mode: 0755 - diff --git a/roles/bastion_server/tasks/get-ocp.yaml b/roles/bastion_server/tasks/get-ocp.yaml deleted file mode 100644 index 5b42a9e5..00000000 --- a/roles/bastion_server/tasks/get-ocp.yaml +++ /dev/null @@ -1,94 +0,0 @@ ---- - -- hosts: 
bastion_server - become: true - tasks: - - - name: create OCP download landing directory - file: - path: /ocpinst/ - state: directory - - - name: Unzip OCP Client - ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz - dest: /ocpinst/ - remote_src: yes - - - name: Unzip OCP Installer - ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz - dest: /ocpinst/ - remote_src: yes - - - name: Copy kubectl file - ansible.builtin.copy: - src: /ocpinst/kubectl - dest: /usr/local/bin/kubectl - remote_src: yes - owner: root - group: root - mode: '0755' - - - name: Copy oc file - ansible.builtin.copy: - src: /ocpinst/oc - dest: /usr/local/bin/oc - remote_src: yes - owner: root - group: root - mode: '0755' - - - name: Copy openshift-install file - ansible.builtin.copy: - src: /ocpinst/openshift-install - dest: /usr/local/bin/openshift-install - remote_src: yes - owner: root - group: root - mode: '0755' - - - name: Copy install-config.yaml to ocp install directory - copy: - src: install-config.yaml - dest: /ocpinst/install-config.yaml - - - name: Create Manifests - command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ - become: yes - - - name: Set mastersSchedulable parameter to False - replace: - path: /ocpinst/manifests/cluster-scheduler-02-config.yml - regexp: ': true' - replace: ': false' - - - name: Create Ignition files - command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ - become: yes - - - name: create Ignition directory on webserver - file: - path: /var/www/html/ignition - state: directory - - - name: Copy bootstrap Ignition file to web server - copy: - src: /ocpinst/bootstrap.ign - dest: /var/www/html/ignition - remote_src: yes - - - name: Copy control plane Ignition file to web server - copy: - src: /ocpinst/master.ign - dest: /var/www/html/ignition - remote_src: yes 
- - - name: Copy compute Ignition file to web server - copy: - src: /ocpinst/worker.ign - dest: /var/www/html/ignition - remote_src: yes - - - diff --git a/roles/bastion_server/tasks/http_setup.yaml b/roles/bastion_server/tasks/http_setup.yaml deleted file mode 100644 index 313fab32..00000000 --- a/roles/bastion_server/tasks/http_setup.yaml +++ /dev/null @@ -1,101 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - - ## - name: update repository index - ## dnf: - ## update_cache: yes - - ## - name: install httpd - ## dnf: - ## name: httpd - ## state: latest - - - name: Ensure the default Apache port is 8080 - replace: - path: /etc/httpd/conf/httpd.conf - regexp: '^Listen 80' - replace: 'Listen 8080' - backup: yes - - - name: Ensure the SSL default port is 4443 - replace: - path: /etc/httpd/conf.d/ssl.conf - regexp: '^Listen 443 https' - replace: 'Listen 4443 https' - backup: yes - - - name: restart httpd to reflect changes to port - service: - name: httpd - state: restarted - -# - name: Allow all access to tcp port 8080 -# community.general.ufw: -# rule: allow -# port: '8080' -# proto: tcp -# -# - name: Allow all access to tcp port 80 -# community.general.ufw: -# rule: allow -# port: '80' -# proto: tcp -# -# - name: Allow all access to tcp port 443 -# community.general.ufw: -# rule: allow -# port: '443' -# proto: tcp -# -# - name: Allow all access to tcp port 4443 -# community.general.ufw: -# rule: allow -# port: '4443' -# proto: tcp - - - name: create directory bin for mirrors - file: - path: /var/www/html/bin - state: directory - mode: '0755' - - - name: create directory bootstrap for mirrors - file: - path: /var/www/html/bootstrap - state: directory - mode: '0755' - - - name: get mirrors 1 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x - dest: /var/www/html/bin - remote_src: yes - mode: '0755' - - - name: get mirrors 2 - get_url: - url: 
https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img - dest: /var/www/html/bin - remote_src: yes - mode: '0755' - - - name: get mirrors 3 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/www/html/bin - remote_src: yes - mode: '0755' - - - name: check to make sure httpd is started - service: - name: httpd - state: started - - - name: check httpd status - service: - state: started - name: httpd - diff --git a/roles/bastion_server/tasks/load_balancer.yaml b/roles/bastion_server/tasks/load_balancer.yaml deleted file mode 100644 index acfde8ff..00000000 --- a/roles/bastion_server/tasks/load_balancer.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - -# required plugin: ansible-galaxy collection install community.general - -##- name: install haproxy -## dnf: -## - haproxy - - - name: move haproxy config file to bastion - copy: - src: haproxy.cfg - dest: /etc/haproxy/haproxy.cfg - force: yes - backup: yes - - - name: enable haproxy - systemd: - state: enabled - named: haproxy - - - name: Start haproxy - systemd: - state: restarted - name: haproxy diff --git a/roles/bastion_server/tasks/main.yaml b/roles/bastion_server/tasks/main.yaml deleted file mode 100644 index 0d6d8024..00000000 --- a/roles/bastion_server/tasks/main.yaml +++ /dev/null @@ -1,236 +0,0 @@ -#This is the main task book for the bastion server to set up the load balancer, http server, and download OCP install and ignition files - -# required plugin: ansible-galaxy collection install community.general - -#- name: install haproxy -# dnf: -# - haproxy - -- name: move haproxy config file to bastion - tags: bastion - copy: - src: haproxy.cfg - dest: /etc/haproxy/haproxy.cfg - force: yes - backup: yes - -- name: enable haproxy - tags: bastion - systemd: - state: enabled - named: haproxy - -- name: Start haproxy - 
tags: bastion - systemd: - state: restarted - name: haproxy - -## - name: update repository index -## dnf: -## update_cache: yes - -## - name: install httpd -## dnf: -## name: httpd -## state: latest - -- name: Ensure the default Apache port is 8080 - tags: bastion - replace: - path: /etc/httpd/conf/httpd.conf - regexp: '^Listen 80' - replace: 'Listen 8080' - backup: yes - -- name: Ensure the SSL default port is 4443 - tags: bastion - replace: - path: /etc/httpd/conf.d/ssl.conf - regexp: '^Listen 443 https' - replace: 'Listen 4443 https' - backup: yes - -- name: restart httpd to reflect changes to port - tags: bastion - service: - name: httpd - state: restarted - -# - name: Allow all access to tcp port 8080 -# tags: bastion -# community.general.ufw: -# rule: allow -# port: '8080' -# proto: tcp -# -# - name: Allow all access to tcp port 80 -# tags: bastion -# community.general.ufw: -# rule: allow -# port: '80' -# proto: tcp -# -# - name: Allow all access to tcp port 443 -# tags: bastion -# community.general.ufw: -# rule: allow -# port: '443' -# proto: tcp -# -# - name: Allow all access to tcp port 4443 -# tags: bastion -# community.general.ufw: -# rule: allow -# port: '4443' -# proto: tcp - -- name: create directory bin for mirrors - tags: bastion - file: - path: /var/www/html/bin - state: directory - mode: '0755' - -- name: create directory bootstrap for mirrors - tags: bastion - file: - path: /var/www/html/bootstrap - state: directory - mode: '0755' - -- name: get mirrors 1 - tags: bastion - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x - dest: /var/www/html/bin - remote_src: yes - mode: '0755' - -- name: get mirrors 2 - tags: bastion - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img - dest: /var/www/html/bin - remote_src: yes - mode: '0755' - -- name: get mirrors 3 - tags: bastion - get_url: - url: 
https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/www/html/bin - remote_src: yes - mode: '0755' - -- name: check to make sure httpd is started - tags: bastion - service: - name: httpd - state: started - -- name: check httpd status - tags: bastion - service: - state: started - name: httpd - -- name: create OCP download landing directory - tags: bastion - file: - path: /ocpinst/ - state: directory - -- name: Unzip OCP Client - tags: bastion - ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz - dest: /ocpinst/ - remote_src: yes - -- name: Unzip OCP Installer - tags: bastion - ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz - dest: /ocpinst/ - remote_src: yes - -- name: Copy kubectl file - tags: bastion - ansible.builtin.copy: - src: /ocpinst/kubectl - dest: /usr/local/bin/kubectl - remote_src: yes - owner: root - group: root - mode: '0755' - -- name: Copy oc file - tags: bastion - ansible.builtin.copy: - src: /ocpinst/oc - dest: /usr/local/bin/oc - remote_src: yes - owner: root - group: root - mode: '0755' - -- name: Copy openshift-install file - tags: bastion - ansible.builtin.copy: - src: /ocpinst/openshift-install - dest: /usr/local/bin/openshift-install - remote_src: yes - owner: root - group: root - mode: '0755' - -- name: Copy install-config.yaml to ocp install directory - tags: bastion - copy: - src: install-config.yaml - dest: /ocpinst/install-config.yaml - -- name: Create Manifests - tags: bastion - command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ - become: yes - -- name: Set mastersSchedulable parameter to False - tags: bastion - replace: - path: /ocpinst/manifests/cluster-scheduler-02-config.yml - regexp: ': true' - replace: ': false' - -- name: Create Ignition files - tags: bastion - command: 
/ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ - become: yes - -- name: create Ignition directory on webserver - tags: bastion - file: - path: /var/www/html/ignition - state: directory - -- name: Copy bootstrap Ignition file to web server - tags: bastion - copy: - src: /ocpinst/bootstrap.ign - dest: /var/www/html/ignition - remote_src: yes - -- name: Copy control plane Ignition file to web server - tags: bastion - copy: - src: /ocpinst/master.ign - dest: /var/www/html/ignition - remote_src: yes - -- name: Copy worker Ignition file to web server - tags: bastion - copy: - src: /ocpinst/worker.ign - dest: /var/www/html/ignition - remote_src: yes diff --git a/roles/bastion_server/templates/install-config.yaml b/roles/bastion_server/templates/install-config.yaml deleted file mode 100644 index c9134ad7..00000000 --- a/roles/bastion_server/templates/install-config.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -baseDomain: ocpz.wsclab.endicott.ibm.com -compute: -- hyperthreading: Enabled - name: worker - replicas: 0 - architecture : s390x -controlPlane: - hyperthreading: Enabled - name: master - replicas: 3 - architecture : s390x -metadata: - name: distribution -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - networkType: OpenShiftSDN - serviceNetwork: - - 172.30.0.0/16 -platform: - none: {} -fips: false -pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDppZ1cEGLhC3T9rvex+ZTVq4V1IKgSUYclRkZPNFPyrMjUw+scSVw5O4wlgnhZF2ZB6XdTPKUOolvNv1TH6YdtMOulAsJ6mvs2KnRW2Sd1jdw9dJ3ijkMOAz2CBHvt9q4r0u+87WaAAMlGvHAEqCJvBawPTtYkNSXTDp7dagb2wt8+/j3HI+em7LSY4obNtYmXHsQLiEYSPQMcalHPmbgpTLerST19x214DiDCfHHkk2ek+BEdtgQEJNIc6ufyrwk6KUVv7MCgCyQB2o5r3G/KX31Va3hyRRAca1MDI3Ee+C5XpYfPZvJRhL/FKa2FRATffl7Kn3zZLH2ZtOV4Cs2zebjX0dOGUipyjnf/MacsTIeKK7Bm91IPgqgTSlcZbiIcYXoGSIO0u2pytzPJTUywmPvy2AP/NnPZn+NR39Zf/DYys8vWVPRWOKbJBSHDO0yikzH1xUt62+XS5Kl6kmPsQz3VEghA/lvVJ6KpW5PSgFXMkn8e+CcHV+bIO2zc5oc= root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file diff --git a/roles/bootstrap_server/.DS_Store b/roles/bootstrap_server/.DS_Store deleted file mode 100644 index b50ed442a4efc075cb023c35ad50912059289d56..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKJ5Iwu5S@X5EYY~6+>SaoFp(*c=zIZiB7%eyM7M%Q7fPPdZr@aN`MtG4I&obL~QQ9n&GK> zSU2r@iK^f}Bp3(=f`MQl7}%QuKR2?Iy)z;(7zhUb3j?x0Bs9Uou`|@I1D!qrfD@Qi zpv%Qsa*|`=*crkDVM_&CD*F?IEgkmcafM@NXz9fM_+X#;RI^{ul#1DXVgZkMgti)91-qo6xS&L?o^g1p<9=3BW-1 fkyCY=KZ%aG!m%@yDq`1kU|a;0km!PeA7J1e0y8^y diff --git a/roles/bootstrap_server/tasks/bootstrap_verify.yaml 
b/roles/bootstrap_server/tasks/bootstrap_verify.yaml deleted file mode 100644 index 993c10b3..00000000 --- a/roles/bootstrap_server/tasks/bootstrap_verify.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- name: connect bootstrap - command: virsh console bootstrap - -- name: Verify bootstrap install process until complete - command: journalctl -u bootkube.service - register: result - until: result.stdout.find("bootkube.service complete") != -1 - retries: 100 - delay: 300 diff --git a/roles/control_nodes/.DS_Store b/roles/control_nodes/.DS_Store deleted file mode 100644 index 78fb190302cbe70ee2c1103346ff465fe5b0e274..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKOG*Pl5UtV(18%Z(*;fd7fH6!Kg1cNGCL?AslL?CZIY}<#8C-h;&!De9N``5{ zm55Y9^{ejsbkBq74-xVFsh$x{iKu`IvKSeW;jHV#gU^7hH5$632YS42x=p6gUmTLP z7qq2Y+EGnM>t8qB8a^TB4SM;m0xJ{W+|QPquHCEGno-TRt17Qq)UPjx)y?x&u2$RC zKZ@ILM{^!cA7(HR3?0TIG=CBsaiwE#C{<)!(}8gjP(q>!27ZBo55y5YxBvhE diff --git a/roles/kvm_host/tasks/create_bastion.yaml b/roles/kvm_host/tasks/create_bastion.yaml deleted file mode 100644 index 79c9af24..00000000 --- a/roles/kvm_host/tasks/create_bastion.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: virtualize bastion server - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - - - name: start bastion install - command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" 
--noautoconsole diff --git a/roles/kvm_host/tasks/create_bootstrap.yaml b/roles/kvm_host/tasks/create_bootstrap.yaml deleted file mode 100644 index 1fc4728d..00000000 --- a/roles/kvm_host/tasks/create_bootstrap.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: virtualize bootstrap - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - - - name: boot bootstrap - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - diff --git a/roles/kvm_host/tasks/create_nodes.yaml b/roles/kvm_host/tasks/create_nodes.yaml deleted file mode 100644 index 9f3d9389..00000000 --- a/roles/kvm_host/tasks/create_nodes.yaml +++ /dev/null @@ -1,61 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: virtualize control-0 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G - - - name: virtualize control-1 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G - - - name: virtualize control-2 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 
/var/lib/libvirt/images/control-2.qcow2 100G - - - name: virtualize compute-0 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G - - - name: virtualize compute-1 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G - - - name: install CoreOS on control-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - - name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on control-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" 
--noautoconsole - args: - chdir: /var/lib/libvirt/images - - - name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on control-2 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - - name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on compute-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - - name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on compute-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 
coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images diff --git a/roles/kvm_host/tasks/define_macvtap.yaml b/roles/kvm_host/tasks/define_macvtap.yaml deleted file mode 100644 index 57a66471..00000000 --- a/roles/kvm_host/tasks/define_macvtap.yaml +++ /dev/null @@ -1,24 +0,0 @@ -## Playbook works, need to change absolute path to relative or variable. Need to move to roles file for kvm_host ---- - -- hosts: kvm_host - become: true - tasks: - - - name: Set up macvtap bridge - community.libvirt.virt_net: - command: define - name: macvtap-net - autostart: true - xml: "{{ lookup ('template', '/home/jacob/.git/Ansible-OpenShift-Provisioning/roles/kvm_host/kvm_host_s90x/templates/macvtap.xml.j2') }}" - - - name: Start macvtap-net - community.libvirt.virt_net: - autostart: yes - command: start - name: macvtap-net - - - name: Set autostart for macvtap-net - community.libvirt.virt_net: - autostart: yes - name: macvtap-net diff --git a/roles/kvm_host/tasks/main.yaml b/roles/kvm_host/tasks/main.yaml deleted file mode 100644 index 8c3c8296..00000000 --- a/roles/kvm_host/tasks/main.yaml +++ /dev/null @@ -1,20 +0,0 @@ -- name: Set up macvtap bridge - tags: kvmhost - community.libvirt.virt_net: - command: define - name: macvtap-net - autostart: true - xml: "{{ lookup ('template', 'macvtap.xml.j2') }}" - -- name: Start macvtap-net - tags: kvmhost - community.libvirt.virt_net: - autostart: yes - command: start - 
name: macvtap-net - -- name: Set autostart for macvtap-net - tags: kvmhost - community.libvirt.virt_net: - autostart: yes - name: macvtap-net diff --git a/roles/kvm_host/tasks/prep_kvm_guests.yaml b/roles/kvm_host/tasks/prep_kvm_guests.yaml deleted file mode 100644 index 75abf5aa..00000000 --- a/roles/kvm_host/tasks/prep_kvm_guests.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: get rhcos qcow2 files - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz - dest: /var/lib/libvirt/images/ - - - - name: Unzip OCP dependencies - command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz diff --git a/roles/kvm_host/templates/macvtap.xml.j2 b/roles/kvm_host/templates/macvtap.xml.j2 deleted file mode 100644 index 388477ea..00000000 --- a/roles/kvm_host/templates/macvtap.xml.j2 +++ /dev/null @@ -1,6 +0,0 @@ - - macvtap-net - - - - diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index beaca344..f9fb4a29 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,19 +1,11 @@ --- -#- name: distribute the ssh key to a remote host -# tags: ssh,ssh-copy-id -# shell: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" -# register: ssh_copy_id_execution +- name: distribute the ssh key to a remote host + tags: ssh,ssh-copy-id + shell: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" + register: ssh_copy_id_execution -#- name: Print results of copying ssh id to remote host. 
-# tags: ssh,ssh-copy-id -# debug: -# var: ssh_copy_id_execution - -- name: copy ssh key to remote host - tags: ssh-copy-id - copy: - src: ~/.ssh/{{ env_ssh_ans_name }}.pub - dest: ~/.ssh/{{ env_ssh_ans_name }}.pub - mode: '644' - owner: root \ No newline at end of file +- name: Print results of copying ssh id to remote host. + tags: ssh,ssh-copy-id + debug: + var: ssh_copy_id_execution \ No newline at end of file diff --git a/roles/worker_nodes/.DS_Store b/roles/worker_nodes/.DS_Store deleted file mode 100644 index 1fbd689229062191b69aea2e8fd10736d076ce3d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHK%}T>S5T4Z*f*$PAi;yP}`UbJYgCJh|0=11=F*U_1UOePA1o0Jo0FOR{UVW50 zf3%zKsyC4`12f-dcV>6y3)#&Afa~2AU4RY%B&dX~Ei@6Kane9Gv}Xs=@f@?GvKkG` zY8d!hhcRFb{67Zh+ik-+TtW$F;D5igB^;pc&sjKi&$GPf_D9uZ%Kc)z(Mgr(x(_6; zCm)Z4<2RbC;ZgkJF&zghBrt|kxQ2qWy$SMt!8QDR1OG_Yfsdbi@%jF&xkvn#75wBE zsiO_ISjP;mRx5COD;0R2k=(seKkJA4`O|{&#Y&TEnQz=9lBv8|v+KwJp4lYHwxC&K zz!)$F77Wn-kfIW1inSnb9ccIpfY`!r7Hrdx1I46@nPM%7BPdQn5hc{=6~jq5_NVHX zDb|7#4o)v0PFHq%Lvg%1&Y!Y7xJ=NjF<=Zd8Q7G^4xRsp-{1e6LH1+}7z1m?fNSOb zyoV*}*%}l_XKji)KqVo+T98A~=yA*qI*NBuS#T_+24bdI3t|t7{Rl7(W{iP9W#9|1 CY>%%1 From 7c96cbc157ba4608e515f73171da239dc9d86801 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Mon, 30 Aug 2021 19:09:34 -0500 Subject: [PATCH 390/885] Deleted deprecated roles directories --- roles/bastion_server/.DS_Store | Bin 6148 -> 0 bytes .../bastion_server/tasks/connect_cluster.yaml | 12 - .../tasks/fill_install_config.yaml | 44 ---- roles/bastion_server/tasks/fix-sched.yaml | 37 --- roles/bastion_server/tasks/get-images.yaml | 30 --- roles/bastion_server/tasks/get-ocp.yaml | 94 ------- roles/bastion_server/tasks/http_setup.yaml | 101 -------- roles/bastion_server/tasks/load_balancer.yaml | 28 --- roles/bastion_server/tasks/main.yaml | 236 ------------------ .../templates/install-config.yaml | 26 -- roles/bootstrap_server/.DS_Store | Bin 6148 -> 0 bytes 
.../tasks/bootstrap_verify.yaml | 11 - roles/control_nodes/.DS_Store | Bin 6148 -> 0 bytes roles/kvm_host/tasks/create_bastion.yaml | 11 - roles/kvm_host/tasks/create_bootstrap.yaml | 15 -- roles/kvm_host/tasks/create_nodes.yaml | 61 ----- roles/kvm_host/tasks/define_macvtap.yaml | 24 -- roles/kvm_host/tasks/main.yaml | 20 -- roles/kvm_host/tasks/prep_kvm_guests.yaml | 14 -- roles/kvm_host/templates/macvtap.xml.j2 | 6 - roles/ssh_copy_id/tasks/main.yaml | 24 +- roles/worker_nodes/.DS_Store | Bin 6148 -> 0 bytes 22 files changed, 8 insertions(+), 786 deletions(-) delete mode 100644 roles/bastion_server/.DS_Store delete mode 100644 roles/bastion_server/tasks/connect_cluster.yaml delete mode 100644 roles/bastion_server/tasks/fill_install_config.yaml delete mode 100644 roles/bastion_server/tasks/fix-sched.yaml delete mode 100644 roles/bastion_server/tasks/get-images.yaml delete mode 100644 roles/bastion_server/tasks/get-ocp.yaml delete mode 100644 roles/bastion_server/tasks/http_setup.yaml delete mode 100644 roles/bastion_server/tasks/load_balancer.yaml delete mode 100644 roles/bastion_server/tasks/main.yaml delete mode 100644 roles/bastion_server/templates/install-config.yaml delete mode 100644 roles/bootstrap_server/.DS_Store delete mode 100644 roles/bootstrap_server/tasks/bootstrap_verify.yaml delete mode 100644 roles/control_nodes/.DS_Store delete mode 100644 roles/kvm_host/tasks/create_bastion.yaml delete mode 100644 roles/kvm_host/tasks/create_bootstrap.yaml delete mode 100644 roles/kvm_host/tasks/create_nodes.yaml delete mode 100644 roles/kvm_host/tasks/define_macvtap.yaml delete mode 100644 roles/kvm_host/tasks/main.yaml delete mode 100644 roles/kvm_host/tasks/prep_kvm_guests.yaml delete mode 100644 roles/kvm_host/templates/macvtap.xml.j2 delete mode 100644 roles/worker_nodes/.DS_Store diff --git a/roles/bastion_server/.DS_Store b/roles/bastion_server/.DS_Store deleted file mode 100644 index 
1fbd689229062191b69aea2e8fd10736d076ce3d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHK%}T>S5T4Z*f*$PAi;yP}`UbJYgCJh|0=11=F*U_1UOePA1o0Jo0FOR{UVW50 zf3%zKsyC4`12f-dcV>6y3)#&Afa~2AU4RY%B&dX~Ei@6Kane9Gv}Xs=@f@?GvKkG` zY8d!hhcRFb{67Zh+ik-+TtW$F;D5igB^;pc&sjKi&$GPf_D9uZ%Kc)z(Mgr(x(_6; zCm)Z4<2RbC;ZgkJF&zghBrt|kxQ2qWy$SMt!8QDR1OG_Yfsdbi@%jF&xkvn#75wBE zsiO_ISjP;mRx5COD;0R2k=(seKkJA4`O|{&#Y&TEnQz=9lBv8|v+KwJp4lYHwxC&K zz!)$F77Wn-kfIW1inSnb9ccIpfY`!r7Hrdx1I46@nPM%7BPdQn5hc{=6~jq5_NVHX zDb|7#4o)v0PFHq%Lvg%1&Y!Y7xJ=NjF<=Zd8Q7G^4xRsp-{1e6LH1+}7z1m?fNSOb zyoV*}*%}l_XKji)KqVo+T98A~=yA*qI*NBuS#T_+24bdI3t|t7{Rl7(W{iP9W#9|1 CY>%%1 diff --git a/roles/bastion_server/tasks/connect_cluster.yaml b/roles/bastion_server/tasks/connect_cluster.yaml deleted file mode 100644 index 82c0ca2f..00000000 --- a/roles/bastion_server/tasks/connect_cluster.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - - - name: create csr-name variable - command: oc get csr - register: csr-name - - - name: approve all certificate signing requests - command: oc adm certificate approve {{ csr-name }} diff --git a/roles/bastion_server/tasks/fill_install_config.yaml b/roles/bastion_server/tasks/fill_install_config.yaml deleted file mode 100644 index 55e817d6..00000000 --- a/roles/bastion_server/tasks/fill_install_config.yaml +++ /dev/null @@ -1,44 +0,0 @@ -##Needs variables from host_vars for baseDomain, cluster_name, pullsecret, and ssh-public-key. -##I think it also needs cidr (pod's IP range) and service network IP range. 
-##Ensure PATHs are correct ---- - -- hosts: bastion_server - become: true - tasks: - - - name: create install-config.yaml - file: - path: "~/files/install-config.yaml" - state: touch - - - name: Fill contents of install-config.yaml file - copy: - dest: "~/files/install-config.yaml" - content: | - apiVersion: v1 - baseDomain: ocpz.wsclab.endicott.ibm.com - compute: - - architecture: s390x - hyperthreading: Enabled - name: worker - replicas: 0 - controlPlane: - architecture: s390x - hyperthreading: Enabled - name: master - replicas: 3 - metadata: - name: distribution - networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - networkType: OpenShiftSDN - serviceNetwork: - - 172.30.0.0/16 - platform: - none: {} - fips: false - pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkR
jelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' - sshKey: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDDudjhbUkA7ekvuzZe7WpkkF2KOoLGwWU05Ch6Ty0OWDW0vdbtrG7TIgy8FYOvBnArnIJcy1AP6tpH6BcbmHRIosVFbZ3GrLkKEOroBD+8+6qGzN+fIXBzss20iRvMvDdlDaVJ1BqmTvNnkbeWdgCg1MbICQ1OYOExm+xgvGWHOjoJkUzkw3oBnAnpZHHTkvM2dY0SNtzV9i5FcSvdBM2kindvBpvDZ9PjTGkcFMnbVRpBXUV+axNg4yuX0YWFDWOFlQ4DjvasdN6jMQ2MrWBYtYF1RAklsOhSXTPKAYaxZZsfeEBnMVUFshaDYy7PlbCXcUEVEFEkMlTAbe//bBXx5l+2fVFtOUyhfws28bLg+rlJLivINoLW4EB+ERViYBTCEx5njl9mK9EcPpCJlLTR82kr8vEwE/sJ9ro3dH/aEUQcTkVYqlOCw2lsFZpkZtthArknZiHk91OtWgXj4GsAjihehfBvEL/GbEuO0HTzvl+QiTnC+mL4KcObl31K9LdlAbKA53l6G68py0u7plUervFN8H7rIFbv4ndUTAo2rODty/QoYH8jBu460LLgLyaRfaDFrGitnAdSiCS/8dEr8iAVxj5X5sgcTQlsJYyvMv61YCzItmlipDpbf+M/mJyJuxyuYWqudItOIWchFktstcpp3dx1iM/pr0o4HitpeQ== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' diff --git a/roles/bastion_server/tasks/fix-sched.yaml b/roles/bastion_server/tasks/fix-sched.yaml deleted file mode 100644 index 17cfef11..00000000 --- a/roles/bastion_server/tasks/fix-sched.yaml +++ /dev/null @@ -1,37 +0,0 @@ -- hosts: bastion_server - become: true - tasks: - -# - name: Set mastersSchedulable parameter to False -# replace: -# path: /ocpinst/manifests/cluster-scheduler-02-config.yml -# regexp: ': true' -# replace: ': false' - - - name: Create Ignition files - ansible.builtin.command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ - become: yes - - - name: create Ignition directory on webserver - file: - path: /var/www/html/ignition - state: directory - - - name: Copy bootstrap Ignition file to web server - copy: - src: /ocpinst/bootstrap.ign - dest: /var/www/html/ignition - remote_src: yes - - - name: Copy control plane Ignition file to web server - copy: - src: /ocpinst/master.ign - dest: /var/www/html/ignition - remote_src: yes - - - name: Copy worker Ignition file to web server - copy: - src: /ocpinst/worker.ign - dest: /var/www/html/ignition - remote_src: yes - diff --git a/roles/bastion_server/tasks/get-images.yaml b/roles/bastion_server/tasks/get-images.yaml 
deleted file mode 100644 index 9563e17b..00000000 --- a/roles/bastion_server/tasks/get-images.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- - -hosts: kvm_host -become: true -tasks: - -- name: download RHCOS initramfs - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img - dest: /var/lib/libvirt/images - mode: 0755 - -- name: download RHCOS kernel - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x - dest: /var/lib/libvirt/images - mode: 0755 - -- name: download RHCOS rootfs - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/lib/libvirt/images - mode: 0755 - -- name: download QCOW2 image - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz - dest: /var/lib/libvirt/images - mode: 0755 - diff --git a/roles/bastion_server/tasks/get-ocp.yaml b/roles/bastion_server/tasks/get-ocp.yaml deleted file mode 100644 index 5b42a9e5..00000000 --- a/roles/bastion_server/tasks/get-ocp.yaml +++ /dev/null @@ -1,94 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - - - name: create OCP download landing directory - file: - path: /ocpinst/ - state: directory - - - name: Unzip OCP Client - ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz - dest: /ocpinst/ - remote_src: yes - - - name: Unzip OCP Installer - ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz - dest: /ocpinst/ - remote_src: yes - - - name: Copy kubectl file - ansible.builtin.copy: - src: /ocpinst/kubectl - dest: /usr/local/bin/kubectl - remote_src: yes - owner: root - group: root - mode: '0755' - - - name: Copy oc file - 
ansible.builtin.copy: - src: /ocpinst/oc - dest: /usr/local/bin/oc - remote_src: yes - owner: root - group: root - mode: '0755' - - - name: Copy openshift-install file - ansible.builtin.copy: - src: /ocpinst/openshift-install - dest: /usr/local/bin/openshift-install - remote_src: yes - owner: root - group: root - mode: '0755' - - - name: Copy install-config.yaml to ocp install directory - copy: - src: install-config.yaml - dest: /ocpinst/install-config.yaml - - - name: Create Manifests - command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ - become: yes - - - name: Set mastersSchedulable parameter to False - replace: - path: /ocpinst/manifests/cluster-scheduler-02-config.yml - regexp: ': true' - replace: ': false' - - - name: Create Ignition files - command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ - become: yes - - - name: create Ignition directory on webserver - file: - path: /var/www/html/ignition - state: directory - - - name: Copy bootstrap Ignition file to web server - copy: - src: /ocpinst/bootstrap.ign - dest: /var/www/html/ignition - remote_src: yes - - - name: Copy control plane Ignition file to web server - copy: - src: /ocpinst/master.ign - dest: /var/www/html/ignition - remote_src: yes - - - name: Copy compute Ignition file to web server - copy: - src: /ocpinst/worker.ign - dest: /var/www/html/ignition - remote_src: yes - - - diff --git a/roles/bastion_server/tasks/http_setup.yaml b/roles/bastion_server/tasks/http_setup.yaml deleted file mode 100644 index 313fab32..00000000 --- a/roles/bastion_server/tasks/http_setup.yaml +++ /dev/null @@ -1,101 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - - ## - name: update repository index - ## dnf: - ## update_cache: yes - - ## - name: install httpd - ## dnf: - ## name: httpd - ## state: latest - - - name: Ensure the default Apache port is 8080 - replace: - path: /etc/httpd/conf/httpd.conf - regexp: '^Listen 80' - replace: 'Listen 8080' - backup: yes - 
- - name: Ensure the SSL default port is 4443 - replace: - path: /etc/httpd/conf.d/ssl.conf - regexp: '^Listen 443 https' - replace: 'Listen 4443 https' - backup: yes - - - name: restart httpd to reflect changes to port - service: - name: httpd - state: restarted - -# - name: Allow all access to tcp port 8080 -# community.general.ufw: -# rule: allow -# port: '8080' -# proto: tcp -# -# - name: Allow all access to tcp port 80 -# community.general.ufw: -# rule: allow -# port: '80' -# proto: tcp -# -# - name: Allow all access to tcp port 443 -# community.general.ufw: -# rule: allow -# port: '443' -# proto: tcp -# -# - name: Allow all access to tcp port 4443 -# community.general.ufw: -# rule: allow -# port: '4443' -# proto: tcp - - - name: create directory bin for mirrors - file: - path: /var/www/html/bin - state: directory - mode: '0755' - - - name: create directory bootstrap for mirrors - file: - path: /var/www/html/bootstrap - state: directory - mode: '0755' - - - name: get mirrors 1 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x - dest: /var/www/html/bin - remote_src: yes - mode: '0755' - - - name: get mirrors 2 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img - dest: /var/www/html/bin - remote_src: yes - mode: '0755' - - - name: get mirrors 3 - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/www/html/bin - remote_src: yes - mode: '0755' - - - name: check to make sure httpd is started - service: - name: httpd - state: started - - - name: check httpd status - service: - state: started - name: httpd - diff --git a/roles/bastion_server/tasks/load_balancer.yaml b/roles/bastion_server/tasks/load_balancer.yaml deleted file mode 100644 index acfde8ff..00000000 --- a/roles/bastion_server/tasks/load_balancer.yaml +++ 
/dev/null @@ -1,28 +0,0 @@ ---- - -- hosts: bastion_server - become: true - tasks: - -# required plugin: ansible-galaxy collection install community.general - -##- name: install haproxy -## dnf: -## - haproxy - - - name: move haproxy config file to bastion - copy: - src: haproxy.cfg - dest: /etc/haproxy/haproxy.cfg - force: yes - backup: yes - - - name: enable haproxy - systemd: - state: enabled - named: haproxy - - - name: Start haproxy - systemd: - state: restarted - name: haproxy diff --git a/roles/bastion_server/tasks/main.yaml b/roles/bastion_server/tasks/main.yaml deleted file mode 100644 index 0d6d8024..00000000 --- a/roles/bastion_server/tasks/main.yaml +++ /dev/null @@ -1,236 +0,0 @@ -#This is the main task book for the bastion server to set up the load balancer, http server, and download OCP install and ignition files - -# required plugin: ansible-galaxy collection install community.general - -#- name: install haproxy -# dnf: -# - haproxy - -- name: move haproxy config file to bastion - tags: bastion - copy: - src: haproxy.cfg - dest: /etc/haproxy/haproxy.cfg - force: yes - backup: yes - -- name: enable haproxy - tags: bastion - systemd: - state: enabled - named: haproxy - -- name: Start haproxy - tags: bastion - systemd: - state: restarted - name: haproxy - -## - name: update repository index -## dnf: -## update_cache: yes - -## - name: install httpd -## dnf: -## name: httpd -## state: latest - -- name: Ensure the default Apache port is 8080 - tags: bastion - replace: - path: /etc/httpd/conf/httpd.conf - regexp: '^Listen 80' - replace: 'Listen 8080' - backup: yes - -- name: Ensure the SSL default port is 4443 - tags: bastion - replace: - path: /etc/httpd/conf.d/ssl.conf - regexp: '^Listen 443 https' - replace: 'Listen 4443 https' - backup: yes - -- name: restart httpd to reflect changes to port - tags: bastion - service: - name: httpd - state: restarted - -# - name: Allow all access to tcp port 8080 -# tags: bastion -# community.general.ufw: -# rule: 
allow -# port: '8080' -# proto: tcp -# -# - name: Allow all access to tcp port 80 -# tags: bastion -# community.general.ufw: -# rule: allow -# port: '80' -# proto: tcp -# -# - name: Allow all access to tcp port 443 -# tags: bastion -# community.general.ufw: -# rule: allow -# port: '443' -# proto: tcp -# -# - name: Allow all access to tcp port 4443 -# tags: bastion -# community.general.ufw: -# rule: allow -# port: '4443' -# proto: tcp - -- name: create directory bin for mirrors - tags: bastion - file: - path: /var/www/html/bin - state: directory - mode: '0755' - -- name: create directory bootstrap for mirrors - tags: bastion - file: - path: /var/www/html/bootstrap - state: directory - mode: '0755' - -- name: get mirrors 1 - tags: bastion - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x - dest: /var/www/html/bin - remote_src: yes - mode: '0755' - -- name: get mirrors 2 - tags: bastion - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img - dest: /var/www/html/bin - remote_src: yes - mode: '0755' - -- name: get mirrors 3 - tags: bastion - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/www/html/bin - remote_src: yes - mode: '0755' - -- name: check to make sure httpd is started - tags: bastion - service: - name: httpd - state: started - -- name: check httpd status - tags: bastion - service: - state: started - name: httpd - -- name: create OCP download landing directory - tags: bastion - file: - path: /ocpinst/ - state: directory - -- name: Unzip OCP Client - tags: bastion - ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz - dest: /ocpinst/ - remote_src: yes - -- name: Unzip OCP Installer - tags: bastion - ansible.builtin.unarchive: - 
src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz - dest: /ocpinst/ - remote_src: yes - -- name: Copy kubectl file - tags: bastion - ansible.builtin.copy: - src: /ocpinst/kubectl - dest: /usr/local/bin/kubectl - remote_src: yes - owner: root - group: root - mode: '0755' - -- name: Copy oc file - tags: bastion - ansible.builtin.copy: - src: /ocpinst/oc - dest: /usr/local/bin/oc - remote_src: yes - owner: root - group: root - mode: '0755' - -- name: Copy openshift-install file - tags: bastion - ansible.builtin.copy: - src: /ocpinst/openshift-install - dest: /usr/local/bin/openshift-install - remote_src: yes - owner: root - group: root - mode: '0755' - -- name: Copy install-config.yaml to ocp install directory - tags: bastion - copy: - src: install-config.yaml - dest: /ocpinst/install-config.yaml - -- name: Create Manifests - tags: bastion - command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ - become: yes - -- name: Set mastersSchedulable parameter to False - tags: bastion - replace: - path: /ocpinst/manifests/cluster-scheduler-02-config.yml - regexp: ': true' - replace: ': false' - -- name: Create Ignition files - tags: bastion - command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ - become: yes - -- name: create Ignition directory on webserver - tags: bastion - file: - path: /var/www/html/ignition - state: directory - -- name: Copy bootstrap Ignition file to web server - tags: bastion - copy: - src: /ocpinst/bootstrap.ign - dest: /var/www/html/ignition - remote_src: yes - -- name: Copy control plane Ignition file to web server - tags: bastion - copy: - src: /ocpinst/master.ign - dest: /var/www/html/ignition - remote_src: yes - -- name: Copy worker Ignition file to web server - tags: bastion - copy: - src: /ocpinst/worker.ign - dest: /var/www/html/ignition - remote_src: yes diff --git a/roles/bastion_server/templates/install-config.yaml 
b/roles/bastion_server/templates/install-config.yaml deleted file mode 100644 index c9134ad7..00000000 --- a/roles/bastion_server/templates/install-config.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -baseDomain: ocpz.wsclab.endicott.ibm.com -compute: -- hyperthreading: Enabled - name: worker - replicas: 0 - architecture : s390x -controlPlane: - hyperthreading: Enabled - name: master - replicas: 3 - architecture : s390x -metadata: - name: distribution -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - networkType: OpenShiftSDN - serviceNetwork: - - 172.30.0.0/16 -platform: - none: {} -fips: false -pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmd
U1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDppZ1cEGLhC3T9rvex+ZTVq4V1IKgSUYclRkZPNFPyrMjUw+scSVw5O4wlgnhZF2ZB6XdTPKUOolvNv1TH6YdtMOulAsJ6mvs2KnRW2Sd1jdw9dJ3ijkMOAz2CBHvt9q4r0u+87WaAAMlGvHAEqCJvBawPTtYkNSXTDp7dagb2wt8+/j3HI+em7LSY4obNtYmXHsQLiEYSPQMcalHPmbgpTLerST19x214DiDCfHHkk2ek+BEdtgQEJNIc6ufyrwk6KUVv7MCgCyQB2o5r3G/KX31Va3hyRRAca1MDI3Ee+C5XpYfPZvJRhL/FKa2FRATffl7Kn3zZLH2ZtOV4Cs2zebjX0dOGUipyjnf/MacsTIeKK7Bm91IPgqgTSlcZbiIcYXoGSIO0u2pytzPJTUywmPvy2AP/NnPZn+NR39Zf/DYys8vWVPRWOKbJBSHDO0yikzH1xUt62+XS5Kl6kmPsQz3VEghA/lvVJ6KpW5PSgFXMkn8e+CcHV+bIO2zc5oc= root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' 
\ No newline at end of file diff --git a/roles/bootstrap_server/.DS_Store b/roles/bootstrap_server/.DS_Store deleted file mode 100644 index b50ed442a4efc075cb023c35ad50912059289d56..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKJ5Iwu5S@X5EYY~6+>SaoFp(*c=zIZiB7%eyM7M%Q7fPPdZr@aN`MtG4I&obL~QQ9n&GK> zSU2r@iK^f}Bp3(=f`MQl7}%QuKR2?Iy)z;(7zhUb3j?x0Bs9Uou`|@I1D!qrfD@Qi zpv%Qsa*|`=*crkDVM_&CD*F?IEgkmcafM@NXz9fM_+X#;RI^{ul#1DXVgZkMgti)91-qo6xS&L?o^g1p<9=3BW-1 fkyCY=KZ%aG!m%@yDq`1kU|a;0km!PeA7J1e0y8^y diff --git a/roles/bootstrap_server/tasks/bootstrap_verify.yaml b/roles/bootstrap_server/tasks/bootstrap_verify.yaml deleted file mode 100644 index 993c10b3..00000000 --- a/roles/bootstrap_server/tasks/bootstrap_verify.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- name: connect bootstrap - command: virsh console bootstrap - -- name: Verify bootstrap install process until complete - command: journalctl -u bootkube.service - register: result - until: result.stdout.find("bootkube.service complete") != -1 - retries: 100 - delay: 300 diff --git a/roles/control_nodes/.DS_Store b/roles/control_nodes/.DS_Store deleted file mode 100644 index 78fb190302cbe70ee2c1103346ff465fe5b0e274..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHKOG*Pl5UtV(18%Z(*;fd7fH6!Kg1cNGCL?AslL?CZIY}<#8C-h;&!De9N``5{ zm55Y9^{ejsbkBq74-xVFsh$x{iKu`IvKSeW;jHV#gU^7hH5$632YS42x=p6gUmTLP z7qq2Y+EGnM>t8qB8a^TB4SM;m0xJ{W+|QPquHCEGno-TRt17Qq)UPjx)y?x&u2$RC zKZ@ILM{^!cA7(HR3?0TIG=CBsaiwE#C{<)!(}8gjP(q>!27ZBo55y5YxBvhE diff --git a/roles/kvm_host/tasks/create_bastion.yaml b/roles/kvm_host/tasks/create_bastion.yaml deleted file mode 100644 index 79c9af24..00000000 --- a/roles/kvm_host/tasks/create_bastion.yaml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: virtualize bastion server - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 
/var/lib/libvirt/images/bastion.qcow2 30G - - - name: start bastion install - command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole diff --git a/roles/kvm_host/tasks/create_bootstrap.yaml b/roles/kvm_host/tasks/create_bootstrap.yaml deleted file mode 100644 index 1fc4728d..00000000 --- a/roles/kvm_host/tasks/create_bootstrap.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: virtualize bootstrap - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bootstrap.qcow2 100G - - - name: boot bootstrap - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign ip=9.60.87.133::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name bootstrap --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/bootstrap.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/bootstrap.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - diff --git a/roles/kvm_host/tasks/create_nodes.yaml b/roles/kvm_host/tasks/create_nodes.yaml deleted file mode 100644 
index 9f3d9389..00000000 --- a/roles/kvm_host/tasks/create_nodes.yaml +++ /dev/null @@ -1,61 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: virtualize control-0 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G - - - name: virtualize control-1 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G - - - name: virtualize control-2 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G - - - name: virtualize compute-0 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G - - - name: virtualize compute-1 node - command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G - - - name: install CoreOS on control-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.138::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - - name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on control-1 node - command: virt-install --boot 
kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.137::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - - name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on control-2 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign ip=9.60.87.136::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name control-2 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/control-2.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/master.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - - name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on compute-0 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign 
ip=9.60.87.135::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-0 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-0.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images - - - name: pause 8 minutes - pause: - minutes: 8 - - - name: install CoreOS on compute-1 node - command: virt-install --boot kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign ip=9.60.87.134::9.60.86.1:255.255.254.0:::none nameserver=9.60.87.139' --connect qemu:///system --name compute-1 --memory 16384 --vcpus 4 --disk /var/lib/libvirt/images/compute-1.qcow2 --accelerate --import --network network=macvtap-net --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/www/html/ignition/worker.ign,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - args: - chdir: /var/lib/libvirt/images diff --git a/roles/kvm_host/tasks/define_macvtap.yaml b/roles/kvm_host/tasks/define_macvtap.yaml deleted file mode 100644 index 57a66471..00000000 --- a/roles/kvm_host/tasks/define_macvtap.yaml +++ /dev/null @@ -1,24 +0,0 @@ -## Playbook works, need to change absolute path to relative or variable. 
Need to move to roles file for kvm_host ---- - -- hosts: kvm_host - become: true - tasks: - - - name: Set up macvtap bridge - community.libvirt.virt_net: - command: define - name: macvtap-net - autostart: true - xml: "{{ lookup ('template', '/home/jacob/.git/Ansible-OpenShift-Provisioning/roles/kvm_host/kvm_host_s90x/templates/macvtap.xml.j2') }}" - - - name: Start macvtap-net - community.libvirt.virt_net: - autostart: yes - command: start - name: macvtap-net - - - name: Set autostart for macvtap-net - community.libvirt.virt_net: - autostart: yes - name: macvtap-net diff --git a/roles/kvm_host/tasks/main.yaml b/roles/kvm_host/tasks/main.yaml deleted file mode 100644 index 8c3c8296..00000000 --- a/roles/kvm_host/tasks/main.yaml +++ /dev/null @@ -1,20 +0,0 @@ -- name: Set up macvtap bridge - tags: kvmhost - community.libvirt.virt_net: - command: define - name: macvtap-net - autostart: true - xml: "{{ lookup ('template', 'macvtap.xml.j2') }}" - -- name: Start macvtap-net - tags: kvmhost - community.libvirt.virt_net: - autostart: yes - command: start - name: macvtap-net - -- name: Set autostart for macvtap-net - tags: kvmhost - community.libvirt.virt_net: - autostart: yes - name: macvtap-net diff --git a/roles/kvm_host/tasks/prep_kvm_guests.yaml b/roles/kvm_host/tasks/prep_kvm_guests.yaml deleted file mode 100644 index 75abf5aa..00000000 --- a/roles/kvm_host/tasks/prep_kvm_guests.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- - -- hosts: kvm_host - become: true - tasks: - - - name: get rhcos qcow2 files - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz - dest: /var/lib/libvirt/images/ - - - - name: Unzip OCP dependencies - command: gunzip /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz diff --git a/roles/kvm_host/templates/macvtap.xml.j2 b/roles/kvm_host/templates/macvtap.xml.j2 deleted file mode 100644 index 388477ea..00000000 --- a/roles/kvm_host/templates/macvtap.xml.j2 +++ /dev/null @@ -1,6 
+0,0 @@ - - macvtap-net - - - - diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index beaca344..f9fb4a29 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,19 +1,11 @@ --- -#- name: distribute the ssh key to a remote host -# tags: ssh,ssh-copy-id -# shell: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" -# register: ssh_copy_id_execution +- name: distribute the ssh key to a remote host + tags: ssh,ssh-copy-id + shell: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" + register: ssh_copy_id_execution -#- name: Print results of copying ssh id to remote host. -# tags: ssh,ssh-copy-id -# debug: -# var: ssh_copy_id_execution - -- name: copy ssh key to remote host - tags: ssh-copy-id - copy: - src: ~/.ssh/{{ env_ssh_ans_name }}.pub - dest: ~/.ssh/{{ env_ssh_ans_name }}.pub - mode: '644' - owner: root \ No newline at end of file +- name: Print results of copying ssh id to remote host. 
+ tags: ssh,ssh-copy-id + debug: + var: ssh_copy_id_execution \ No newline at end of file diff --git a/roles/worker_nodes/.DS_Store b/roles/worker_nodes/.DS_Store deleted file mode 100644 index 1fbd689229062191b69aea2e8fd10736d076ce3d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeHK%}T>S5T4Z*f*$PAi;yP}`UbJYgCJh|0=11=F*U_1UOePA1o0Jo0FOR{UVW50 zf3%zKsyC4`12f-dcV>6y3)#&Afa~2AU4RY%B&dX~Ei@6Kane9Gv}Xs=@f@?GvKkG` zY8d!hhcRFb{67Zh+ik-+TtW$F;D5igB^;pc&sjKi&$GPf_D9uZ%Kc)z(Mgr(x(_6; zCm)Z4<2RbC;ZgkJF&zghBrt|kxQ2qWy$SMt!8QDR1OG_Yfsdbi@%jF&xkvn#75wBE zsiO_ISjP;mRx5COD;0R2k=(seKkJA4`O|{&#Y&TEnQz=9lBv8|v+KwJp4lYHwxC&K zz!)$F77Wn-kfIW1inSnb9ccIpfY`!r7Hrdx1I46@nPM%7BPdQn5hc{=6~jq5_NVHX zDb|7#4o)v0PFHq%Lvg%1&Y!Y7xJ=NjF<=Zd8Q7G^4xRsp-{1e6LH1+}7z1m?fNSOb zyoV*}*%}l_XKji)KqVo+T98A~=yA*qI*NBuS#T_+24bdI3t|t7{Rl7(W{iP9W#9|1 CY>%%1 From 4912e6ae562e5f902f939a3f8cfd614134ec1438 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 31 Aug 2021 15:19:19 -0500 Subject: [PATCH 391/885] debugging recorded run --- group_vars/all/main.yaml | 5 +- main.yaml | 16 ++-- roles/connect_cluster/tasks/main.yaml | 85 +++++++++++++++++++++- roles/create_bastion/tasks/main.yaml | 1 + roles/create_bootstrap/tasks/main.yaml | 2 +- roles/create_compute_nodes/tasks/main.yaml | 4 +- roles/create_control_nodes/tasks/main.yaml | 6 +- roles/get-ocp/files/ocp_ssh_pub | 2 +- roles/get-ocp/tasks/main.yaml | 1 + roles/install_ansible/tasks/main.yaml | 6 +- 10 files changed, 104 insertions(+), 24 deletions(-) diff --git a/group_vars/all/main.yaml b/group_vars/all/main.yaml index 65ec7ec5..3f97f6a6 100644 --- a/group_vars/all/main.yaml +++ b/group_vars/all/main.yaml @@ -34,9 +34,8 @@ env_ssh_pass: ibmzrocks #Password to use for ssh into kvm and bastion for first- env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. 
default=""(no password) -# OpenShift cluster's ssh key pair filename -env_ssh_ocp_name: ocp -env_ssh_ocp_pass: ibmzrocks +# OpenShift cluster's ssh key comment +env_ssh_ocp_comm: "ocpz_distribution" # networking dns_nameserver: 9.60.87.139 diff --git a/main.yaml b/main.yaml index 142a5260..0d0b5d5e 100644 --- a/main.yaml +++ b/main.yaml @@ -46,11 +46,11 @@ vars_files: - env.yaml vars: # feel free to add more packages as needed - - packages: [ 'ansible', 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] + - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: - check_ssh + #- install_ansible - install_packages - - install_ansible - ssh-ocp-key-gen # SSH key for bastion to connect to nodes - set_selinux_permissive - set_firewall @@ -82,12 +82,12 @@ #roles: #- wait_for_bootstrap -#- hosts: bastion - #tags: bastion,cluster - #become: true - #gather_facts: no - #roles: - #- connect_cluster +- hosts: bastion + tags: bastion,cluster + become: true + gather_facts: no + roles: + - connect_cluster #- hosts: bastion #become: true diff --git a/roles/connect_cluster/tasks/main.yaml b/roles/connect_cluster/tasks/main.yaml index cd6cc5be..fb361d27 100644 --- a/roles/connect_cluster/tasks/main.yaml +++ b/roles/connect_cluster/tasks/main.yaml @@ -1,13 +1,79 @@ --- +#- name: Add another bin dir to system-wide $PATH. 
+# copy: +# dest: /etc/profile.d/custom-path.sh +# content: 'PATH=$PATH:{{ my_custom_path_var }}' + +- name: echo path + tags: cluster + shell: "echo $PATH" + register: path_check + +- name: print results of path check + tags: cluster + debug: + var: path_check.stdout + +- name: check who the user is + tags: cluster + shell: "whoami" + register: root_check + +- name: print results of checking what user is running tasks + tags: cluster + debug: + var: root_check.stdout + - name: export kube config file tags: cluster - command: export KUBECONFIG=/ocpinst/auth/kubeconfig + shell: "export KUBECONFIG=/ocpinst/auth/kubeconfig" + args: + chdir: / + +- name: export kube config file + tags: cluster + shell: "export KUBECONFIG=/ocpinst/auth/kubeconfig" + args: + chdir: / - name: check if system admin - command: oc whoami + tags: cluster + command: "oc whoami" register: whoami_check - failed_when: whoami_check.stdout != system:admin + #until: whoami_check.stdout.find("system:admin") != -1 + #retries: 5 + #delay: 30 + +- name: print whoami_check results to terminal + tags: cluster + debug: + var: whoami_check.stdout + +- name: get csr info + tags: cluster + command: oc get csr + register: csr + +- name: print csr info to terminal + tags: cluster + debug: + var: csr.stdout + +- name: approve all pending certificates + tags: cluster + command: "for i in `oc get csr --no-headers | grep -i pending | awk '{ print $1 }'`; do oc adm certificate approve $i; done" + register: csr_approve + +- name: print results from csr approval + tags: cluster + debug: + var: csr_approve.stdout + +- name: wait 5 minutes + tags: cluster + pause: + minutes: 5 - name: get csr info tags: cluster @@ -15,5 +81,16 @@ register: csr - name: print csr info to terminal + tags: cluster + debug: + var: csr.stdout + +- name: approve all pending certificates + tags: cluster + command: for i in `oc get csr --no-headers | grep -i pending | awk '{ print $1 }'`; do oc adm certificate approve $i; done + register: 
csr_approve + +- name: print results from csr approval + tags: cluster debug: - var: csr.stdout \ No newline at end of file + var: csr_approve.stdout \ No newline at end of file diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index c2a74f80..bd5e5bbb 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -28,6 +28,7 @@ name: bastion command: status register: bastion_check + ignore_errors: true - name: print status of bastion tags: kvm_host, bastionvm diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 60106987..f4ec9532 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -22,7 +22,7 @@ --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.133::9.60.86.1:255.255.254.0:bootstrap:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign" - --noautoconsole + --noautoconsole --wait=-1 when: bootstrap_check.failed == true - name: Pause 15 minutes for installation. Once you see the login prompt on the bootstrap's terminal. Press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. 
diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index 0514be05..14ef6a1e 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -35,7 +35,7 @@ --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.135::9.60.86.1:255.255.254.0:compute-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" - --noautoconsole + --noautoconsole --wait=-1 when: compute_0_check.failed == true - name: pause 15 minutes @@ -53,7 +53,7 @@ --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.134::9.60.86.1:255.255.254.0:compute-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" - --noautoconsole + --noautoconsole --wait=-1 when: compute_1_check.failed == true - name: pause 15 minutes diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index 9d04ef68..94b91c28 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -48,7 +48,7 @@ --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.138::9.60.86.1:255.255.254.0:control-0:enc1:none:1500 nameserver=9.60.87.139 
coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" - --noautoconsole + --noautoconsole --wait=-1 when: control_0_check.failed == true - name: pause 15 minutes @@ -66,7 +66,7 @@ --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.137::9.60.86.1:255.255.254.0:control-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" - --noautoconsole + --noautoconsole --wait=-1 when: control_1_check.failed == true - name: pause 15 minutes @@ -84,7 +84,7 @@ --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.136::9.60.86.1:255.255.254.0:control-2:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" - --noautoconsole + --noautoconsole --wait=-1 when: control_2_check.failed == true - name: pause 45 minutes diff --git a/roles/get-ocp/files/ocp_ssh_pub b/roles/get-ocp/files/ocp_ssh_pub index e76e1d5b..6f580fed 100644 --- a/roles/get-ocp/files/ocp_ssh_pub +++ b/roles/get-ocp/files/ocp_ssh_pub @@ -1 +1 @@ -ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8A2DcT4nRh8AT04kpIwCWGSSKRqAo9/M0Om2leuw2IDTCjDInCgdQ4kFuE11D/Z/bFiDgqQ92zAc/7MUA3qEgGWdX7/N1LfzIOv+RvHZ05Y5EH9TXsU+JLrc1Yk8ZOOOzn1a5B9hpimaxHeDMBkLxWrJToGdlzsL5livjUdk86xLzCcq3EjIX3mYv3gx6+/dg/Iz+z9reGIIN6+lbDt6d5ZpQ6kr1OfqUL3hNtn/cHwb9FzyHGRk1PpaQv6c3+pskWuc2RfZX88nTET+crDIzgCxK3yoB/jZi8d7DsB00ou4AxVCd14scNbqZyEfQbPBv39FSE02RfDY001Xcrlr9s2OMiXKY17KbiMUcFyRld3C40w7zT8Mp/jOQUL3Vpj4B85hu73azzV/TSOsXe0i5fthaokspaHGXGhdaR6GXbATU0u1bNVCeqdMjGDyQtIi4pLGopWgohsEl+/nYqy889tMo9zo1AyVzYMv2XKlkmzBBtW66EQHvG5jy2H7S5asBk7MH+ARzhmB7avfe8FdHFF6/O4YgMEeqOKXMW4Ffm6PchOqqL3EkSxvnq8WMUopOBSYS2ejV83dgf2c6/xqE2FwK9VfLiKm1CIZiGIfYAAR1FTGVo+Um6gO8zaKJLetHtkDGduzrAGBVWu67jBPYWY5cCmYu6g04GF7GuGz2ew== ocpz_distribution +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC9ZoxqTm4Jwopmrj61dyvolXcMVo0ebGknuKntIN+oUPuuyT0PA/2SMagE2HkaEACoO+6WRfy2uohvlsIkEsqNyfmwx7AXcEnf/jtvRdI6421rgd4hTyihlt4S9MEHVIOvtQpNfB4bA+7e+QGcWQBa66JOB9BTYYt80et0bXBwtpSQbimSRXohaisvqfUYvhXaMM8Gx+QnoN22atCw8hgA4pSztXzZBM6zSraUl7YgbFziAFdseFXlyZ3CkdvB+Ma7t5A1SszoHqFCpvIt6dCQqqg6CdQne/k789vFx+Pj5aQ+FZqfO6KNfEhEzZ7qLmyTC1M2VeXMEyWSJRpRTXUJQ9ag3z0GWzGZDUcSaz7vrJOEspuTMn4YuppwOHEVbyzesKdQdergf4g3bj6aIRnYHrYkh36CdKC+DB3+G9GBr94wDZfpClM00dGeRHsFg0GYl9btAGEvOckDdfz9kP4ND2S9fiSfVJh3WIqyU2QkRuJXZJHOu2iIpK4X2+dhv+1giavNIxBy54vJXob0hHM/Tw4YzYfQsLZjvuEkM6Z7OcSNsfpNYo4Q2izJzXI89W4obcU6obZzzDJDZR49Jtc6jUS9fkvNTpuANwLupCrVjvS46i1Y1cbJejosHlQ+zDg8U7zsy30nMAcYqrHytD7tx6fwyVO9fem2q0Ta5P2CCQ== ocpz_distribution diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index db320964..f6143050 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -103,6 +103,7 @@ file: path: roles/get-ocp/files/ocp_ssh_pub mode: '0755' + state: touch when: ocp_ssh_pub_del.changed == true or ocp_ssh_pub.stat.exists == false - name: Fetch ssh key from bastion for use in install-config diff --git a/roles/install_ansible/tasks/main.yaml b/roles/install_ansible/tasks/main.yaml index 
d9cdb4b6..dfcb1502 100644 --- a/roles/install_ansible/tasks/main.yaml +++ b/roles/install_ansible/tasks/main.yaml @@ -4,7 +4,9 @@ tags: bastion, ansible command: "{{ item }}" loop: - - subscription-manager repos --enable "codeready-builder-for-rhel-8-$(arch)-rpms" - - sudo yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + - subscription-manager repos --enable "codeready-builder-for-rhel-8-s390x-rpms" + - sudo yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + - sudo rpm -i epel-release-latest-8.noarch.rpm + - sudo yum -y install ansible - ansible-galaxy collection install community.crypto - ansible-galaxy collection install community.general \ No newline at end of file From ca127d611c8262eea6ef2a3aafdc72ea93c55e1c Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 31 Aug 2021 15:19:19 -0500 Subject: [PATCH 392/885] debugging recorded run --- group_vars/all/main.yaml | 5 +- main.yaml | 16 ++-- roles/connect_cluster/tasks/main.yaml | 85 +++++++++++++++++++++- roles/create_bastion/tasks/main.yaml | 1 + roles/create_bootstrap/tasks/main.yaml | 2 +- roles/create_compute_nodes/tasks/main.yaml | 4 +- roles/create_control_nodes/tasks/main.yaml | 6 +- roles/get-ocp/files/ocp_ssh_pub | 2 +- roles/get-ocp/tasks/main.yaml | 1 + roles/install_ansible/tasks/main.yaml | 6 +- 10 files changed, 104 insertions(+), 24 deletions(-) diff --git a/group_vars/all/main.yaml b/group_vars/all/main.yaml index 65ec7ec5..3f97f6a6 100644 --- a/group_vars/all/main.yaml +++ b/group_vars/all/main.yaml @@ -34,9 +34,8 @@ env_ssh_pass: ibmzrocks #Password to use for ssh into kvm and bastion for first- env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. 
default=""(no password) -# OpenShift cluster's ssh key pair filename -env_ssh_ocp_name: ocp -env_ssh_ocp_pass: ibmzrocks +# OpenShift cluster's ssh key comment +env_ssh_ocp_comm: "ocpz_distribution" # networking dns_nameserver: 9.60.87.139 diff --git a/main.yaml b/main.yaml index 142a5260..0d0b5d5e 100644 --- a/main.yaml +++ b/main.yaml @@ -46,11 +46,11 @@ vars_files: - env.yaml vars: # feel free to add more packages as needed - - packages: [ 'ansible', 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] + - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: - check_ssh + #- install_ansible - install_packages - - install_ansible - ssh-ocp-key-gen # SSH key for bastion to connect to nodes - set_selinux_permissive - set_firewall @@ -82,12 +82,12 @@ #roles: #- wait_for_bootstrap -#- hosts: bastion - #tags: bastion,cluster - #become: true - #gather_facts: no - #roles: - #- connect_cluster +- hosts: bastion + tags: bastion,cluster + become: true + gather_facts: no + roles: + - connect_cluster #- hosts: bastion #become: true diff --git a/roles/connect_cluster/tasks/main.yaml b/roles/connect_cluster/tasks/main.yaml index cd6cc5be..fb361d27 100644 --- a/roles/connect_cluster/tasks/main.yaml +++ b/roles/connect_cluster/tasks/main.yaml @@ -1,13 +1,79 @@ --- +#- name: Add another bin dir to system-wide $PATH. 
+# copy: +# dest: /etc/profile.d/custom-path.sh +# content: 'PATH=$PATH:{{ my_custom_path_var }}' + +- name: echo path + tags: cluster + shell: "echo $PATH" + register: path_check + +- name: print results of path check + tags: cluster + debug: + var: path_check.stdout + +- name: check who the user is + tags: cluster + shell: "whoami" + register: root_check + +- name: print results of checking what user is running tasks + tags: cluster + debug: + var: root_check.stdout + - name: export kube config file tags: cluster - command: export KUBECONFIG=/ocpinst/auth/kubeconfig + shell: "export KUBECONFIG=/ocpinst/auth/kubeconfig" + args: + chdir: / + +- name: export kube config file + tags: cluster + shell: "export KUBECONFIG=/ocpinst/auth/kubeconfig" + args: + chdir: / - name: check if system admin - command: oc whoami + tags: cluster + command: "oc whoami" register: whoami_check - failed_when: whoami_check.stdout != system:admin + #until: whoami_check.stdout.find("system:admin") != -1 + #retries: 5 + #delay: 30 + +- name: print whoami_check results to terminal + tags: cluster + debug: + var: whoami_check.stdout + +- name: get csr info + tags: cluster + command: oc get csr + register: csr + +- name: print csr info to terminal + tags: cluster + debug: + var: csr.stdout + +- name: approve all pending certificates + tags: cluster + command: "for i in `oc get csr --no-headers | grep -i pending | awk '{ print $1 }'`; do oc adm certificate approve $i; done" + register: csr_approve + +- name: print results from csr approval + tags: cluster + debug: + var: csr_approve.stdout + +- name: wait 5 minutes + tags: cluster + pause: + minutes: 5 - name: get csr info tags: cluster @@ -15,5 +81,16 @@ register: csr - name: print csr info to terminal + tags: cluster + debug: + var: csr.stdout + +- name: approve all pending certificates + tags: cluster + command: for i in `oc get csr --no-headers | grep -i pending | awk '{ print $1 }'`; do oc adm certificate approve $i; done + register: 
csr_approve + +- name: print results from csr approval + tags: cluster debug: - var: csr.stdout \ No newline at end of file + var: csr_approve.stdout \ No newline at end of file diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index c2a74f80..bd5e5bbb 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -28,6 +28,7 @@ name: bastion command: status register: bastion_check + ignore_errors: true - name: print status of bastion tags: kvm_host, bastionvm diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 60106987..f4ec9532 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -22,7 +22,7 @@ --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.133::9.60.86.1:255.255.254.0:bootstrap:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign" - --noautoconsole + --noautoconsole --wait=-1 when: bootstrap_check.failed == true - name: Pause 15 minutes for installation. Once you see the login prompt on the bootstrap's terminal. Press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. 
diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index 0514be05..14ef6a1e 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -35,7 +35,7 @@ --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.135::9.60.86.1:255.255.254.0:compute-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" - --noautoconsole + --noautoconsole --wait=-1 when: compute_0_check.failed == true - name: pause 15 minutes @@ -53,7 +53,7 @@ --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.134::9.60.86.1:255.255.254.0:compute-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" - --noautoconsole + --noautoconsole --wait=-1 when: compute_1_check.failed == true - name: pause 15 minutes diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index 9d04ef68..94b91c28 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -48,7 +48,7 @@ --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.138::9.60.86.1:255.255.254.0:control-0:enc1:none:1500 nameserver=9.60.87.139 
coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" - --noautoconsole + --noautoconsole --wait=-1 when: control_0_check.failed == true - name: pause 15 minutes @@ -66,7 +66,7 @@ --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.137::9.60.86.1:255.255.254.0:control-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" - --noautoconsole + --noautoconsole --wait=-1 when: control_1_check.failed == true - name: pause 15 minutes @@ -84,7 +84,7 @@ --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.136::9.60.86.1:255.255.254.0:control-2:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" - --noautoconsole + --noautoconsole --wait=-1 when: control_2_check.failed == true - name: pause 45 minutes diff --git a/roles/get-ocp/files/ocp_ssh_pub b/roles/get-ocp/files/ocp_ssh_pub index e76e1d5b..6f580fed 100644 --- a/roles/get-ocp/files/ocp_ssh_pub +++ b/roles/get-ocp/files/ocp_ssh_pub @@ -1 +1 @@ -ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC8A2DcT4nRh8AT04kpIwCWGSSKRqAo9/M0Om2leuw2IDTCjDInCgdQ4kFuE11D/Z/bFiDgqQ92zAc/7MUA3qEgGWdX7/N1LfzIOv+RvHZ05Y5EH9TXsU+JLrc1Yk8ZOOOzn1a5B9hpimaxHeDMBkLxWrJToGdlzsL5livjUdk86xLzCcq3EjIX3mYv3gx6+/dg/Iz+z9reGIIN6+lbDt6d5ZpQ6kr1OfqUL3hNtn/cHwb9FzyHGRk1PpaQv6c3+pskWuc2RfZX88nTET+crDIzgCxK3yoB/jZi8d7DsB00ou4AxVCd14scNbqZyEfQbPBv39FSE02RfDY001Xcrlr9s2OMiXKY17KbiMUcFyRld3C40w7zT8Mp/jOQUL3Vpj4B85hu73azzV/TSOsXe0i5fthaokspaHGXGhdaR6GXbATU0u1bNVCeqdMjGDyQtIi4pLGopWgohsEl+/nYqy889tMo9zo1AyVzYMv2XKlkmzBBtW66EQHvG5jy2H7S5asBk7MH+ARzhmB7avfe8FdHFF6/O4YgMEeqOKXMW4Ffm6PchOqqL3EkSxvnq8WMUopOBSYS2ejV83dgf2c6/xqE2FwK9VfLiKm1CIZiGIfYAAR1FTGVo+Um6gO8zaKJLetHtkDGduzrAGBVWu67jBPYWY5cCmYu6g04GF7GuGz2ew== ocpz_distribution +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC9ZoxqTm4Jwopmrj61dyvolXcMVo0ebGknuKntIN+oUPuuyT0PA/2SMagE2HkaEACoO+6WRfy2uohvlsIkEsqNyfmwx7AXcEnf/jtvRdI6421rgd4hTyihlt4S9MEHVIOvtQpNfB4bA+7e+QGcWQBa66JOB9BTYYt80et0bXBwtpSQbimSRXohaisvqfUYvhXaMM8Gx+QnoN22atCw8hgA4pSztXzZBM6zSraUl7YgbFziAFdseFXlyZ3CkdvB+Ma7t5A1SszoHqFCpvIt6dCQqqg6CdQne/k789vFx+Pj5aQ+FZqfO6KNfEhEzZ7qLmyTC1M2VeXMEyWSJRpRTXUJQ9ag3z0GWzGZDUcSaz7vrJOEspuTMn4YuppwOHEVbyzesKdQdergf4g3bj6aIRnYHrYkh36CdKC+DB3+G9GBr94wDZfpClM00dGeRHsFg0GYl9btAGEvOckDdfz9kP4ND2S9fiSfVJh3WIqyU2QkRuJXZJHOu2iIpK4X2+dhv+1giavNIxBy54vJXob0hHM/Tw4YzYfQsLZjvuEkM6Z7OcSNsfpNYo4Q2izJzXI89W4obcU6obZzzDJDZR49Jtc6jUS9fkvNTpuANwLupCrVjvS46i1Y1cbJejosHlQ+zDg8U7zsy30nMAcYqrHytD7tx6fwyVO9fem2q0Ta5P2CCQ== ocpz_distribution diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index db320964..f6143050 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -103,6 +103,7 @@ file: path: roles/get-ocp/files/ocp_ssh_pub mode: '0755' + state: touch when: ocp_ssh_pub_del.changed == true or ocp_ssh_pub.stat.exists == false - name: Fetch ssh key from bastion for use in install-config diff --git a/roles/install_ansible/tasks/main.yaml b/roles/install_ansible/tasks/main.yaml index 
d9cdb4b6..dfcb1502 100644 --- a/roles/install_ansible/tasks/main.yaml +++ b/roles/install_ansible/tasks/main.yaml @@ -4,7 +4,9 @@ tags: bastion, ansible command: "{{ item }}" loop: - - subscription-manager repos --enable "codeready-builder-for-rhel-8-$(arch)-rpms" - - sudo yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + - subscription-manager repos --enable "codeready-builder-for-rhel-8-s390x-rpms" + - sudo yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + - sudo rpm -i epel-release-latest-8.noarch.rpm + - sudo yum -y install ansible - ansible-galaxy collection install community.crypto - ansible-galaxy collection install community.general \ No newline at end of file From a200204680867bbe712bce390d4350d159805dcd Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 11:06:08 -0500 Subject: [PATCH 393/885] Deleted deprecated shell scripts, started work on implementing Ansible Vault encryption during setup, created new ansible-setup scripts for linux and mac, updated README. 
--- .gitignore | 4 + README.md | 147 +++++++++++++----- ansible.cfg | 1 + .../shell_scripts/ansible-setup-linux.sh | 0 .../shell_scripts/ansible-setup-mac.sh | 0 files/shell_scripts/create_compute.sh | 6 - files/shell_scripts/create_control.sh | 9 -- files/shell_scripts/create_http.sh | 19 --- files/shell_scripts/get_ocp_installer.sh | 15 -- files/shell_scripts/macvtap-net.sh | 5 - files/shell_scripts/prep_kvm_guests.sh | 4 - files/shell_scripts/start_libvirtd.sh | 4 - files/shell_scripts/verify_bootstrap.sh | 3 - main.yaml | 1 + roles/ansible_setup/tasks/main.yaml | 42 +---- setup-linux.yaml | 7 + setup-mac.yaml | 17 ++ 17 files changed, 143 insertions(+), 141 deletions(-) rename ansible-setup-linux.sh => files/shell_scripts/ansible-setup-linux.sh (100%) rename ansible-setup-mac.sh => files/shell_scripts/ansible-setup-mac.sh (100%) delete mode 100644 files/shell_scripts/create_compute.sh delete mode 100644 files/shell_scripts/create_control.sh delete mode 100644 files/shell_scripts/create_http.sh delete mode 100644 files/shell_scripts/get_ocp_installer.sh delete mode 100644 files/shell_scripts/macvtap-net.sh delete mode 100644 files/shell_scripts/prep_kvm_guests.sh delete mode 100644 files/shell_scripts/start_libvirtd.sh delete mode 100644 files/shell_scripts/verify_bootstrap.sh create mode 100644 setup-linux.yaml create mode 100644 setup-mac.yaml diff --git a/.gitignore b/.gitignore index db0e4afa..bd25db43 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,6 @@ .DS_Store .iso +roles/get-ocp/files/ocp_ssh_pub +env.yaml +group_vars/all/main.yaml* +.vault_pass.txt \ No newline at end of file diff --git a/README.md b/README.md index 5b054508..b38aa6c6 100644 --- a/README.md +++ b/README.md @@ -2,11 +2,14 @@ ## Scope -The goal of this playbook is to setup and deploy a User Provisioned Infrastructure (UPI) OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. 
+* The goal of this playbook is to setup and deploy a User Provisioned Infrastructure (UPI) OpenShift cluster on an + IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. +* This README file gives an extremely detailed step-by-step instruction for you to use as a reference. It assumes near zero experience. -## Supported operating systems for the localhost (the starting workstation) are: -* Linux (RedHat and Debian families) -* Unix and Unix-like (i.e. MacOS X) +## Supported Operating Systems (for local workstation): + +* Linux (RedHat and Debian) +* MacOS X ## Pre-requisites: @@ -19,7 +22,8 @@ The goal of this playbook is to setup and deploy a User Provisioned Infrastructu * 6 Integrated Facilities for Linux (IFLs) * 75 GB of RAM * 1 TB of disk space -* On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed with the following options enabled: +* On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed with + the following options enabled: * server * hardware monitoring utilities * networking file system client @@ -29,42 +33,115 @@ The goal of this playbook is to setup and deploy a User Provisioned Infrastructu * system tools * On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses * Fully Qualified Domain Names (FQDN) names for all IPv4 addresses +* DNS configuration files (forward (.db), reverse (.rev), and named.conf). Note: we plan to automate this in the future. 
+ +## Installation Instructions: -## When you are ready: +### Setup: +* **Step 1: Get This Repository** + * Navigate to a folder where you would like to store this project in your terminal + * Run "git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git" +* **Step 2: Get OpenShift Information** + * In a web browser, navigate to https://console.redhat.com/openshift/install/ibmz/user-provisioned in order to: + * Download your local command line tools (oc and kubectl) + * Copy the OpenShift pull secret (for use in the next step) +* **Step 2: Set Variables** + * In a text editor of your choice, open env.yaml, found in the main directory of this repository + * Fill out all of the required variables for your specific installation +* **Step 3: DNS Configuration** + * Get DNS configuration files (forward (.db), reverse (.rev), and named.conf), or have them pre-defined by + your networking team, and place them in the roles/dns/files folder. +* **Step 4: Setup Script** + * Navigate to the folder where you saved the Git Repository + * Depending on which operating system you are using on your local workstation, run either + "ansible-playbook setup-mac.yaml --ask-become-pass" or "ansible-playbook setup-linux.yaml --ask-become-pass" -* Step 1: Clone this Git repository to a folder on your local computer. -* Step 2: Go to to: - * download your local command line tools (oc and kubectl) - * copy the OpenShift pull secret (for inputting it into env.yaml) in the next step -* Step 2: Fill out all of the required variables for your specific installation in the env.yaml file -* Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf) or have them pre-defined by your networking team. And place them in the roles/dns/files folder. -* Step 4: Run the appropriate Ansible setup shell script, which can be found in the main directory. 
While in the main directory, run "./ansible-setup-mac.sh" or "./ansible-setup-linux.sh" depending on your operating system to download the required Ansible modules and packages. -* Step 5: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass". Watch Ansible as it completes the installation, correcting errors if they arise. If the process fails in error, you should be able to run the same shell command to start the process from the top. Alternatively, use tags to run only the tasks that have that tag. See list of tags below for reference. -* Step 6: Once the create_bastion playbook runs, open the cockpit at :9090>, go to the "Virtual Machines" tab, and complete the bastion's installation with these options enabled: - * server - * hardware monitoring utilities - * networking file system client - * remote management for linux - * headless mgmt - * system tools - * basic web server - * network servers -* Step 7: When the playbooks for creating nodes run, watch them on the cockpit at "https://:9090". Go to the "Virtual Machines" tab and click on the VM you created. Once the operating system installs, it will power down. Click the blue "Run" button to start it back up. It will then run some more setup. Then, when you see " login" come back to the terminal here where you ran ansible and press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to stop the process. -* Step 8: Repeat Step 7 with the Bootstrap and Control nodes. Then, SSH into the bastion (run "ssh " in the terminal). From there, change to root user (run "su root"). Then ssh into the bootstrap ("ssh core@") and run "journalctl -u bootkube.service" to watch the bootstrap connect to the control nodes (hold space to get to the bottom of the log). Expect lots of errors, as all the nodes may not be entirely up yet. 
Once all control nodes are connected, the bootkube log will read "bootkube.service complete". -* Step 9: Repeat Step 7 with the Compute nodes. -* Step 10: Once all the Compute nodes up and prompting login, log in to the bastion and run "export KUBECONFIG=/ocpinst/auth/kubeconfig". Then run "oc get csr". It will bring up a list of certificates that need approval. For each cert that is "Pending", run "oc adm certiciate approve ". The csr names will be something like "csr-v8qqv". Once you approve all the certificates, double check that there are not more that have appeared by running "oc get csr" again. Once all certs are "Approved, Issued". You're ready for the next step. -* Step 11: From the bastion, run "oc get nodes". Once all nodes are "Ready", run "oc get clusteroperators". Wait for them to all read "True" under the "Available" column. This may take hours. -* Step 12: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" -* Step 13: Running the command in the above step will give you some information about how to log into the OpenShift cluster's dashboard. Copy the URL into a web browser and use the provided "kubeadmin" login and password for first time sign-on. -* Step 14: Celebrate! Your OpenShift cluster installation is complete. +### Provisioning +* **Step 5: Running the Main Playbook** + * If you are not already there, navigate to the folder where you saved the Git Repository in your terminal + * Execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" + * Watch Ansible as it completes the installation, correcting errors if they arise. + * If the process fails in error, you should be able to run the same shell command to start the process from the top. + * Alternatively, use tags to run only the tasks that have that tag. See main.yaml to determine what you would like + to run. There is also a list of all the tags at the bottom of this page for reference. 
+* **Step 6: Bastion Configuration** + * Once the create_bastion task runs, it will pause the playbook to give you time to configure it. + * Use a web browser to open the cockpit by going to: "https://your-KVM-host-IP-address-here:9090" + * Click on the "Virtual Machines" tab, then click on bastion from the list, click on the black terminal + screen and press Enter. Complete its installation with these options enabled: + * server + * hardware monitoring utilities + * networking file system client + * remote management for linux + * headless mgmt + * system tools + * basic web server + * network servers + * Once you fill out all the required configuration settings, press "b" to begin installation. + * Once you see "bastion login", come back to the terminal to continue your run by pressing "ctrl+c" and then + "c". If there was a problem and you need to stop the playbook, press "ctrl+c" and then "a". + * Note: we plan to automate the installation configuration in the future. +* **Step 7: Starting Up Bootstrap and Control Nodes** + * The playbook will continue to run, preparing the bootstrap and control nodes. + * To monitor the nodes as they come up, watch them on the cockpit at: "https://your-KVM-host-IP-address-here:9090" + * Click on the "Virtual Machines" tab and then click on the VM you want to monitor. Click on the black + terminal screen and press Enter. + * Once you see "node-name login" prompt come back to the terminal where you ran Ansible and press "ctrl+c" and + then "c" to continue running the playbook. + * If you encounter an error that does not resolve with time, press "ctrl+c" and then "a" to stop the process and debug. 
+* **Step 8: Bootkube Verification** + * SSH into the bastion (run "ssh your-bastion-IP-address-here" in the terminal) + * From there, change to root user (run "su root") and type in the root password that you set during configuration + * Then SSH into the bootstrap as core ("ssh core@your-bootstrap-IP-address-here") + * Run "journalctl -u bootkube.service" to watch the bootstrap connect to the control nodes (hold spacebar to + get to the bottom of the log). Press "q" to exit the log. + * Expect lots of errors, as the control nodes may not be entirely up yet. + * This may take some time, 30 minutes or more. Check in occasionally by running "journalctl -u bootkube.service" again + to update the log. Remember to hold the spacebar to go to the bottom, press "q" to quit. + * Once all control nodes are connected, the bootkube log will read "bootkube.service complete". +* **Step 9: Starting Up Compute Nodes** + * Repeat Step 7 with the Compute nodes. + * Monitor their status at the cockpit, found at "https://your-KVM-host-IP-address:9090" + * They are ready once their terminal screen shows a login prompt + * Once all your compute nodes are up and running, and bootkube is complete, you are ready for cluster verification + +### Verification +* **Step 10: Export Kube Config** + * SSH into the bastion (run "ssh your-bastion-IP-address-here") + * Change to root user (run "su root") and type in your password from when you configured the bastion. + * Then run "export KUBECONFIG=/ocpinst/auth/kubeconfig" + * Check that worked by running "oc whoami", which should return "system:admin" +* **Step 11: Approve Certificates** + * From the bastion, running as root user (as above) run "oc get csr". This will bring up a list of certificates that need approval. 
+ * To approve all certificates at the same time, run the following command: + "for i in `oc get csr --no-headers | grep -i pending | awk '{ print $1 }'`; do oc adm certificate approve $i; done" + * It may take some time for all the certificates that need approval to show up. Keep running "oc get csr" to check to make sure that + no new certificates have appeared since you last approved them. + * Once all certificates read "Approved, Issued", you're ready for the next step. +* **Step 12: Wait for Cluster To Become Operational** + * From the bastion, as root user (as above) check node status by running: "oc get nodes". All nodes need to be "Ready" in the "Status" column. + * From the bastion, as root user (as above) run "oc get clusteroperators". All cluster operators need to be "True" in the "Available" column. + * It may take hours, especially the cluster operators. Run the above two bullets' commands to check in occasionally. + * Once all nodes are ready and cluster operators are available, you are ready to continue to the next step. +* **Step 13: Verify OpenShift Installation** + * From the bastion as root user (as above), run: "./openshift-install --dir=/ocpinst wait-for install-complete" + * If installation is ready, running the above command will give you some information about how to log into the OpenShift cluster's dashboard. + * Copy the provided URL into a web browser and use the provided "kubeadmin" login and password for first time sign-on. +* **Step 14: Celebrate!** + * Your OpenShift cluster provisioning and installation is now complete. * Optional: Leave the bootstrap running as is, shut it down and destroy it, or convert it into a compute node. ## Teardown: -* If you would like to teardown your VMs, run "ansible-playbook teardown.yaml --ask-become-pass --tags "partial/full". Choose either the partial or full tag. 
-* If you have provisioned more than the minimum number of nodes for your installation, add them to the respective list found in roles/teardown_vms/tasks/main.yaml. -* Use the "full" tag to teardown all VMs running on your KVM host. Once you run the full teardown, to start the main.yaml playbook back from that point, run with tags "bastionvm,bastion,create_nodes". -* Use the "partial" tag to teardown to the point where nothing except the bastion is running on your KVM host. Once you run the partial teardown, to start the main.yaml playbook back from that point, run with tags "bastion,create_nodes". + +* If you would like to teardown your VMs, first determine whether you would like to do a full or partial teardown. +* Full: To teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags full" +* Partial: To teardown all the VMs except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags partial" +* If you have provisioned more than the minimum number of nodes for your installation, add them to the + respective list found in roles/teardown_vms/tasks/main.yaml. +* Once you run the full teardown, to start the main.yaml playbook back from that point, run: + "ansible-playbook main.yaml --ask-become-pass --tags bastionvm,bastion,create_nodes" +* Once you run the partial teardown, to start the main.yaml playbook back from that point, run main.yaml with the tags "bastion,create_nodes". 
## Tags: diff --git a/ansible.cfg b/ansible.cfg index bf674386..008886a0 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,6 +1,7 @@ [defaults] inventory=inventory private_key_file=~/.ssh/ansible +vault_password_file = ~/.vault_pass.txt [inventory] cache=True private_key_file = ~/.ssh/ansible diff --git a/ansible-setup-linux.sh b/files/shell_scripts/ansible-setup-linux.sh similarity index 100% rename from ansible-setup-linux.sh rename to files/shell_scripts/ansible-setup-linux.sh diff --git a/ansible-setup-mac.sh b/files/shell_scripts/ansible-setup-mac.sh similarity index 100% rename from ansible-setup-mac.sh rename to files/shell_scripts/ansible-setup-mac.sh diff --git a/files/shell_scripts/create_compute.sh b/files/shell_scripts/create_compute.sh deleted file mode 100644 index e39cb1bf..00000000 --- a/files/shell_scripts/create_compute.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G -qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> diff --git a/files/shell_scripts/create_control.sh b/files/shell_scripts/create_control.sh deleted file mode 100644 index 12a8536b..00000000 --- a/files/shell_scripts/create_control.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G -qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G -qemu-img create -f qcow2 -b 
/var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G - -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> diff --git a/files/shell_scripts/create_http.sh b/files/shell_scripts/create_http.sh deleted file mode 100644 index 8650a27f..00000000 --- a/files/shell_scripts/create_http.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!bin/bash - -##install HTTP -dnf install -y httpd - -##make folders -mkdir /var/www/html/bin /var/www/html/bootstrap - -##get mirror 1 -wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-kernel-s390x-O /var/www/html/bin/rhcos-kernel - -##get mirror 2 -wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-initramfs.s390x.img -O /var/www/html/bin/rhcos-initramfs.img - -##get mirror 3 -wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-rootfs.s390x.img -O rhcos-rootfs.img - -##enable http -systemctl enable --now httpd; systemctl status httpd diff --git a/files/shell_scripts/get_ocp_installer.sh b/files/shell_scripts/get_ocp_installer.sh deleted file mode 100644 index a19a0abb..00000000 --- a/files/shell_scripts/get_ocp_installer.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!bin/bash - -##get and extract mirror 1 -wget https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/latest/openshift-client-linux.tar.gz -tar -xvzf openshift-client-linux.tar.gz - -##get and extract mirror 2 -wget 
https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/latest/openshift-install-linux.tar.gz -tar -xvzf openshift-client-linux.tar.gz - -##Make executable -chmod +x kubectl oc openshift_install - -##move installed to bin folder -mv kubectl oc openshift_install /usr/local/bin/ diff --git a/files/shell_scripts/macvtap-net.sh b/files/shell_scripts/macvtap-net.sh deleted file mode 100644 index 1d51d565..00000000 --- a/files/shell_scripts/macvtap-net.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!bin/bash -virsh net-create macvtap.xml -virsh net-start --network macvtap-net -virsh net-autostart --network macvtap-net -virsh net-list --all diff --git a/files/shell_scripts/prep_kvm_guests.sh b/files/shell_scripts/prep_kvm_guests.sh deleted file mode 100644 index 50b36294..00000000 --- a/files/shell_scripts/prep_kvm_guests.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!bin/bash -wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz -dnf install -y gzip -gunzip rhcos-qemu.s390x.qcow2.gz /var/lib/libvirt/images/ diff --git a/files/shell_scripts/start_libvirtd.sh b/files/shell_scripts/start_libvirtd.sh deleted file mode 100644 index 332d1b33..00000000 --- a/files/shell_scripts/start_libvirtd.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -systemctl enable --now libvirtd -systemctl status libvirtd.service -systemctl status libvirtd diff --git a/files/shell_scripts/verify_bootstrap.sh b/files/shell_scripts/verify_bootstrap.sh deleted file mode 100644 index fcb0845d..00000000 --- a/files/shell_scripts/verify_bootstrap.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!bin/bash -virsh console bootstrap -journalctl -u bootkube.service diff --git a/main.yaml b/main.yaml index 0d0b5d5e..fadc4c7d 100644 --- a/main.yaml +++ b/main.yaml @@ -9,6 +9,7 @@ - env.yaml vars: - ssh_target_ip: "{{ env_ip_kvm_host }}" + roles: - ansible_setup - ssh_key_gen diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 
89dc1d1d..b2ac21e5 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -37,46 +37,6 @@ - dns_nameserver - default_gateway - netmask - -- name: Set facts from env.yaml so they can be used in other playbooks - tags: setup - set_fact: - env_baseDomain: "{{ env_baseDomain }}" - env_compute_arch: "{{ env_compute_arch }}" - env_control_count: "{{ env_control_count }}" - env_control_arch: "{{ env_control_arch }}" - env_metadata_name: "{{ env_metadata_name }}" - env_cidr: "{{ env_cidr }}" - env_host_prefix: "{{ env_host_prefix }}" - env_network_type: "{{ env_network_type }}" - env_service_network: "{{ env_service_network }}" - env_fips: "{{ env_fips }}" - env_pullSecret: "{{ env_pullSecret }}" - env_ip_kvm_host: "{{ env_ip_kvm_host }}" - env_ip_bastion: "{{ env_ip_bastion }}" - env_ip_bootstrap: "{{ env_ip_bootstrap }}" - env_ip_control_0: "{{ env_ip_control_0 }}" - env_ip_control_1: "{{ env_ip_control_1 }}" - env_ip_control_2: "{{ env_ip_control_2 }}" - env_ip_compute_0: "{{ env_ip_compute_0 }}" - env_ip_compute_1: "{{ env_ip_compute_1 }}" - env_ssh_username: "{{ env_ssh_username }}" - env_ssh_pass: "{{ env_ssh_pass }}" - env_ssh_ans_name: "{{ env_ssh_ans_name }}" - env_ssh_ans_pass: "{{ env_ssh_ans_pass }}" - env_ssh_ocp_comm: "{{ env_ssh_ocp_comm }}" - dns_nameserver: "{{ dns_nameserver }}" - default_gateway: "{{ default_gateway }}" - netmask: "{{ netmask }}" - cacheable: yes - -- name: Add the contents of user-input variables from env.yaml to group_vars/all folder to persist across hosts - tags: setup - blockinfile: - path: group_vars/all/main.yaml - block: "{{ lookup('file', 'env.yaml') }}" - state: present - backup: yes - name: Populate inventory file with ip variables from env.yaml tags: setup @@ -115,7 +75,7 @@ path: ansible.cfg regexp: '^private_key_file=' line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} - + - name: Ansible generic setup to re-read inventory file after populated in previous tasks tags: setup 
ansible.builtin.gather_facts: \ No newline at end of file diff --git a/setup-linux.yaml b/setup-linux.yaml new file mode 100644 index 00000000..70713486 --- /dev/null +++ b/setup-linux.yaml @@ -0,0 +1,7 @@ +--- + +- hosts: localhost + tags: localhost, prep + connection: local + become: false + gather_facts: no \ No newline at end of file diff --git a/setup-mac.yaml b/setup-mac.yaml new file mode 100644 index 00000000..3fa5c33c --- /dev/null +++ b/setup-mac.yaml @@ -0,0 +1,17 @@ +--- + +- hosts: localhost + tags: localhost, prep + connection: local + become: false + gather_facts: no + vars_prompt: + - name: vault_pass + prompt: Please provide a secure password to be used for encrypting your sensitive files in Ansible + private: yes + unsafe: yes # this just means you can use special characters. The password is safe. + + - tasks: + - name: install Ansible dependencies and packages + shell: ansible-setup-mac.sh + From 549e02ba299a34380c51001897eca35747701ad6 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 11:06:08 -0500 Subject: [PATCH 394/885] Deleted deprecated shell scripts, started work on implementing Ansible Vault encryption during setup, created new ansible-setup scripts for linux and mac, updated README. 
--- .gitignore | 4 + README.md | 147 +++++++++++++----- ansible.cfg | 1 + .../shell_scripts/ansible-setup-linux.sh | 0 .../shell_scripts/ansible-setup-mac.sh | 0 files/shell_scripts/create_compute.sh | 6 - files/shell_scripts/create_control.sh | 9 -- files/shell_scripts/create_http.sh | 19 --- files/shell_scripts/get_ocp_installer.sh | 15 -- files/shell_scripts/macvtap-net.sh | 5 - files/shell_scripts/prep_kvm_guests.sh | 4 - files/shell_scripts/start_libvirtd.sh | 4 - files/shell_scripts/verify_bootstrap.sh | 3 - main.yaml | 1 + roles/ansible_setup/tasks/main.yaml | 42 +---- setup-linux.yaml | 7 + setup-mac.yaml | 17 ++ 17 files changed, 143 insertions(+), 141 deletions(-) rename ansible-setup-linux.sh => files/shell_scripts/ansible-setup-linux.sh (100%) rename ansible-setup-mac.sh => files/shell_scripts/ansible-setup-mac.sh (100%) delete mode 100644 files/shell_scripts/create_compute.sh delete mode 100644 files/shell_scripts/create_control.sh delete mode 100644 files/shell_scripts/create_http.sh delete mode 100644 files/shell_scripts/get_ocp_installer.sh delete mode 100644 files/shell_scripts/macvtap-net.sh delete mode 100644 files/shell_scripts/prep_kvm_guests.sh delete mode 100644 files/shell_scripts/start_libvirtd.sh delete mode 100644 files/shell_scripts/verify_bootstrap.sh create mode 100644 setup-linux.yaml create mode 100644 setup-mac.yaml diff --git a/.gitignore b/.gitignore index db0e4afa..bd25db43 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,6 @@ .DS_Store .iso +roles/get-ocp/files/ocp_ssh_pub +env.yaml +group_vars/all/main.yaml* +.vault_pass.txt \ No newline at end of file diff --git a/README.md b/README.md index 5b054508..b38aa6c6 100644 --- a/README.md +++ b/README.md @@ -2,11 +2,14 @@ ## Scope -The goal of this playbook is to setup and deploy a User Provisioned Infrastructure (UPI) OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. 
+* The goal of this playbook is to setup and deploy a User Provisioned Infrastructure (UPI) OpenShift cluster on an + IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. +* This README file gives an extremely detailed step-by-step instruction for you to use as a reference. It assumes near zero experience. -## Supported operating systems for the localhost (the starting workstation) are: -* Linux (RedHat and Debian families) -* Unix and Unix-like (i.e. MacOS X) +## Supported Operating Systems (for local workstation): + +* Linux (RedHat and Debian) +* MacOS X ## Pre-requisites: @@ -19,7 +22,8 @@ The goal of this playbook is to setup and deploy a User Provisioned Infrastructu * 6 Integrated Facilities for Linux (IFLs) * 75 GB of RAM * 1 TB of disk space -* On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed with the following options enabled: +* On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed with + the following options enabled: * server * hardware monitoring utilities * networking file system client @@ -29,42 +33,115 @@ The goal of this playbook is to setup and deploy a User Provisioned Infrastructu * system tools * On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses * Fully Qualified Domain Names (FQDN) names for all IPv4 addresses +* DNS configuration files (forward (.db), reverse (.rev), and named.conf). Note: we plan to automate this in the future. 
+ +## Installation Instructions: -## When you are ready: +### Setup: +* **Step 1: Get This Repository** + * Navigate to a folder where you would like to store this project in your terminal + * Run "git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git" +* **Step 2: Get OpenShift Information** + * In a web browser, navigate to https://console.redhat.com/openshift/install/ibmz/user-provisioned in order to: + * Download your local command line tools (oc and kubectl) + * Copy the OpenShift pull secret (for use in the next step) +* **Step 2: Set Variables** + * In a text editor of your choice, open env.yaml, found in the main directory of this repository + * Fill out all of the required variables for your specific installation +* **Step 3: DNS Configuration** + * Get DNS configuration files (forward (.db), reverse (.rev), and named.conf), or have them pre-defined by + your networking team, and place them in the roles/dns/files folder. +* **Step 4: Setup Script** + * Navigate to the folder where you saved the Git Repository + * Depending on which operating system you are using on your local workstation, run either + "ansible-playbook setup-mac.yaml --ask-become-pass" or "ansible-playbook setup-linux.yaml --ask-become-pass" -* Step 1: Clone this Git repository to a folder on your local computer. -* Step 2: Go to to: - * download your local command line tools (oc and kubectl) - * copy the OpenShift pull secret (for inputting it into env.yaml) in the next step -* Step 2: Fill out all of the required variables for your specific installation in the env.yaml file -* Step 3: Get DNS configuration files (forward (.db), reverse (.rev), and named.conf) or have them pre-defined by your networking team. And place them in the roles/dns/files folder. -* Step 4: Run the appropriate Ansible setup shell script, which can be found in the main directory. 
While in the main directory, run "./ansible-setup-mac.sh" or "./ansible-setup-linux.sh" depending on your operating system to download the required Ansible modules and packages. -* Step 5: Navigate to the folder where you saved the Git Repository and execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass". Watch Ansible as it completes the installation, correcting errors if they arise. If the process fails in error, you should be able to run the same shell command to start the process from the top. Alternatively, use tags to run only the tasks that have that tag. See list of tags below for reference. -* Step 6: Once the create_bastion playbook runs, open the cockpit at :9090>, go to the "Virtual Machines" tab, and complete the bastion's installation with these options enabled: - * server - * hardware monitoring utilities - * networking file system client - * remote management for linux - * headless mgmt - * system tools - * basic web server - * network servers -* Step 7: When the playbooks for creating nodes run, watch them on the cockpit at "https://:9090". Go to the "Virtual Machines" tab and click on the VM you created. Once the operating system installs, it will power down. Click the blue "Run" button to start it back up. It will then run some more setup. Then, when you see " login" come back to the terminal here where you ran ansible and press "ctrl-C" and then "C" to continue. If you do not see the login prompt, press "ctrl+C" and then "A" to stop the process. -* Step 8: Repeat Step 7 with the Bootstrap and Control nodes. Then, SSH into the bastion (run "ssh " in the terminal). From there, change to root user (run "su root"). Then ssh into the bootstrap ("ssh core@") and run "journalctl -u bootkube.service" to watch the bootstrap connect to the control nodes (hold space to get to the bottom of the log). Expect lots of errors, as all the nodes may not be entirely up yet. 
Once all control nodes are connected, the bootkube log will read "bootkube.service complete". -* Step 9: Repeat Step 7 with the Compute nodes. -* Step 10: Once all the Compute nodes up and prompting login, log in to the bastion and run "export KUBECONFIG=/ocpinst/auth/kubeconfig". Then run "oc get csr". It will bring up a list of certificates that need approval. For each cert that is "Pending", run "oc adm certiciate approve ". The csr names will be something like "csr-v8qqv". Once you approve all the certificates, double check that there are not more that have appeared by running "oc get csr" again. Once all certs are "Approved, Issued". You're ready for the next step. -* Step 11: From the bastion, run "oc get nodes". Once all nodes are "Ready", run "oc get clusteroperators". Wait for them to all read "True" under the "Available" column. This may take hours. -* Step 12: Verify installation by running: "./openshift-install --dir=/ocpinst wait-for install-complete" -* Step 13: Running the command in the above step will give you some information about how to log into the OpenShift cluster's dashboard. Copy the URL into a web browser and use the provided "kubeadmin" login and password for first time sign-on. -* Step 14: Celebrate! Your OpenShift cluster installation is complete. +### Provisioning +* **Step 5: Running the Main Playbook** + * If you are not already there, navigate to the folder where you saved the Git Repository in your terminal + * Execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" + * Watch Ansible as it completes the installation, correcting errors if they arise. + * If the process fails in error, you should be able to run the same shell command to start the process from the top. + * Alternatively, use tags to run only the tasks that have that tag. See main.yaml to determine what you would like + to run. There is also a list of all the tags at the bottom of this page for reference. 
+* **Step 6: Bastion Configuration** + * Once the create_bastion task runs, it will pause the playbook to give you time to configure it. + * Use a web browser to open the cockpit by going to: "https://your-KVM-host-IP-address-here:9090" + * Click on the "Virtual Machines" tab, then click on bastion from the list, click on the black terminal + screen and press Enter. Complete its installation with these options enabled: + * server + * hardware monitoring utilities + * networking file system client + * remote management for linux + * headless mgmt + * system tools + * basic web server + * network servers + * Once you fill out all the required configuration settings, press "b" to begin installation. + * Once you see "bastion login", come back to the terminal to continue your run by pressing "ctrl+c" and then + "c". If there was a problem and you need to stop the playbook, press "ctrl+c" and then "a". + * Note: we plan to automate the installation configuration in the future. +* **Step 7: Starting Up Bootstrap and Control Nodes** + * The playbook will continue to run, preparing the bootstrap and control nodes. + * To monitor the nodes as they come up, watch them on the cockpit at: "https://your-KVM-host-IP-address-here:9090" + * Click on the "Virtual Machines" tab and then click on the VM you want to monitor. Click on the black + terminal screen and press Enter. + * Once you see "node-name login" prompt come back to the terminal where you ran Ansible and press "ctrl+c" and + then "c" to continue running the playbook. + * If you encounter an error that does not resolve with time, press "ctrl+c" and then "a" to stop the process and debug. 
+* **Step 8: Bootkube Verification** + * SSH into the bastion (run "ssh your-bastion-IP-address-here" in the terminal) + * From there, change to root user (run "su root") and type in the root password that you set during configuration + * Then SSH into the bootstrap as core ("ssh core@your-bootstrap-IP-address-here") + * Run "journalctl -u bootkube.service" to watch the bootstrap connect to the control nodes (hold spacebar to + get to the bottom of the log). Press "q" to exit the log. + * Expect lots of errors, as the control nodes may not be entirely up yet. + * This may take some time, 30 minutes or more. Check in occasionally by running "journalctl -u bootkube.service" again + to update the log. Remember to hold the spacebar to go to the bottom, press "q" to quit. + * Once all control nodes are connected, the bootkube log will read "bootkube.service complete". +* **Step 9: Starting Up Compute Nodes** + * Repeat Step 7 with the Compute nodes. + * Monitor their status at the cockpit, found at "https://your-KVM-host-IP-address:9090" + * They are ready once their terminal screen shows a login prompt + * Once all your compute nodes are up and running, and bootkube is complete, you are ready for cluster verification + +### Verification +* **Step 10: Export Kube Config** + * SSH into the bastion (run "ssh your-bastion-IP-address-here") + * Change to root user (run "su root") and type in your password from when you configured the bastion. + * Then run "export KUBECONFIG=/ocpinst/auth/kubeconfig" + * Check that worked by running "oc whoami", which should return "system:admin" +* **Step 11: Approve Certificates** + * From the bastion, running as root user (as above) run "oc get csr". This will bring up a list of certificates that need approval. 
+ * To approve all certificates at the same time, run the following command: + "for i in `oc get csr --no-headers | grep -i pending | awk '{ print $1 }'`; do oc adm certificate approve $i; done" + * It may take some time for all the certificates that need approval to show up. Keep running "oc get csr" to check to make sure that + no new certificates have appeared since you last approved them. + * Once all certificates read "Approved, Issued". You're ready for the next step. +* **Step 11: Wait for Cluster To Become Operational** + * From the bastion, as root user (as above) check node status by running: "oc get nodes". All nodes need to be "Ready" in the "Status" column. + * From the bastion, as root user (as above) run "oc get clusteroperators". All cluster operators need to be "True" in the "Available" column. + * It may take hours, especially the cluster operators. Run the above two bullets' commmands to check in occasionally. + * Once all nodes are ready and cluster operators are available, you are ready to continue to the next step. +* **Step 12: Verify OpenShift Installation** + * From the bastion as root user (as above), run: "./openshift-install --dir=/ocpinst wait-for install-complete" + * If installation is ready, running the above command will give you some information about how to log into the OpenShift cluster's dashboard. + * Copy the provided URL into a web browser and use the provided "kubeadmin" login and password for first time sign-on. +* **Step 14: Celebrate!** + * Your OpenShift cluster provisioning and installation is now complete. * Optional: Leave the bootstrap running as is, shut it down and destroy it, or convert it into a compute node. ## Teardown: -* If you would like to teardown your VMs, run "ansible-playbook teardown.yaml --ask-become-pass --tags "partial/full". Choose either the partial or full tag. 
-* If you have provisioned more than the minimum number of nodes for your installation, add them to the respective list found in roles/teardown_vms/tasks/main.yaml. -* Use the "full" tag to teardown all VMs running on your KVM host. Once you run the full teardown, to start the main.yaml playbook back from that point, run with tags "bastionvm,bastion,create_nodes". -* Use the "partial" tag to teardown to the point where nothing except the bastion is running on your KVM host. Once you run the partial teardown, to start the main.yaml playbook back from that point, run with tags "bastion,create_nodes". + +* If you would like to teardown your VMs, first determine whether you would like to do a full or partial teardown. +* Full: to teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags "full" +* Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags "partial" +* If you have provisioned more than the minimum number of nodes for your installation, add them to the + respective list found in roles/teardown_vms/tasks/main.yaml. +* Once you run the full teardown, to start the main.yaml playbook back from that point, run: + "run ansible-playbook main.yaml --ask-become-pass --tags "bastionvm,bastion,create_nodes" +* Once you run the partial teardown, to start the main.yaml playbook back from that point, run main.yaml with the tags "bastion,create_nodes". 
## Tags: diff --git a/ansible.cfg b/ansible.cfg index bf674386..008886a0 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,6 +1,7 @@ [defaults] inventory=inventory private_key_file=~/.ssh/ansible +vault_password_file = ~/.vault_pass.txt [inventory] cache=True private_key_file = ~/.ssh/ansible diff --git a/ansible-setup-linux.sh b/files/shell_scripts/ansible-setup-linux.sh similarity index 100% rename from ansible-setup-linux.sh rename to files/shell_scripts/ansible-setup-linux.sh diff --git a/ansible-setup-mac.sh b/files/shell_scripts/ansible-setup-mac.sh similarity index 100% rename from ansible-setup-mac.sh rename to files/shell_scripts/ansible-setup-mac.sh diff --git a/files/shell_scripts/create_compute.sh b/files/shell_scripts/create_compute.sh deleted file mode 100644 index e39cb1bf..00000000 --- a/files/shell_scripts/create_compute.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash - -qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-0.qcow2 100G -qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/compute-1.qcow2 100G -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> diff --git a/files/shell_scripts/create_control.sh b/files/shell_scripts/create_control.sh deleted file mode 100644 index 12a8536b..00000000 --- a/files/shell_scripts/create_control.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-0.qcow2 100G -qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-1.qcow2 100G -qemu-img create -f qcow2 -b 
/var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/control-2.qcow2 100G - -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> -virt-install --boot kernel=rhcos-kernel,initrd=rhcos-initramfs.img,kernel_args='rd.neednet=1 coreos.inst.install_dev=/dev/vda coreos.live.rootfs_url=http://9.60.> diff --git a/files/shell_scripts/create_http.sh b/files/shell_scripts/create_http.sh deleted file mode 100644 index 8650a27f..00000000 --- a/files/shell_scripts/create_http.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!bin/bash - -##install HTTP -dnf install -y httpd - -##make folders -mkdir /var/www/html/bin /var/www/html/bootstrap - -##get mirror 1 -wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-kernel-s390x-O /var/www/html/bin/rhcos-kernel - -##get mirror 2 -wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-initramfs.s390x.img -O /var/www/html/bin/rhcos-initramfs.img - -##get mirror 3 -wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-4.7.7-s390x-live-rootfs.s390x.img -O rhcos-rootfs.img - -##enable http -systemctl enable --now httpd; systemctl status httpd diff --git a/files/shell_scripts/get_ocp_installer.sh b/files/shell_scripts/get_ocp_installer.sh deleted file mode 100644 index a19a0abb..00000000 --- a/files/shell_scripts/get_ocp_installer.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!bin/bash - -##get and extract mirror 1 -wget https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/latest/openshift-client-linux.tar.gz -tar -xvzf openshift-client-linux.tar.gz - -##get and extract mirror 2 -wget 
https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/latest/openshift-install-linux.tar.gz -tar -xvzf openshift-client-linux.tar.gz - -##Make executable -chmod +x kubectl oc openshift_install - -##move installed to bin folder -mv kubectl oc openshift_install /usr/local/bin/ diff --git a/files/shell_scripts/macvtap-net.sh b/files/shell_scripts/macvtap-net.sh deleted file mode 100644 index 1d51d565..00000000 --- a/files/shell_scripts/macvtap-net.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!bin/bash -virsh net-create macvtap.xml -virsh net-start --network macvtap-net -virsh net-autostart --network macvtap-net -virsh net-list --all diff --git a/files/shell_scripts/prep_kvm_guests.sh b/files/shell_scripts/prep_kvm_guests.sh deleted file mode 100644 index 50b36294..00000000 --- a/files/shell_scripts/prep_kvm_guests.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!bin/bash -wget https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz -dnf install -y gzip -gunzip rhcos-qemu.s390x.qcow2.gz /var/lib/libvirt/images/ diff --git a/files/shell_scripts/start_libvirtd.sh b/files/shell_scripts/start_libvirtd.sh deleted file mode 100644 index 332d1b33..00000000 --- a/files/shell_scripts/start_libvirtd.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -systemctl enable --now libvirtd -systemctl status libvirtd.service -systemctl status libvirtd diff --git a/files/shell_scripts/verify_bootstrap.sh b/files/shell_scripts/verify_bootstrap.sh deleted file mode 100644 index fcb0845d..00000000 --- a/files/shell_scripts/verify_bootstrap.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!bin/bash -virsh console bootstrap -journalctl -u bootkube.service diff --git a/main.yaml b/main.yaml index 0d0b5d5e..fadc4c7d 100644 --- a/main.yaml +++ b/main.yaml @@ -9,6 +9,7 @@ - env.yaml vars: - ssh_target_ip: "{{ env_ip_kvm_host }}" + roles: - ansible_setup - ssh_key_gen diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 
89dc1d1d..b2ac21e5 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -37,46 +37,6 @@ - dns_nameserver - default_gateway - netmask - -- name: Set facts from env.yaml so they can be used in other playbooks - tags: setup - set_fact: - env_baseDomain: "{{ env_baseDomain }}" - env_compute_arch: "{{ env_compute_arch }}" - env_control_count: "{{ env_control_count }}" - env_control_arch: "{{ env_control_arch }}" - env_metadata_name: "{{ env_metadata_name }}" - env_cidr: "{{ env_cidr }}" - env_host_prefix: "{{ env_host_prefix }}" - env_network_type: "{{ env_network_type }}" - env_service_network: "{{ env_service_network }}" - env_fips: "{{ env_fips }}" - env_pullSecret: "{{ env_pullSecret }}" - env_ip_kvm_host: "{{ env_ip_kvm_host }}" - env_ip_bastion: "{{ env_ip_bastion }}" - env_ip_bootstrap: "{{ env_ip_bootstrap }}" - env_ip_control_0: "{{ env_ip_control_0 }}" - env_ip_control_1: "{{ env_ip_control_1 }}" - env_ip_control_2: "{{ env_ip_control_2 }}" - env_ip_compute_0: "{{ env_ip_compute_0 }}" - env_ip_compute_1: "{{ env_ip_compute_1 }}" - env_ssh_username: "{{ env_ssh_username }}" - env_ssh_pass: "{{ env_ssh_pass }}" - env_ssh_ans_name: "{{ env_ssh_ans_name }}" - env_ssh_ans_pass: "{{ env_ssh_ans_pass }}" - env_ssh_ocp_comm: "{{ env_ssh_ocp_comm }}" - dns_nameserver: "{{ dns_nameserver }}" - default_gateway: "{{ default_gateway }}" - netmask: "{{ netmask }}" - cacheable: yes - -- name: Add the contents of user-input variables from env.yaml to group_vars/all folder to persist across hosts - tags: setup - blockinfile: - path: group_vars/all/main.yaml - block: "{{ lookup('file', 'env.yaml') }}" - state: present - backup: yes - name: Populate inventory file with ip variables from env.yaml tags: setup @@ -115,7 +75,7 @@ path: ansible.cfg regexp: '^private_key_file=' line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} - + - name: Ansible generic setup to re-read inventory file after populated in previous tasks tags: setup 
ansible.builtin.gather_facts: \ No newline at end of file diff --git a/setup-linux.yaml b/setup-linux.yaml new file mode 100644 index 00000000..70713486 --- /dev/null +++ b/setup-linux.yaml @@ -0,0 +1,7 @@ +--- + +- hosts: localhost + tags: localhost, prep + connection: local + become: false + gather_facts: no \ No newline at end of file diff --git a/setup-mac.yaml b/setup-mac.yaml new file mode 100644 index 00000000..3fa5c33c --- /dev/null +++ b/setup-mac.yaml @@ -0,0 +1,17 @@ +--- + +- hosts: localhost + tags: localhost, prep + connection: local + become: false + gather_facts: no + vars_prompt: + - name: vault_pass + prompt: Please provide a secure password to be used for encrypting your sensitive files in Ansible + private: yes + unsafe: yes # this just means you can use special characters. The password is safe. + + - tasks: + - name: install Ansible dependencies and packages + shell: ansible-setup-mac.sh + From a19e226952698a5d0c8aeba2baee8ed956239efd Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 11:08:21 -0500 Subject: [PATCH 395/885] Updated readme to fix formatting issue --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b38aa6c6..56090c8e 100644 --- a/README.md +++ b/README.md @@ -114,7 +114,7 @@ * **Step 11: Approve Certificates** * Fromm the bastion, running as root user (as above) run "oc get csr". This will bring up a list of certificates that need approval. * To approve all certificates at the same time, run the following command: - "for i in `oc get csr --no-headers | grep -i pending | awk '{ print $1 }'`; do oc adm certificate approve $i; done" + "for i in \`oc get csr --no-headers | grep -i pending | awk '{ print $1 }'\`; do oc adm certificate approve $i; done" * It may take some time for all the certificates that need approval to show up. Keep running "oc get csr" to check to make sure that no new certificates have appeared since you last approved them. 
* Once all certificates read "Approved, Issued". You're ready for the next step. From 9dc1b9ae0d7276f3b617608434bf73b083cbd864 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 11:08:21 -0500 Subject: [PATCH 396/885] Updated readme to fix formatting issue --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b38aa6c6..56090c8e 100644 --- a/README.md +++ b/README.md @@ -114,7 +114,7 @@ * **Step 11: Approve Certificates** * Fromm the bastion, running as root user (as above) run "oc get csr". This will bring up a list of certificates that need approval. * To approve all certificates at the same time, run the following command: - "for i in `oc get csr --no-headers | grep -i pending | awk '{ print $1 }'`; do oc adm certificate approve $i; done" + "for i in \`oc get csr --no-headers | grep -i pending | awk '{ print $1 }'\`; do oc adm certificate approve $i; done" * It may take some time for all the certificates that need approval to show up. Keep running "oc get csr" to check to make sure that no new certificates have appeared since you last approved them. * Once all certificates read "Approved, Issued". You're ready for the next step. From fe1dddab4454cddb2a1a429f9b84198ace0310e8 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 11:21:35 -0500 Subject: [PATCH 397/885] Alphabetized tags and fixed more formatting issues in README. 
--- README.md | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 56090c8e..b7e7e22b 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ * Navigate to a folder where you would like to store this project in your terminal * Run "git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git" * **Step 2: Get OpenShift Information** - * In a web browser, navigate to https://console.redhat.com/openshift/install/ibmz/user-provisioned in order to: + * In a web browser, navigate to https://console.redhat.com/openshift/install/ibmz/user-provisioned * Download your local command line tools (oc and kubectl) * Copy the OpenShift pull secret (for use in the next step) * **Step 2: Set Variables** @@ -54,7 +54,8 @@ * **Step 4: Setup Script** * Navigate to the folder where you saved the Git Repository * Depending on which operating system you are using on your local workstation, run either - "ansible-playbook setup-mac.yaml --ask-become-pass" or "ansible-playbook setup-linux.yaml --ask-become-pass" + "ansible-playbook setup-mac.yaml --ask-become-pass" if your local workstation is a Mac, or + "ansible-playbook setup-linux.yaml --ask-become-pass" if you are using Linx ### Provisioning * **Step 5: Running the Main Playbook** @@ -135,8 +136,8 @@ ## Teardown: * If you would like to teardown your VMs, first determine whether you would like to do a full or partial teardown. 
-* Full: to teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags "full" -* Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags "partial" +* Full: to teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags full +* Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags partial * If you have provisioned more than the minimum number of nodes for your installation, add them to the respective list found in roles/teardown_vms/tasks/main.yaml. * Once you run the full teardown, to start the main.yaml playbook back from that point, run: @@ -145,25 +146,25 @@ ## Tags: -* setup = first-time setup of ansible -* prep = run all setup playbooks -* pkg = install and update all packages * bastion = configuration of bastion for OCP -* keymastr = ssh key configuration and testing * bastionvm = creation of Bastion KVM guest * boostrap = creation of Boostrap KVM guest * compute = creation of the Compute nodes KVM guests * control = creation of the Control nodes KVM guests -* ssh-copy-id = for copying ssh id -* dns = configuration of dns server on bastion +* create_nodes = tasks from the second set of kvm plays +* dns = configuration of DNS server on bastion +* firewall = for tasks related to firewall settings +* full = for use with teardown.yaml to bring down all VMs * getocp = download of OCP installer and http server configuration * haproxy = configuration of haproxy on bastion kvm guest * httpconf = configuration of httpd server on bastion kvm guest +* keymastr = ssh key configuration and testing * kvm_host = tasks to apply to KVM host for OCP cluster * kvm_prep = tasks from the first set of kvm plays -* create_nodes = tasks from the second set of kvm plays * localhost = for tasks that apply to the local machine running Ansible -* firewall = for 
tasks related to firewall settings -* selinux = for tasks related to SELinux settings * partial = for use with teardown.yaml to bring down VMs except bastion -* full = for use with teardown.yaml to bring down all VMs +* pkg = install and update all packages +* prep = run all setup playbooks +* selinux = for tasks related to SELinux settings +* setup = first-time setup of ansible +* ssh-copy-id = for copying ssh id \ No newline at end of file From 02239ddcef85042f807e46a1a105f1a8c3725e0e Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 11:21:35 -0500 Subject: [PATCH 398/885] Alphabetized tags and fixed more formatting issues in README. --- README.md | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 56090c8e..b7e7e22b 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ * Navigate to a folder where you would like to store this project in your terminal * Run "git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git" * **Step 2: Get OpenShift Information** - * In a web browser, navigate to https://console.redhat.com/openshift/install/ibmz/user-provisioned in order to: + * In a web browser, navigate to https://console.redhat.com/openshift/install/ibmz/user-provisioned * Download your local command line tools (oc and kubectl) * Copy the OpenShift pull secret (for use in the next step) * **Step 2: Set Variables** @@ -54,7 +54,8 @@ * **Step 4: Setup Script** * Navigate to the folder where you saved the Git Repository * Depending on which operating system you are using on your local workstation, run either - "ansible-playbook setup-mac.yaml --ask-become-pass" or "ansible-playbook setup-linux.yaml --ask-become-pass" + "ansible-playbook setup-mac.yaml --ask-become-pass" if your local workstation is a Mac, or + "ansible-playbook setup-linux.yaml --ask-become-pass" if you are using Linx ### Provisioning * **Step 5: Running the Main Playbook** @@ -135,8 +136,8 @@ ## 
Teardown: * If you would like to teardown your VMs, first determine whether you would like to do a full or partial teardown. -* Full: to teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags "full" -* Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags "partial" +* Full: to teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags full +* Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags partial * If you have provisioned more than the minimum number of nodes for your installation, add them to the respective list found in roles/teardown_vms/tasks/main.yaml. * Once you run the full teardown, to start the main.yaml playbook back from that point, run: @@ -145,25 +146,25 @@ ## Tags: -* setup = first-time setup of ansible -* prep = run all setup playbooks -* pkg = install and update all packages * bastion = configuration of bastion for OCP -* keymastr = ssh key configuration and testing * bastionvm = creation of Bastion KVM guest * boostrap = creation of Boostrap KVM guest * compute = creation of the Compute nodes KVM guests * control = creation of the Control nodes KVM guests -* ssh-copy-id = for copying ssh id -* dns = configuration of dns server on bastion +* create_nodes = tasks from the second set of kvm plays +* dns = configuration of DNS server on bastion +* firewall = for tasks related to firewall settings +* full = for use with teardown.yaml to bring down all VMs * getocp = download of OCP installer and http server configuration * haproxy = configuration of haproxy on bastion kvm guest * httpconf = configuration of httpd server on bastion kvm guest +* keymastr = ssh key configuration and testing * kvm_host = tasks to apply to KVM host for OCP cluster * kvm_prep = tasks from the first set of kvm plays -* create_nodes = tasks 
from the second set of kvm plays * localhost = for tasks that apply to the local machine running Ansible -* firewall = for tasks related to firewall settings -* selinux = for tasks related to SELinux settings * partial = for use with teardown.yaml to bring down VMs except bastion -* full = for use with teardown.yaml to bring down all VMs +* pkg = install and update all packages +* prep = run all setup playbooks +* selinux = for tasks related to SELinux settings +* setup = first-time setup of ansible +* ssh-copy-id = for copying ssh id \ No newline at end of file From 5f61592109fc28a27a9d1858427baf402a83915e Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 16:28:17 -0500 Subject: [PATCH 399/885] Added encryption to secure sensitive data in env.yaml. Added files to .gitignore. Created a unified setup playbook. Updated README. --- .gitignore | 2 -- README.md | 13 +++++---- ansible.cfg | 2 +- env.yaml | 12 ++++---- group_vars/all/main.yaml | 44 ----------------------------- main.yaml | 16 +++++------ roles/ansible_setup/tasks/main.yaml | 7 ++++- setup-linux.yaml | 7 ----- setup-mac.yaml | 17 ----------- setup.yaml | 42 +++++++++++++++++++++++++++ 10 files changed, 69 insertions(+), 93 deletions(-) delete mode 100644 group_vars/all/main.yaml delete mode 100644 setup-linux.yaml delete mode 100644 setup-mac.yaml create mode 100644 setup.yaml diff --git a/.gitignore b/.gitignore index bd25db43..fc60cbbb 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,4 @@ .DS_Store .iso roles/get-ocp/files/ocp_ssh_pub -env.yaml -group_vars/all/main.yaml* .vault_pass.txt \ No newline at end of file diff --git a/README.md b/README.md index b7e7e22b..3079d63a 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ * The goal of this playbook is to setup and deploy a User Provisioned Infrastructure (UPI) OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. 
-* This README file gives an extremely detailed step-by-step instruction for you to use as a reference. It assumes near zero experience. +* This README file gives extremely detailed step-by-step instructions for you to use as a reference. It assumes near zero experience. ## Supported Operating Systems (for local workstation): @@ -13,6 +13,7 @@ ## Pre-requisites: +* Red Hat Enterprise Linux (RHEL) license * Python3 intalled on your local computer (how-to: https://realpython.com/installing-python/) * Ansible installed on your local computer (how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) * If you are using Mac OS X for your localhost workstation to run Ansible, you also need to have: @@ -53,9 +54,10 @@ your networking team, and place them in the roles/dns/files folder. * **Step 4: Setup Script** * Navigate to the folder where you saved the Git Repository - * Depending on which operating system you are using on your local workstation, run either - "ansible-playbook setup-mac.yaml --ask-become-pass" if your local workstation is a Mac, or - "ansible-playbook setup-linux.yaml --ask-become-pass" if you are using Linx + * Run "ansible-playbook setup.yaml --ask-become-pass" + * When the setup playbook starts, it will prompt you for a password to use for encrypting Ansible vault files + * No files are encrypted until you run the main playbook in step 5 below + * If you would like to decrypt a file protected by Ansible vault, run: "ansible-vault decrypt file-name-here" ### Provisioning * **Step 5: Running the Main Playbook** @@ -125,7 +127,8 @@ * It may take hours, especially the cluster operators. Run the above two bullets' commmands to check in occasionally. * Once all nodes are ready and cluster operators are available, you are ready to continue to the next step. 
* **Step 12: Verify OpenShift Installation** - * From the bastion as root user (as above), run: "./openshift-install --dir=/ocpinst wait-for install-complete" + * From the bastion as root user (as above), navigate to /ocpinst ("cd /ocpinst") + * Run "./openshift-install --dir=/ocpinst wait-for install-complete" * If installation is ready, running the above command will give you some information about how to log into the OpenShift cluster's dashboard. * Copy the provided URL into a web browser and use the provided "kubeadmin" login and password for first time sign-on. * **Step 14: Celebrate!** diff --git a/ansible.cfg b/ansible.cfg index 008886a0..a0a3609c 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,7 +1,7 @@ [defaults] inventory=inventory private_key_file=~/.ssh/ansible -vault_password_file = ~/.vault_pass.txt +vault_password_file = .vault_pass.txt [inventory] cache=True private_key_file = ~/.ssh/ansible diff --git a/env.yaml b/env.yaml index a139e7df..3a51c4a5 100644 --- a/env.yaml +++ b/env.yaml @@ -13,8 +13,6 @@ env_fips: "false" # "true" or "false" (include quotes) env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"registry.connect.redhat.com":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejlaUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"},"registry.redhat.io":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejl
aUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"}}}' #paste it into these single quotes here - - # to fill inventory env_ip_kvm_host: 9.60.87.132 env_ip_bastion: 9.60.87.139 @@ -25,15 +23,15 @@ env_ip_control_2: 9.60.87.138 env_ip_compute_0: 9.60.87.134 env_ip_compute_1: 9.60.87.135 -# ssh -env_ssh_username: jacob #Username to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. -env_ssh_pass: ibmzrocks #Password to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. +# SSH +env_ssh_username: jacob #Username to use for SSH into KVM and host bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. +env_ssh_pass: ibmzrocks #Password to use for SSH into KVM host and bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. -# Ansible passwordless ssh +# Ansible passwordless SSH env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. 
default=""(no password) -# OpenShift cluster's ssh key comment +# OpenShift cluster's SSH key comment env_ssh_ocp_comm: "ocpz_distribution" # networking diff --git a/group_vars/all/main.yaml b/group_vars/all/main.yaml deleted file mode 100644 index 3f97f6a6..00000000 --- a/group_vars/all/main.yaml +++ /dev/null @@ -1,44 +0,0 @@ -# BEGIN ANSIBLE MANAGED BLOCK - -# to populate install_config -env_baseDomain: ocpz.wsclab.endicott.ibm.com -env_compute_arch: s390x #default to s390x -env_control_count: 3 #default 3 -env_control_arch: s390x #default s390x -env_metadata_name: distribution -env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now -env_host_prefix: 23 #default 23 for now -env_network_type: OpenShiftSDN #set default OpenShiftSDN -env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 -env_fips: "false" # "true" or "false" (include quotes) - -env_pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"registry.connect.redhat.com":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejlaUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0V
GlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"},"registry.redhat.io":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejlaUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"}}}' #paste it into these single quotes here - - - -# to fill inventory -env_ip_kvm_host: 9.60.87.132 -env_ip_bastion: 9.60.87.139 -env_ip_bootstrap: 9.60.87.133 -env_ip_control_0: 9.60.87.136 -env_ip_control_1: 9.60.87.137 -env_ip_control_2: 9.60.87.138 -env_ip_compute_0: 9.60.87.134 -env_ip_compute_1: 9.60.87.135 - -# ssh -env_ssh_username: jacob 
#Username to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. -env_ssh_pass: ibmzrocks #Password to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. - -# Ansible passwordless ssh -env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible -env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) - -# OpenShift cluster's ssh key comment -env_ssh_ocp_comm: "ocpz_distribution" - -# networking -dns_nameserver: 9.60.87.139 -default_gateway: 9.60.86.1 -netmask: 255.255.254.0 -# END ANSIBLE MANAGED BLOCK diff --git a/main.yaml b/main.yaml index fadc4c7d..b8995055 100644 --- a/main.yaml +++ b/main.yaml @@ -9,7 +9,6 @@ - env.yaml vars: - ssh_target_ip: "{{ env_ip_kvm_host }}" - roles: - ansible_setup - ssh_key_gen @@ -24,7 +23,7 @@ - packages: [ 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img', 'libvirt' ] roles: - check_ssh - #- install_packages + - install_packages - set_selinux_permissive - enable_packages - macvtap @@ -50,7 +49,6 @@ - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: - check_ssh - #- install_ansible - install_packages - ssh-ocp-key-gen # SSH key for bastion to connect to nodes - set_selinux_permissive @@ -83,12 +81,12 @@ #roles: #- wait_for_bootstrap -- hosts: bastion - tags: bastion,cluster - become: true - gather_facts: no - roles: - - connect_cluster +#- hosts: bastion + #tags: bastion,cluster + #become: true + #gather_facts: no + #roles: + #- connect_cluster #- hosts: bastion #become: true diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index b2ac21e5..29ab132d 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -69,12 +69,17 @@ register: inv_check 
failed_when: inv_check.rc != 0 -- name: fill ansible.cfg with given variable name for ansible passwordless ssh setup +# .vault_pass.txt is in the .gitignore file, and will therefore not be uploaded to Git should you do a git push. +- name: fill ansible.cfg with provided variable ansible ssh key file name tags: setup ansible.builtin.lineinfile: path: ansible.cfg regexp: '^private_key_file=' line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} + +- name: encrypt env.yaml and .vault_pass.txt + tags: setup + shell: ansible-vault encrypt env.yaml - name: Ansible generic setup to re-read inventory file after populated in previous tasks tags: setup diff --git a/setup-linux.yaml b/setup-linux.yaml deleted file mode 100644 index 70713486..00000000 --- a/setup-linux.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- - -- hosts: localhost - tags: localhost, prep - connection: local - become: false - gather_facts: no \ No newline at end of file diff --git a/setup-mac.yaml b/setup-mac.yaml deleted file mode 100644 index 3fa5c33c..00000000 --- a/setup-mac.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - -- hosts: localhost - tags: localhost, prep - connection: local - become: false - gather_facts: no - vars_prompt: - - name: vault_pass - prompt: Please provide a secure password to be used for encrypting your sensitive files in Ansible - private: yes - unsafe: yes # this just means you can use special characters. The password is safe. - - - tasks: - - name: install Ansible dependencies and packages - shell: ansible-setup-mac.sh - diff --git a/setup.yaml b/setup.yaml new file mode 100644 index 00000000..d1c038b0 --- /dev/null +++ b/setup.yaml @@ -0,0 +1,42 @@ +--- + +- hosts: localhost + tags: localhost, prep + connection: local + become: false + gather_facts: yes + vars_prompt: + - name: vault_pass + prompt: Please provide a secure password to be used for encrypting your sensitive files in Ansible + private: yes + unsafe: yes # this just means you can use special characters. The password is safe. 
+ + tasks: + - name: check to see if .vault_pass.txt exists already + stat: + path: .vault_pass.txt + register: vault_pass_check + + - name: delete .vault_pass.txt if it exists already to ensure idempotence + file: + path: .vault_pass.txt + state: absent + when: vault_pass_check.stat.exists + + - name: fill .vault_pass.txt with user-provided password + lineinfile: + path: .vault_pass.txt + state: present + create: yes + line: "{{ vault_pass }}" + + - name: install Ansible dependencies and packages + shell: files/shell_scripts/ansible-setup-mac.sh + when: ansible_facts['os_family'] == "Darwin" + + - name: install Ansible dependencies and packages + shell: files/shell_scripts/ansible-setup-linux.sh + when: ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Debian" + + + From 50e89c4c7a2107f181358b6e9f62acb096d50ce2 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 16:28:17 -0500 Subject: [PATCH 400/885] Added encryption to secure sensitive data in env.yaml. Added files to .gitignore. Created a unified setup playbook. Updated README. 
--- .gitignore | 2 -- README.md | 13 +++++---- ansible.cfg | 2 +- group_vars/all/main.yaml | 44 ----------------------------- main.yaml | 16 +++++------ roles/ansible_setup/tasks/main.yaml | 7 ++++- setup-linux.yaml | 7 ----- setup-mac.yaml | 17 ----------- setup.yaml | 42 +++++++++++++++++++++++++++ 9 files changed, 64 insertions(+), 86 deletions(-) delete mode 100644 group_vars/all/main.yaml delete mode 100644 setup-linux.yaml delete mode 100644 setup-mac.yaml create mode 100644 setup.yaml diff --git a/.gitignore b/.gitignore index bd25db43..fc60cbbb 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,4 @@ .DS_Store .iso roles/get-ocp/files/ocp_ssh_pub -env.yaml -group_vars/all/main.yaml* .vault_pass.txt \ No newline at end of file diff --git a/README.md b/README.md index b7e7e22b..3079d63a 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ * The goal of this playbook is to setup and deploy a User Provisioned Infrastructure (UPI) OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. -* This README file gives an extremely detailed step-by-step instruction for you to use as a reference. It assumes near zero experience. +* This README file gives extremely detailed step-by-step instructions for you to use as a reference. It assumes near zero experience. ## Supported Operating Systems (for local workstation): @@ -13,6 +13,7 @@ ## Pre-requisites: +* Red Hat Enterprise Linux (RHEL) license * Python3 intalled on your local computer (how-to: https://realpython.com/installing-python/) * Ansible installed on your local computer (how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) * If you are using Mac OS X for your localhost workstation to run Ansible, you also need to have: @@ -53,9 +54,10 @@ your networking team, and place them in the roles/dns/files folder. 
* **Step 4: Setup Script** * Navigate to the folder where you saved the Git Repository - * Depending on which operating system you are using on your local workstation, run either - "ansible-playbook setup-mac.yaml --ask-become-pass" if your local workstation is a Mac, or - "ansible-playbook setup-linux.yaml --ask-become-pass" if you are using Linx + * Run "ansible-playbook setup.yaml --ask-become-pass" + * When the setup playbook starts, it will prompt you for a password to use for encrypting Ansible vault files + * No files are encrypted until you run the main playbook in step 5 below + * If you would like to decrypt a file protected by Ansible vault, run: "ansible-vault decrypt file-name-here" ### Provisioning * **Step 5: Running the Main Playbook** @@ -125,7 +127,8 @@ * It may take hours, especially the cluster operators. Run the above two bullets' commmands to check in occasionally. * Once all nodes are ready and cluster operators are available, you are ready to continue to the next step. * **Step 12: Verify OpenShift Installation** - * From the bastion as root user (as above), run: "./openshift-install --dir=/ocpinst wait-for install-complete" + * From the bastion as root user (as above), navigate to /ocpinst ("cd /ocpinst") + * Run "./openshift-install --dir=/ocpinst wait-for install-complete" * If installation is ready, running the above command will give you some information about how to log into the OpenShift cluster's dashboard. * Copy the provided URL into a web browser and use the provided "kubeadmin" login and password for first time sign-on. 
* **Step 14: Celebrate!** diff --git a/ansible.cfg b/ansible.cfg index 008886a0..a0a3609c 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,7 +1,7 @@ [defaults] inventory=inventory private_key_file=~/.ssh/ansible -vault_password_file = ~/.vault_pass.txt +vault_password_file = .vault_pass.txt [inventory] cache=True private_key_file = ~/.ssh/ansible diff --git a/group_vars/all/main.yaml b/group_vars/all/main.yaml deleted file mode 100644 index 3f97f6a6..00000000 --- a/group_vars/all/main.yaml +++ /dev/null @@ -1,44 +0,0 @@ -# BEGIN ANSIBLE MANAGED BLOCK - -# to populate install_config -env_baseDomain: ocpz.wsclab.endicott.ibm.com -env_compute_arch: s390x #default to s390x -env_control_count: 3 #default 3 -env_control_arch: s390x #default s390x -env_metadata_name: distribution -env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now -env_host_prefix: 23 #default 23 for now -env_network_type: OpenShiftSDN #set default OpenShiftSDN -env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 -env_fips: "false" # "true" or "false" (include quotes) - -env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"registry.connect.redhat.com":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejlaUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"},"registry.redhat.io":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejl
aUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"}}}' #paste it into these single quotes here - - - -# to fill inventory -env_ip_kvm_host: 9.60.87.132 -env_ip_bastion: 9.60.87.139 -env_ip_bootstrap: 9.60.87.133 -env_ip_control_0: 9.60.87.136 -env_ip_control_1: 9.60.87.137 -env_ip_control_2: 9.60.87.138 -env_ip_compute_0: 9.60.87.134 -env_ip_compute_1: 9.60.87.135 - -# ssh -env_ssh_username: jacob #Username to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. -env_ssh_pass: ibmzrocks #Password to use for ssh into kvm and bastion for first-time set-up of ansible passwordless ssh. Assumes same for both kvm and bastion. - -# Ansible passwordless ssh -env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible -env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. 
default=""(no password) - -# OpenShift cluster's ssh key comment -env_ssh_ocp_comm: "ocpz_distribution" - -# networking -dns_nameserver: 9.60.87.139 -default_gateway: 9.60.86.1 -netmask: 255.255.254.0 -# END ANSIBLE MANAGED BLOCK diff --git a/main.yaml b/main.yaml index fadc4c7d..b8995055 100644 --- a/main.yaml +++ b/main.yaml @@ -9,7 +9,6 @@ - env.yaml vars: - ssh_target_ip: "{{ env_ip_kvm_host }}" - roles: - ansible_setup - ssh_key_gen @@ -24,7 +23,7 @@ - packages: [ 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img', 'libvirt' ] roles: - check_ssh - #- install_packages + - install_packages - set_selinux_permissive - enable_packages - macvtap @@ -50,7 +49,6 @@ - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] roles: - check_ssh - #- install_ansible - install_packages - ssh-ocp-key-gen # SSH key for bastion to connect to nodes - set_selinux_permissive @@ -83,12 +81,12 @@ #roles: #- wait_for_bootstrap -- hosts: bastion - tags: bastion,cluster - become: true - gather_facts: no - roles: - - connect_cluster +#- hosts: bastion + #tags: bastion,cluster + #become: true + #gather_facts: no + #roles: + #- connect_cluster #- hosts: bastion #become: true diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index b2ac21e5..29ab132d 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -69,12 +69,17 @@ register: inv_check failed_when: inv_check.rc != 0 -- name: fill ansible.cfg with given variable name for ansible passwordless ssh setup +# .vault_pass.txt is in the .gitignore file, and will therefore not be uploaded to Git should you do a git push. 
+- name: fill ansible.cfg with provided variable ansible ssh key file name tags: setup ansible.builtin.lineinfile: path: ansible.cfg regexp: '^private_key_file=' line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} + +- name: encrypt env.yaml and .vault_pass.txt + tags: setup + shell: ansible-vault encrypt env.yaml - name: Ansible generic setup to re-read inventory file after populated in previous tasks tags: setup diff --git a/setup-linux.yaml b/setup-linux.yaml deleted file mode 100644 index 70713486..00000000 --- a/setup-linux.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- - -- hosts: localhost - tags: localhost, prep - connection: local - become: false - gather_facts: no \ No newline at end of file diff --git a/setup-mac.yaml b/setup-mac.yaml deleted file mode 100644 index 3fa5c33c..00000000 --- a/setup-mac.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - -- hosts: localhost - tags: localhost, prep - connection: local - become: false - gather_facts: no - vars_prompt: - - name: vault_pass - prompt: Please provide a secure password to be used for encrypting your sensitive files in Ansible - private: yes - unsafe: yes # this just means you can use special characters. The password is safe. - - - tasks: - - name: install Ansible dependencies and packages - shell: ansible-setup-mac.sh - diff --git a/setup.yaml b/setup.yaml new file mode 100644 index 00000000..d1c038b0 --- /dev/null +++ b/setup.yaml @@ -0,0 +1,42 @@ +--- + +- hosts: localhost + tags: localhost, prep + connection: local + become: false + gather_facts: yes + vars_prompt: + - name: vault_pass + prompt: Please provide a secure password to be used for encrypting your sensitive files in Ansible + private: yes + unsafe: yes # this just means you can use special characters. The password is safe. 
+ + tasks: + - name: check to see if .vault_pass.txt exists already + stat: + path: .vault_pass.txt + register: vault_pass_check + + - name: delete .vault_pass.txt if it exists already to ensure idempotence + file: + path: .vault_pass.txt + state: absent + when: vault_pass_check.stat.exists + + - name: fill .vault_pass.txt with user-provided password + lineinfile: + path: .vault_pass.txt + state: present + create: yes + line: "{{ vault_pass }}" + + - name: install Ansible dependencies and packages + shell: files/shell_scripts/ansible-setup-mac.sh + when: ansible_facts['os_family'] == "Darwin" + + - name: install Ansible dependencies and packages + shell: files/shell_scripts/ansible-setup-linux.sh + when: ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Debian" + + + From 6fd4497e8e586430639172183370c80d7ef4d750 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 16:48:37 -0500 Subject: [PATCH 401/885] variablized dns main.yaml and updated the README --- README.md | 10 ++++++---- roles/dns/tasks/main.yaml | 4 ++-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 3079d63a..14a3bef4 100644 --- a/README.md +++ b/README.md @@ -50,8 +50,10 @@ * In a text editor of your choice, open env.yaml, found in the main directory of this repository * Fill out all of the required variables for your specific installation * **Step 3: DNS Configuration** - * Get DNS configuration files (forward (.db), reverse (.rev), and named.conf), or have them pre-defined by - your networking team, and place them in the roles/dns/files folder. + * Get DNS configuration files (forward (.db), reverse (.rev), and named.conf), or have them pre-defined by your networking team. + * Place them in the roles/dns/files folder + * Please leave the named.conf the same name. + * Rename the .db and .rev files with the same name you set for "env_metadata_name" in env.yaml (i.e. 
distribution.rev) * **Step 4: Setup Script** * Navigate to the folder where you saved the Git Repository * Run "ansible-playbook setup.yaml --ask-become-pass" @@ -130,7 +132,7 @@ * From the bastion as root user (as above), navigate to /ocpinst ("cd /ocpinst") * Run "./openshift-install --dir=/ocpinst wait-for install-complete" * If installation is ready, running the above command will give you some information about how to log into the OpenShift cluster's dashboard. - * Copy the provided URL into a web browser and use the provided "kubeadmin" login and password for first time sign-on. + * Copy the provided URL into a web browser and use "kubeadmin" as login and the provided password for first time sign-on. * **Step 14: Celebrate!** * Your OpenShift cluster provisioning and installation is now complete. @@ -147,7 +149,7 @@ "run ansible-playbook main.yaml --ask-become-pass --tags "bastionvm,bastion,create_nodes" * Once you run the partial teardown, to start the main.yaml playbook back from that point, run main.yaml with the tags "bastion,create_nodes". 
-## Tags: +## Tags (in alphabetical order): * bastion = configuration of bastion for OCP * bastionvm = creation of Bastion KVM guest diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml index e3dadf2c..164fe609 100644 --- a/roles/dns/tasks/main.yaml +++ b/roles/dns/tasks/main.yaml @@ -25,7 +25,7 @@ - name: Copy DNS .db file to bastion tags: dns,bastion ansible.builtin.copy: - src: distribution.db + src: "{{ env_metadata_name }}.db" dest: /var/named owner: named group: named @@ -35,7 +35,7 @@ - name: Copy DNS .rev file to bastion tags: dns,bastion ansible.builtin.copy: - src: distribution.rev + src: "{{ env_metadata_name }}.rev" dest: /var/named owner: named group: named From 10dcfb021c8e5549463cf85ce6f80711019254ff Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 16:48:37 -0500 Subject: [PATCH 402/885] variablized dns main.yaml and updated the README --- README.md | 10 ++++++---- roles/dns/tasks/main.yaml | 4 ++-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 3079d63a..14a3bef4 100644 --- a/README.md +++ b/README.md @@ -50,8 +50,10 @@ * In a text editor of your choice, open env.yaml, found in the main directory of this repository * Fill out all of the required variables for your specific installation * **Step 3: DNS Configuration** - * Get DNS configuration files (forward (.db), reverse (.rev), and named.conf), or have them pre-defined by - your networking team, and place them in the roles/dns/files folder. + * Get DNS configuration files (forward (.db), reverse (.rev), and named.conf), or have them pre-defined by your networking team. + * Place them in the roles/dns/files folder + * Please leave the named.conf the same name. + * Rename the .db and .rev files with the same name you set for "env_metadata_name" in env.yaml (i.e. 
distribution.rev) * **Step 4: Setup Script** * Navigate to the folder where you saved the Git Repository * Run "ansible-playbook setup.yaml --ask-become-pass" @@ -130,7 +132,7 @@ * From the bastion as root user (as above), navigate to /ocpinst ("cd /ocpinst") * Run "./openshift-install --dir=/ocpinst wait-for install-complete" * If installation is ready, running the above command will give you some information about how to log into the OpenShift cluster's dashboard. - * Copy the provided URL into a web browser and use the provided "kubeadmin" login and password for first time sign-on. + * Copy the provided URL into a web browser and use "kubeadmin" as login and the provided password for first time sign-on. * **Step 14: Celebrate!** * Your OpenShift cluster provisioning and installation is now complete. @@ -147,7 +149,7 @@ "run ansible-playbook main.yaml --ask-become-pass --tags "bastionvm,bastion,create_nodes" * Once you run the partial teardown, to start the main.yaml playbook back from that point, run main.yaml with the tags "bastion,create_nodes". -## Tags: +## Tags (in alphabetical order): * bastion = configuration of bastion for OCP * bastionvm = creation of Bastion KVM guest diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml index e3dadf2c..164fe609 100644 --- a/roles/dns/tasks/main.yaml +++ b/roles/dns/tasks/main.yaml @@ -25,7 +25,7 @@ - name: Copy DNS .db file to bastion tags: dns,bastion ansible.builtin.copy: - src: distribution.db + src: "{{ env_metadata_name }}.db" dest: /var/named owner: named group: named @@ -35,7 +35,7 @@ - name: Copy DNS .rev file to bastion tags: dns,bastion ansible.builtin.copy: - src: distribution.rev + src: "{{ env_metadata_name }}.rev" dest: /var/named owner: named group: named From c230e6d10a8d400643ff758408b68878a9c8ceff Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 16:54:47 -0500 Subject: [PATCH 403/885] Changed the name of networking variables in env.yaml to match others in that file. 
Deleted reference to encrypting .vault_pass.txt from ansible_setup because it was added to .gitignore instead --- env.yaml | 21 ++++++++++----------- roles/ansible_setup/tasks/main.yaml | 8 ++++---- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/env.yaml b/env.yaml index 3a51c4a5..b3711ac2 100644 --- a/env.yaml +++ b/env.yaml @@ -1,16 +1,15 @@ # to populate install_config env_baseDomain: ocpz.wsclab.endicott.ibm.com -env_compute_arch: s390x #default to s390x -env_control_count: 3 #default 3 -env_control_arch: s390x #default s390x +env_compute_arch: s390x +env_control_count: 3 +env_control_arch: s390x env_metadata_name: distribution -env_cidr: 10.128.0.0/14 #default 10.128.0.0/14 for now -env_host_prefix: 23 #default 23 for now -env_network_type: OpenShiftSDN #set default OpenShiftSDN -env_service_network: 172.30.0.0/16 #default 172.30.0.0/16 +env_cidr: 10.128.0.0/14 +env_host_prefix: 23 +env_network_type: OpenShiftSDN +env_service_network: 172.30.0.0/16 env_fips: "false" # "true" or "false" (include quotes) - env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"registry.connect.redhat.com":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejlaUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"},"registry.redhat.io":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejl
aUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"}}}' #paste it into these single quotes here # to fill inventory @@ -35,6 +34,6 @@ env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. d env_ssh_ocp_comm: "ocpz_distribution" # networking -dns_nameserver: 9.60.87.139 -default_gateway: 9.60.86.1 -netmask: 255.255.254.0 \ No newline at end of file +env_dns_nameserver: 9.60.87.139 +env_default_gateway: 9.60.86.1 +env_netmask: 255.255.254.0 \ No newline at end of file diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 29ab132d..71610b5f 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -34,9 +34,9 @@ - env_ssh_ans_name - env_ssh_ans_pass - env_ssh_ocp_comm - - dns_nameserver - - default_gateway - - netmask + - env_dns_nameserver + - env_default_gateway + - env_netmask - name: Populate inventory file with ip variables from env.yaml tags: setup @@ -77,7 +77,7 @@ regexp: '^private_key_file=' line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} -- name: encrypt env.yaml and .vault_pass.txt +- name: encrypt env.yaml to protect sensitive data tags: setup shell: ansible-vault encrypt env.yaml From abe2fc1cdf87926062529dcb06aecf3a585ea13f Mon Sep 17 00:00:00 
2001 From: jacobemery Date: Wed, 1 Sep 2021 16:54:47 -0500 Subject: [PATCH 404/885] Changed the name of networking variables in env.yaml to match others in that file. Deleted reference to encrypting .vault_pass.txt from ansible_setup because it was added to .gitignore instead --- roles/ansible_setup/tasks/main.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 29ab132d..71610b5f 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -34,9 +34,9 @@ - env_ssh_ans_name - env_ssh_ans_pass - env_ssh_ocp_comm - - dns_nameserver - - default_gateway - - netmask + - env_dns_nameserver + - env_default_gateway + - env_netmask - name: Populate inventory file with ip variables from env.yaml tags: setup @@ -77,7 +77,7 @@ regexp: '^private_key_file=' line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} -- name: encrypt env.yaml and .vault_pass.txt +- name: encrypt env.yaml to protect sensitive data tags: setup shell: ansible-vault encrypt env.yaml From 42b4580ac1fbaa07e77d94928a68fcb6a56dc368 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 16:59:16 -0500 Subject: [PATCH 405/885] updated readme --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 14a3bef4..2d21bd8a 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ * system tools * On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses * Fully Qualified Domain Names (FQDN) names for all IPv4 addresses -* DNS configuration files (forward (.db), reverse (.rev), and named.conf). Note: we plan to automate this in the future. +* DNS configuration files (forward (.db), reverse (.rev), and named.conf). ## Installation Instructions: @@ -85,7 +85,6 @@ * Once you fill out all the required configuration settings, press "b" to begin installation. 
* Once you see "bastion login", come back to the terminal to continue your run by pressing "ctrl+c" and then "c". If there was a problem and you need to stop the playbook, press "ctrl+c" and then "a". - * Note: we plan to automate the installation configuration in the future. * **Step 7: Starting Up Bootstrap and Control Nodes** * The playbook will continue to run, preparing the bootstrap and control nodes. * To monitor the nodes as they come up, watch them on the cockpit at: "https://your-KVM-host-IP-address-here:9090" From 21ab6f9f53427e6475101b6401cc8a92a55197da Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 16:59:16 -0500 Subject: [PATCH 406/885] updated readme --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 14a3bef4..2d21bd8a 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ * system tools * On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses * Fully Qualified Domain Names (FQDN) names for all IPv4 addresses -* DNS configuration files (forward (.db), reverse (.rev), and named.conf). Note: we plan to automate this in the future. +* DNS configuration files (forward (.db), reverse (.rev), and named.conf). ## Installation Instructions: @@ -85,7 +85,6 @@ * Once you fill out all the required configuration settings, press "b" to begin installation. * Once you see "bastion login", come back to the terminal to continue your run by pressing "ctrl+c" and then "c". If there was a problem and you need to stop the playbook, press "ctrl+c" and then "a". - * Note: we plan to automate the installation configuration in the future. * **Step 7: Starting Up Bootstrap and Control Nodes** * The playbook will continue to run, preparing the bootstrap and control nodes. 
* To monitor the nodes as they come up, watch them on the cockpit at: "https://your-KVM-host-IP-address-here:9090" From 053c28d45f7e110d206c323e06065370e591db9f Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 17:37:15 -0500 Subject: [PATCH 407/885] variablized the create nodes shell scripts --- README.md | 2 +- env.yaml | 3 ++- main.yaml | 2 ++ roles/ansible_setup/tasks/main.yaml | 1 + roles/create_bastion/tasks/main.yaml | 8 ++++++-- roles/create_bootstrap/tasks/main.yaml | 6 +++++- roles/create_compute_nodes/tasks/main.yaml | 8 ++++++-- roles/create_control_nodes/tasks/main.yaml | 12 ++++++++---- roles/workstations | 1 - 9 files changed, 31 insertions(+), 12 deletions(-) delete mode 100644 roles/workstations diff --git a/README.md b/README.md index 2d21bd8a..1986263a 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ * The goal of this playbook is to setup and deploy a User Provisioned Infrastructure (UPI) OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. -* This README file gives extremely detailed step-by-step instructions for you to use as a reference. It assumes near zero experience. +* This README file gives extremely detailed step-by-step instructions for you to use as a reference. It assumes near zero experience with Ansible. 
## Supported Operating Systems (for local workstation): diff --git a/env.yaml b/env.yaml index b3711ac2..20f3a2b4 100644 --- a/env.yaml +++ b/env.yaml @@ -36,4 +36,5 @@ env_ssh_ocp_comm: "ocpz_distribution" # networking env_dns_nameserver: 9.60.87.139 env_default_gateway: 9.60.86.1 -env_netmask: 255.255.254.0 \ No newline at end of file +env_netmask: 255.255.254.0 +env_ftp: 9.60.86.81 \ No newline at end of file diff --git a/main.yaml b/main.yaml index b8995055..7e78360c 100644 --- a/main.yaml +++ b/main.yaml @@ -62,6 +62,8 @@ tags: kvm_host,create_nodes become: true gather_facts: no + vars_files: + - env.yaml roles: - prep_kvm_guests - create_bootstrap diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 71610b5f..7a037c25 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -37,6 +37,7 @@ - env_dns_nameserver - env_default_gateway - env_netmask + - env_ftp - name: Populate inventory file with ip variables from env.yaml tags: setup diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index bd5e5bbb..07764748 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -22,6 +22,10 @@ # - name: virtualize bastion server # command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G +- name: Load in variables from env.yaml + tags: setup + include_vars: env.yaml + - name: check if bastion already exists tags: kvm_host, bastionvm community.libvirt.virt: @@ -37,10 +41,10 @@ - name: start bastion install tags: kvm_host, bastionvm - command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 
inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip={{env_ip_bastion}}::{{env_default_gateway}}:{{env_netmask}}:bastion::none nameserver={{env_dns_nameserver}} inst.repo=http://{{env_ftp}}/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole when: bastion_check.failed == true -- name: README - Pausing for 60 minutes for you to complete the bastion installation of rhel OS with your specific installation's requirements. Please go to your kvm host at to complete installation. Once you see the login prompt on the bastion's terminal, come back here and press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. +- name: README - Pausing for 60 minutes for you to complete the bastion installation of rhel OS with your specific installation's requirements. Please go to your kvm host at https://your-kvm-host-ip-address-here:9090 to complete installation. Once you see the login prompt on the bastion's terminal, come back here and press "ctrl+c" and then "c" on your localhost terminal where you are seeing this message. 
tags: kvm_host, bastionvm pause: minutes: 60 diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index f4ec9532..5d026a17 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -1,5 +1,9 @@ --- +- name: Load in variables from env.yaml + tags: setup + include_vars: env.yaml + - name: check if bootstrap already exists tags: bootstrap community.libvirt.virt: @@ -21,7 +25,7 @@ --os-type linux --os-variant rhel8.0 --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.133::9.60.86.1:255.255.254.0:bootstrap:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign" + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_bootstrap}}::{{env_default_gateway}}:{{env_netmask}}:bootstrap:enc1:none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_dns_nameserver}}:8080/ignition/bootstrap.ign" --noautoconsole --wait=-1 when: bootstrap_check.failed == true diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index 14ef6a1e..8c7908b2 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -1,5 +1,9 @@ --- +- name: Load in variables from env.yaml + tags: setup + include_vars: env.yaml + - name: check if compute-0 already exists tags: compute community.libvirt.virt: @@ -34,7 +38,7 @@ --os-type linux --os-variant rhel8.0 --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args 
"rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.135::9.60.86.1:255.255.254.0:compute-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_compute_0}}::{{env_default_gateway}}:{{env_netmask}}:compute-0:enc1:none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/worker.ign" --noautoconsole --wait=-1 when: compute_0_check.failed == true @@ -52,7 +56,7 @@ --os-type linux --os-variant rhel8.0 --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.134::9.60.86.1:255.255.254.0:compute-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_compute_1}}::{{env_default_gateway}}:{{env_netmask}}:compute-1:enc1:none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/worker.ign" --noautoconsole --wait=-1 when: compute_1_check.failed == true diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index 94b91c28..3b5c50a7 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -1,5 +1,9 @@ --- +- name: Load in variables from env.yaml + tags: setup + include_vars: env.yaml + - name: check if control-0 
already exists tags: control community.libvirt.virt: @@ -47,7 +51,7 @@ --os-type linux --os-variant rhel8.0 --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.138::9.60.86.1:255.255.254.0:control-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_control_0}}::{{env_default_gateway}}:{{env_netmask}}:control-0:enc1:none:1500 nameserver={{env_ip_bastion}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/master.ign" --noautoconsole --wait=-1 when: control_0_check.failed == true @@ -65,7 +69,7 @@ --os-type linux --os-variant rhel8.0 --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.137::9.60.86.1:255.255.254.0:control-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_control_1}}::{{env_default_gateway}}:{{env_netmask}}:control-1:enc1:none:1500 nameserver={{env_ip_bastion}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/master.ign" --noautoconsole --wait=-1 when: control_1_check.failed == true @@ -83,11 +87,11 @@ --os-type linux --os-variant rhel8.0 --network network=macvtap-net --location 
/var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.136::9.60.86.1:255.255.254.0:control-2:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_control_2}}::{{env_default_gateway}}:{{env_netmask}}:control-2:enc1:none:1500 nameserver={{env_ip_bastion}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/master.ign" --noautoconsole --wait=-1 when: control_2_check.failed == true -- name: pause 45 minutes +- name: pause 45 minutes to wait for bootkube.service to complete connecting control nodes. See README for more information. tags: control pause: minutes: 45 diff --git a/roles/workstations b/roles/workstations deleted file mode 100644 index ed97d539..00000000 --- a/roles/workstations +++ /dev/null @@ -1 +0,0 @@ ---- From d947403c83a79498054741eaa7cab88de84bb8f6 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 17:37:15 -0500 Subject: [PATCH 408/885] variablized the create nodes shell scripts --- README.md | 2 +- main.yaml | 2 ++ roles/ansible_setup/tasks/main.yaml | 1 + roles/create_bastion/tasks/main.yaml | 8 ++++++-- roles/create_bootstrap/tasks/main.yaml | 6 +++++- roles/create_compute_nodes/tasks/main.yaml | 8 ++++++-- roles/create_control_nodes/tasks/main.yaml | 12 ++++++++---- roles/workstations | 1 - 8 files changed, 29 insertions(+), 11 deletions(-) delete mode 100644 roles/workstations diff --git a/README.md b/README.md index 2d21bd8a..1986263a 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ * The goal of this playbook is to setup and deploy a User Provisioned Infrastructure (UPI) OpenShift cluster on 
an IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. -* This README file gives extremely detailed step-by-step instructions for you to use as a reference. It assumes near zero experience. +* This README file gives extremely detailed step-by-step instructions for you to use as a reference. It assumes near zero experience with Ansible. ## Supported Operating Systems (for local workstation): diff --git a/main.yaml b/main.yaml index b8995055..7e78360c 100644 --- a/main.yaml +++ b/main.yaml @@ -62,6 +62,8 @@ tags: kvm_host,create_nodes become: true gather_facts: no + vars_files: + - env.yaml roles: - prep_kvm_guests - create_bootstrap diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/ansible_setup/tasks/main.yaml index 71610b5f..7a037c25 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/ansible_setup/tasks/main.yaml @@ -37,6 +37,7 @@ - env_dns_nameserver - env_default_gateway - env_netmask + - env_ftp - name: Populate inventory file with ip variables from env.yaml tags: setup diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index bd5e5bbb..07764748 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -22,6 +22,10 @@ # - name: virtualize bastion server # command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G +- name: Load in variables from env.yaml + tags: setup + include_vars: env.yaml + - name: check if bastion already exists tags: kvm_host, bastionvm community.libvirt.virt: @@ -37,10 +41,10 @@ - name: start bastion install tags: kvm_host, bastionvm - command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip=9.60.87.139::9.60.86.1:255.255.254.0:bastion::none nameserver=9.60.70.82 
inst.repo=http://9.60.86.81/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole + command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip={{env_ip_bastion}}::{{env_default_gateway}}:{{env_netmask}}:bastion::none nameserver={{env_dns_nameserver}} inst.repo=http://{{env_ftp}}/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole when: bastion_check.failed == true -- name: README - Pausing for 60 minutes for you to complete the bastion installation of rhel OS with your specific installation's requirements. Please go to your kvm host at to complete installation. Once you see the login prompt on the bastion's terminal, come back here and press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. +- name: README - Pausing for 60 minutes for you to complete the bastion installation of rhel OS with your specific installation's requirements. Please go to your kvm host at https://your-kvm-host-ip-address-here:9090 to complete installation. Once you see the login prompt on the bastion's terminal, come back here and press "ctrl+c" and then "c" on your localhost terminal where you are seeing this message. 
tags: kvm_host, bastionvm pause: minutes: 60 diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index f4ec9532..5d026a17 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -1,5 +1,9 @@ --- +- name: Load in variables from env.yaml + tags: setup + include_vars: env.yaml + - name: check if bootstrap already exists tags: bootstrap community.libvirt.virt: @@ -21,7 +25,7 @@ --os-type linux --os-variant rhel8.0 --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.133::9.60.86.1:255.255.254.0:bootstrap:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/bootstrap.ign" + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_bootstrap}}::{{env_default_gateway}}:{{env_netmask}}:bootstrap:enc1:none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_dns_nameserver}}:8080/ignition/bootstrap.ign" --noautoconsole --wait=-1 when: bootstrap_check.failed == true diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index 14ef6a1e..8c7908b2 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -1,5 +1,9 @@ --- +- name: Load in variables from env.yaml + tags: setup + include_vars: env.yaml + - name: check if compute-0 already exists tags: compute community.libvirt.virt: @@ -34,7 +38,7 @@ --os-type linux --os-variant rhel8.0 --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args 
"rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.135::9.60.86.1:255.255.254.0:compute-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_compute_0}}::{{env_default_gateway}}:{{env_netmask}}:compute-0:enc1:none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/worker.ign" --noautoconsole --wait=-1 when: compute_0_check.failed == true @@ -52,7 +56,7 @@ --os-type linux --os-variant rhel8.0 --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.134::9.60.86.1:255.255.254.0:compute-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/worker.ign" + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_compute_1}}::{{env_default_gateway}}:{{env_netmask}}:compute-1:enc1:none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/worker.ign" --noautoconsole --wait=-1 when: compute_1_check.failed == true diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index 94b91c28..3b5c50a7 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -1,5 +1,9 @@ --- +- name: Load in variables from env.yaml + tags: setup + include_vars: env.yaml + - name: check if control-0 
already exists tags: control community.libvirt.virt: @@ -47,7 +51,7 @@ --os-type linux --os-variant rhel8.0 --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.138::9.60.86.1:255.255.254.0:control-0:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_control_0}}::{{env_default_gateway}}:{{env_netmask}}:control-0:enc1:none:1500 nameserver={{env_ip_bastion}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/master.ign" --noautoconsole --wait=-1 when: control_0_check.failed == true @@ -65,7 +69,7 @@ --os-type linux --os-variant rhel8.0 --network network=macvtap-net --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.137::9.60.86.1:255.255.254.0:control-1:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_control_1}}::{{env_default_gateway}}:{{env_netmask}}:control-1:enc1:none:1500 nameserver={{env_ip_bastion}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/master.ign" --noautoconsole --wait=-1 when: control_1_check.failed == true @@ -83,11 +87,11 @@ --os-type linux --os-variant rhel8.0 --network network=macvtap-net --location 
/var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://9.60.87.139:8080/bin/rhcos-live-rootfs.s390x.img ip=9.60.87.136::9.60.86.1:255.255.254.0:control-2:enc1:none:1500 nameserver=9.60.87.139 coreos.inst.ignition_url=http://9.60.87.139:8080/ignition/master.ign" + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_control_2}}::{{env_default_gateway}}:{{env_netmask}}:control-2:enc1:none:1500 nameserver={{env_ip_bastion}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/master.ign" --noautoconsole --wait=-1 when: control_2_check.failed == true -- name: pause 45 minutes +- name: pause 45 minutes to wait for bootkube.service to complete connecting control nodes. See README for more information. tags: control pause: minutes: 45 diff --git a/roles/workstations b/roles/workstations deleted file mode 100644 index ed97d539..00000000 --- a/roles/workstations +++ /dev/null @@ -1 +0,0 @@ ---- From dab4912bba34745688cf17e75c875368d32703bf Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 18:54:22 -0500 Subject: [PATCH 409/885] Deleted deprecated files, started working on templating haproxy.cfg, and fixed tags in create nodes playbooks --- files/cluster-pvc.yaml | 18 ----- files/distribution.db | 48 ------------- files/distribution.rev | 30 --------- files/haproxy8.cfg | 58 ---------------- files/install-config.yaml | 26 -------- files/macvtap.xml.j2 | 6 -- files/named.conf | 78 ---------------------- files/pull-secret.txt | 1 - files/rhel-guest-image.txt | 1 - files/rhel83.iso | 1 - files/sudoers_zcts | 2 - files/test.txt | 1 - group_vars/bastion/main.yaml | 0 main.yaml | 1 + roles/create_bastion/tasks/main.yaml | 2 +- roles/create_bootstrap/tasks/main.yaml | 2 +- 
roles/create_compute_nodes/tasks/main.yaml | 2 +- roles/create_control_nodes/tasks/main.yaml | 2 +- roles/haproxy/tasks/main.yaml | 9 +++ 19 files changed, 14 insertions(+), 274 deletions(-) delete mode 100644 files/cluster-pvc.yaml delete mode 100644 files/distribution.db delete mode 100644 files/distribution.rev delete mode 100644 files/haproxy8.cfg delete mode 100644 files/install-config.yaml delete mode 100644 files/macvtap.xml.j2 delete mode 100644 files/named.conf delete mode 100644 files/pull-secret.txt delete mode 100644 files/rhel-guest-image.txt delete mode 100644 files/rhel83.iso delete mode 100644 files/sudoers_zcts delete mode 100644 files/test.txt delete mode 100644 group_vars/bastion/main.yaml diff --git a/files/cluster-pvc.yaml b/files/cluster-pvc.yaml deleted file mode 100644 index f2feb6f2..00000000 --- a/files/cluster-pvc.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: pv0001 - annotations: - volume.beta.kubernetes.io/mount-options: rw,nfsvers=4,noexec -spec: - capacity: - storage: 150Gi - accessModes: - - ReadWriteOnce - nfs: - path: /mnt/nfs-shares/dist-ocp - server: 9.60.87.222 - persistentVolumeReclaimPolicy: Retain - claimRef: - name: claim1 - namespace: default \ No newline at end of file diff --git a/files/distribution.db b/files/distribution.db deleted file mode 100644 index 35be68ba..00000000 --- a/files/distribution.db +++ /dev/null @@ -1,48 +0,0 @@ -$TTL 86400 -@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com.( - 2020021821 ;Serial - 3600 ;Refresh - 1800 ;Retry - 604800 ;Expire - 86400 ;Minimum TTL -) - -;Name Server Information -@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. - -;IP Address for Name Server -bastion IN A 9.60.87.139 - -;entry for bootstrap host. 
-;bootstrap IN A 9.60.87.133 -bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.133 - -;entries for the master nodes -control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 -;control-0 IN A 9.60.87.138 -control-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.137 -;control-1 IN A 9.60.87.137 -control-2.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.136 -;control-2 IN A 9.60.87.136 - -;entry for the bastion host -bastion IN A 9.60.87.139 - -;entries for the worker nodes -compute-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.135 -;compute-0 IN A 9.60.87.135 -compute-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.134 -;compute-1 IN A 9.60.87.134 - -;entry of your load balancer -haproxy IN A 9.60.87.139 - -;The api identifies the IP of your load balancer. -api IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. -api-int IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. - -;The wildcard also identifies the load balancer. -apps IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. -*.apps IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. - -;EOF diff --git a/files/distribution.rev b/files/distribution.rev deleted file mode 100644 index 3365a5f5..00000000 --- a/files/distribution.rev +++ /dev/null @@ -1,30 +0,0 @@ -$TTL 86400 -@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ib$ - 2020011800 ;Serial - 3600 ;Refresh - 1800 ;Retry - 604800 ;Expire - 86400 ;Minimum TTL -) -;Name Server Information -@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. -bastion IN A 9.60.87.139 - -;Reverse lookup for Name Server -139 IN PTR bastion.distribution.ocpz.wsclab.endicott.ibm.com. - -;PTR Record IP address to Hostname -;133 IN PTR bootstrap. -;138 IN PTR control-0. -;137 IN PTR control-1. -;136 IN PTR control-2. -;135 IN PTR compute-0. -;134 IN PTR compute-1. -138 IN PTR control-0.distribution.ocpz.wsclab.endicott.ibm.com. 
-137 IN PTR control-1.distribution.ocpz.wsclab.endicott.ibm.com. -136 IN PTR control-2.distribution.ocpz.wsclab.endicott.ibm.com. -135 IN PTR compute-0.distribution.ocpz.wsclab.endicott.ibm.com. -134 IN PTR compute-1.distribution.ocpz.wsclab.endicott.ibm.com. -133 IN PTR bootstrap.distribution.ocpz.wsclab.endicott.ibm.com. -139 IN PTR api-int.distribution.ocpz.wsclab.endicott.ibm.com. -139 IN PTR api.distribution.ocpz.wsclab.endicott.ibm.com. diff --git a/files/haproxy8.cfg b/files/haproxy8.cfg deleted file mode 100644 index f7b1f7f0..00000000 --- a/files/haproxy8.cfg +++ /dev/null @@ -1,58 +0,0 @@ -global - log 127.0.0.1 local2 - pidfile /var/run/haproxy.pid - maxconn 4000 - daemon -defaults - mode http - log global - option dontlognull - option http-server-close - option redispatch - retries 3 - timeout http-request 10s - timeout queue 1m - timeout connect 10s - timeout client 1m - timeout server 1m - timeout http-keep-alive 10s - timeout check 10s - maxconn 3000 -frontend stats - bind *:1936 - mode http - log global - maxconn 10 - stats enable - stats hide-version - stats refresh 30s - stats show-node - stats show-desc Stats for distribution cluster - stats auth admin:distribution - stats uri /stats -listen api-server-6443 - bind *:6443 - mode tcp - server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s backup - server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s - server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s - server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s -listen machine-config-server-22623 - bind *:22623 - mode tcp - server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s backup - server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server 
control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s -listen ingress-router-443 - bind *:443 - mode tcp - balance source - server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s - server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s -listen ingress-router-80 - bind *:80 - mode tcp - balance source - server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s - server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s diff --git a/files/install-config.yaml b/files/install-config.yaml deleted file mode 100644 index d016fa9c..00000000 --- a/files/install-config.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -baseDomain: ocpz.wsclab.endicott.ibm.com -compute: -- hyperthreading: Enabled - name: worker - replicas: 0 - architecture : s390x -controlPlane: - hyperthreading: Enabled - name: master - replicas: 3 - architecture : s390x -metadata: - name: distribution -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - networkType: OpenShiftSDN - serviceNetwork: - - 172.30.0.0/16 -platform: - none: {} -fips: false -pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file diff --git a/files/macvtap.xml.j2 b/files/macvtap.xml.j2 deleted file mode 100644 index 388477ea..00000000 --- a/files/macvtap.xml.j2 +++ /dev/null @@ -1,6 +0,0 @@ - - macvtap-net - - - - diff --git a/files/named.conf b/files/named.conf deleted file mode 100644 index b07a27be..00000000 --- a/files/named.conf +++ /dev/null @@ -1,78 +0,0 @@ -// -// named.conf -// -// Provided by Red Hat bind package to configure the ISC BIND named(8) DNS -// 
server as a caching only nameserver (as a localhost DNS resolver only). -// -// See /usr/share/doc/bind*/sample/ for example named configuration files. -// - -options { -// listen-on port 53 { 127.0.0.1; }; - listen-on port 53 { any; }; - listen-on-v6 port 53 { ::1; }; - directory "/var/named"; - dump-file "/var/named/data/cache_dump.db"; - statistics-file "/var/named/data/named_stats.txt"; - memstatistics-file "/var/named/data/named_mem_stats.txt"; - secroots-file "/var/named/data/named.secroots"; - recursing-file "/var/named/data/named.recursing"; - allow-query { any; }; - - /* - - If you are building an AUTHORITATIVE DNS server, do NOT enable recursion. - - If you are building a RECURSIVE (caching) DNS server, you need to enable - recursion. - - If your recursive DNS server has a public IP address, you MUST enable access - control to limit queries to your legitimate users. Failing to do so will - cause your server to become part of large scale DNS amplification - attacks. Implementing BCP38 within your network would greatly - reduce such attack surface - */ - recursion yes; - - dnssec-enable no; - dnssec-validation no; - - managed-keys-directory "/var/named/dynamic"; - - pid-file "/run/named/named.pid"; - session-keyfile "/run/named/session.key"; - - /* https://fedoraproject.org/wiki/Changes/CryptoPolicy */ - include "/etc/crypto-policies/back-ends/bind.config"; -}; - -logging { - channel default_debug { - file "data/named.run"; - severity dynamic; - }; -}; - -zone "." 
IN { - type forward; - forwarders { 9.60.70.82; }; -// type hint; -// file "named.ca"; -}; - -include "/etc/named.rfc1912.zones"; -include "/etc/named.root.key"; - -//forward zone -zone "distribution.ocpz.wsclab.endicott.ibm.com" IN { - type master; - file "distribution.db"; - allow-update { any; }; - allow-query { any; }; -}; - -//backward zone -zone "87.60.9.in-addr.arpa" IN { - type master; - file "distribution.rev"; - allow-update { any; }; - allow-query { any; }; -}; - diff --git a/files/pull-secret.txt b/files/pull-secret.txt deleted file mode 100644 index ee0cd16d..00000000 --- a/files/pull-secret.txt +++ /dev/null @@ -1 +0,0 @@ -{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1L
Zl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}} diff --git a/files/rhel-guest-image.txt b/files/rhel-guest-image.txt deleted file mode 100644 index 242bf2b8..00000000 --- a/files/rhel-guest-image.txt +++ /dev/null @@ -1 +0,0 @@ -# This file is a placeholder to identify the image file without uploading to github diff --git a/files/rhel83.iso b/files/rhel83.iso deleted file mode 100644 index 1bbf7176..00000000 --- a/files/rhel83.iso +++ /dev/null @@ -1 +0,0 @@ -Placeholder for ISO to be manually replaced for copy during install. Before running playbooks. Copy real ISO into this directory to replace this file. 
diff --git a/files/sudoers_zcts b/files/sudoers_zcts deleted file mode 100644 index 054d9c55..00000000 --- a/files/sudoers_zcts +++ /dev/null @@ -1,2 +0,0 @@ -zcts ALL=(ALL) NOPASSWD: ALL - diff --git a/files/test.txt b/files/test.txt deleted file mode 100644 index 16b14f5d..00000000 --- a/files/test.txt +++ /dev/null @@ -1 +0,0 @@ -test file diff --git a/group_vars/bastion/main.yaml b/group_vars/bastion/main.yaml deleted file mode 100644 index e69de29b..00000000 diff --git a/main.yaml b/main.yaml index 7e78360c..ee13f377 100644 --- a/main.yaml +++ b/main.yaml @@ -70,6 +70,7 @@ - create_control_nodes - create_compute_nodes +## IN DEVELOPMENT #- hosts: localhost #connection: local #become: false diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 07764748..4ab20e45 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -23,7 +23,7 @@ # command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - name: Load in variables from env.yaml - tags: setup + tags: kvm_host, bastionvm include_vars: env.yaml - name: check if bastion already exists diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 5d026a17..8d2ea665 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Load in variables from env.yaml - tags: setup + tags: bootstrap include_vars: env.yaml - name: check if bootstrap already exists diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index 8c7908b2..8069a6d0 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Load in variables from env.yaml - tags: setup + tags: compute include_vars: env.yaml - name: check if compute-0 already exists diff --git 
a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index 3b5c50a7..4d182439 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Load in variables from env.yaml - tags: setup + tags: control include_vars: env.yaml - name: check if control-0 already exists diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index 3f42bcab..1c3bf19b 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -1,11 +1,20 @@ --- +- name: Load in variables from env.yaml + tags: haproxy,bastion + include_vars: env.yaml + - name: Change permissive domain for haproxy tags: selinux,haproxy,bastion selinux_permissive: name: haproxy_t permissive: true +- name: use template to create haproxy config file + template: + src: haproxy.cfg.j2 + dest: + - name: move haproxy config file to bastion tags: haproxy,bastion copy: From f26625aad2617f48e8e43406c07bb67da62947d5 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 18:54:22 -0500 Subject: [PATCH 410/885] Deleted deprecated files, started working on templating haproxy.cfg, and fixed tags in create nodes playbooks --- files/cluster-pvc.yaml | 18 ----- files/distribution.db | 48 ------------- files/distribution.rev | 30 --------- files/haproxy8.cfg | 58 ---------------- files/install-config.yaml | 26 -------- files/macvtap.xml.j2 | 6 -- files/named.conf | 78 ---------------------- files/pull-secret.txt | 1 - files/rhel-guest-image.txt | 1 - files/rhel83.iso | 1 - files/sudoers_zcts | 2 - files/test.txt | 1 - group_vars/bastion/main.yaml | 0 main.yaml | 1 + roles/create_bastion/tasks/main.yaml | 2 +- roles/create_bootstrap/tasks/main.yaml | 2 +- roles/create_compute_nodes/tasks/main.yaml | 2 +- roles/create_control_nodes/tasks/main.yaml | 2 +- roles/haproxy/tasks/main.yaml | 9 +++ 19 files changed, 14 insertions(+), 274 deletions(-) delete mode 100644 files/cluster-pvc.yaml 
delete mode 100644 files/distribution.db delete mode 100644 files/distribution.rev delete mode 100644 files/haproxy8.cfg delete mode 100644 files/install-config.yaml delete mode 100644 files/macvtap.xml.j2 delete mode 100644 files/named.conf delete mode 100644 files/pull-secret.txt delete mode 100644 files/rhel-guest-image.txt delete mode 100644 files/rhel83.iso delete mode 100644 files/sudoers_zcts delete mode 100644 files/test.txt delete mode 100644 group_vars/bastion/main.yaml diff --git a/files/cluster-pvc.yaml b/files/cluster-pvc.yaml deleted file mode 100644 index f2feb6f2..00000000 --- a/files/cluster-pvc.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: PersistentVolume -metadata: - name: pv0001 - annotations: - volume.beta.kubernetes.io/mount-options: rw,nfsvers=4,noexec -spec: - capacity: - storage: 150Gi - accessModes: - - ReadWriteOnce - nfs: - path: /mnt/nfs-shares/dist-ocp - server: 9.60.87.222 - persistentVolumeReclaimPolicy: Retain - claimRef: - name: claim1 - namespace: default \ No newline at end of file diff --git a/files/distribution.db b/files/distribution.db deleted file mode 100644 index 35be68ba..00000000 --- a/files/distribution.db +++ /dev/null @@ -1,48 +0,0 @@ -$TTL 86400 -@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com.( - 2020021821 ;Serial - 3600 ;Refresh - 1800 ;Retry - 604800 ;Expire - 86400 ;Minimum TTL -) - -;Name Server Information -@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. - -;IP Address for Name Server -bastion IN A 9.60.87.139 - -;entry for bootstrap host. 
-;bootstrap IN A 9.60.87.133 -bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.133 - -;entries for the master nodes -control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 -;control-0 IN A 9.60.87.138 -control-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.137 -;control-1 IN A 9.60.87.137 -control-2.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.136 -;control-2 IN A 9.60.87.136 - -;entry for the bastion host -bastion IN A 9.60.87.139 - -;entries for the worker nodes -compute-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.135 -;compute-0 IN A 9.60.87.135 -compute-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.134 -;compute-1 IN A 9.60.87.134 - -;entry of your load balancer -haproxy IN A 9.60.87.139 - -;The api identifies the IP of your load balancer. -api IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. -api-int IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. - -;The wildcard also identifies the load balancer. -apps IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. -*.apps IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. - -;EOF diff --git a/files/distribution.rev b/files/distribution.rev deleted file mode 100644 index 3365a5f5..00000000 --- a/files/distribution.rev +++ /dev/null @@ -1,30 +0,0 @@ -$TTL 86400 -@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ib$ - 2020011800 ;Serial - 3600 ;Refresh - 1800 ;Retry - 604800 ;Expire - 86400 ;Minimum TTL -) -;Name Server Information -@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. -bastion IN A 9.60.87.139 - -;Reverse lookup for Name Server -139 IN PTR bastion.distribution.ocpz.wsclab.endicott.ibm.com. - -;PTR Record IP address to Hostname -;133 IN PTR bootstrap. -;138 IN PTR control-0. -;137 IN PTR control-1. -;136 IN PTR control-2. -;135 IN PTR compute-0. -;134 IN PTR compute-1. -138 IN PTR control-0.distribution.ocpz.wsclab.endicott.ibm.com. 
-137 IN PTR control-1.distribution.ocpz.wsclab.endicott.ibm.com. -136 IN PTR control-2.distribution.ocpz.wsclab.endicott.ibm.com. -135 IN PTR compute-0.distribution.ocpz.wsclab.endicott.ibm.com. -134 IN PTR compute-1.distribution.ocpz.wsclab.endicott.ibm.com. -133 IN PTR bootstrap.distribution.ocpz.wsclab.endicott.ibm.com. -139 IN PTR api-int.distribution.ocpz.wsclab.endicott.ibm.com. -139 IN PTR api.distribution.ocpz.wsclab.endicott.ibm.com. diff --git a/files/haproxy8.cfg b/files/haproxy8.cfg deleted file mode 100644 index f7b1f7f0..00000000 --- a/files/haproxy8.cfg +++ /dev/null @@ -1,58 +0,0 @@ -global - log 127.0.0.1 local2 - pidfile /var/run/haproxy.pid - maxconn 4000 - daemon -defaults - mode http - log global - option dontlognull - option http-server-close - option redispatch - retries 3 - timeout http-request 10s - timeout queue 1m - timeout connect 10s - timeout client 1m - timeout server 1m - timeout http-keep-alive 10s - timeout check 10s - maxconn 3000 -frontend stats - bind *:1936 - mode http - log global - maxconn 10 - stats enable - stats hide-version - stats refresh 30s - stats show-node - stats show-desc Stats for distribution cluster - stats auth admin:distribution - stats uri /stats -listen api-server-6443 - bind *:6443 - mode tcp - server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s backup - server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s - server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s - server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s -listen machine-config-server-22623 - bind *:22623 - mode tcp - server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s backup - server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server 
control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s -listen ingress-router-443 - bind *:443 - mode tcp - balance source - server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s - server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s -listen ingress-router-80 - bind *:80 - mode tcp - balance source - server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s - server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s diff --git a/files/install-config.yaml b/files/install-config.yaml deleted file mode 100644 index d016fa9c..00000000 --- a/files/install-config.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -baseDomain: ocpz.wsclab.endicott.ibm.com -compute: -- hyperthreading: Enabled - name: worker - replicas: 0 - architecture : s390x -controlPlane: - hyperthreading: Enabled - name: master - replicas: 3 - architecture : s390x -metadata: - name: distribution -networking: - clusterNetwork: - - cidr: 10.128.0.0/14 - hostPrefix: 23 - networkType: OpenShiftSDN - serviceNetwork: - - 172.30.0.0/16 -platform: - none: {} -fips: false -pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1Rz
ZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}}' -sshKey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8HQPCbHgHsJJIUcmrrBotZlxw9GOKvZQ57IJxA+6/RnmDQa9RsPpNOTBjcQ4D4XIr9Iz40wVbUCzp5pbeTV0irO2TcN0irUdMF637HYbjYMZv8CHHfhjH/HXQAW2e7u4naxM/jBVJbgB1rTj5BRsHGH7IWv/Nrv8d2Q6r7HW7fh1TYZa2dQ70ZqOwD/SfafTf3041U4+MGHYefF0MITMHB3iHFeyjl0NGRc2F4WtEQc5uhqlUjLo+BymgJwvYjNWgl8QYL5XwqzFzBfU58OvA9nb1WRoxVKFibnVrKnEy+UVaHqsrRL25apy3abWjyDzTNYu1z1fL6z6X7onZciiWNHMDfRpPjFPD8GVivfne5oAIaGltmvRFipAPPriV3MSVbtS0dBzOfkjKZf7spZ18Asj31tceMpwGDAOK5T+I5NPHSteXDAB3Fbii8zA3cvLe2OH6dhdq+eKwjUALXaHl/VFXbAvy+cHHc1XrKWrxle+dTtyRY+3xTYb4KbbxzJRsnDJ+rNRGX5wIGMGPijIDWtaMKv10GhlG9qQVoWnVpikwYzZfuAL17Cc1NAe6OaT4cRBux4BLFW8e7eaXEXlk4u+qT/VpZ62Doo6XRHxYpC2Z72y4bXp8zUSC9NxViXj4hQkMR1uItXCZrfPGr4XxOvS8gvkEyS7PeX+yWbpe2w== root@bastion.distribution.ocpz.wsclab.endicott.ibm.com' \ No newline at end of file diff --git a/files/macvtap.xml.j2 b/files/macvtap.xml.j2 deleted file mode 100644 index 388477ea..00000000 --- a/files/macvtap.xml.j2 +++ /dev/null @@ -1,6 +0,0 @@ - - macvtap-net - - - - diff --git a/files/named.conf b/files/named.conf deleted file mode 100644 index b07a27be..00000000 --- a/files/named.conf +++ /dev/null @@ -1,78 +0,0 @@ -// -// named.conf -// -// Provided by Red Hat bind package to configure the ISC BIND named(8) DNS -// 
server as a caching only nameserver (as a localhost DNS resolver only). -// -// See /usr/share/doc/bind*/sample/ for example named configuration files. -// - -options { -// listen-on port 53 { 127.0.0.1; }; - listen-on port 53 { any; }; - listen-on-v6 port 53 { ::1; }; - directory "/var/named"; - dump-file "/var/named/data/cache_dump.db"; - statistics-file "/var/named/data/named_stats.txt"; - memstatistics-file "/var/named/data/named_mem_stats.txt"; - secroots-file "/var/named/data/named.secroots"; - recursing-file "/var/named/data/named.recursing"; - allow-query { any; }; - - /* - - If you are building an AUTHORITATIVE DNS server, do NOT enable recursion. - - If you are building a RECURSIVE (caching) DNS server, you need to enable - recursion. - - If your recursive DNS server has a public IP address, you MUST enable access - control to limit queries to your legitimate users. Failing to do so will - cause your server to become part of large scale DNS amplification - attacks. Implementing BCP38 within your network would greatly - reduce such attack surface - */ - recursion yes; - - dnssec-enable no; - dnssec-validation no; - - managed-keys-directory "/var/named/dynamic"; - - pid-file "/run/named/named.pid"; - session-keyfile "/run/named/session.key"; - - /* https://fedoraproject.org/wiki/Changes/CryptoPolicy */ - include "/etc/crypto-policies/back-ends/bind.config"; -}; - -logging { - channel default_debug { - file "data/named.run"; - severity dynamic; - }; -}; - -zone "." 
IN { - type forward; - forwarders { 9.60.70.82; }; -// type hint; -// file "named.ca"; -}; - -include "/etc/named.rfc1912.zones"; -include "/etc/named.root.key"; - -//forward zone -zone "distribution.ocpz.wsclab.endicott.ibm.com" IN { - type master; - file "distribution.db"; - allow-update { any; }; - allow-query { any; }; -}; - -//backward zone -zone "87.60.9.in-addr.arpa" IN { - type master; - file "distribution.rev"; - allow-update { any; }; - allow-query { any; }; -}; - diff --git a/files/pull-secret.txt b/files/pull-secret.txt deleted file mode 100644 index ee0cd16d..00000000 --- a/files/pull-secret.txt +++ /dev/null @@ -1 +0,0 @@ -{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K2libXBzd2lsc28xdXptamF3Ym8wMjFsNXR0YnpqZnJ1dGlmZjM6UzROMUNYOU1UMkdRRUI1VDVETkhXWk1FTzUxOVdJRVhNSFRCUUVEVlVXUU1RNE1GTFlWOFQ4VFg1ODk4MkpOOQ==","email":"pswilso@us.ibm.com"},"registry.connect.redhat.com":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1L
Zl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"},"registry.redhat.io":{"auth":"NDAzNjY1M3x1aGMtMVVaTUphd0JPMDIxTDVUdGJaSkZydXRJZmYzOmV5SmhiR2NpT2lKU1V6VXhNaUo5LmV5SnpkV0lpT2lJMFlqZ3dZMlZrTWpJeFlqUTBaRFF3WVdGaE1qTXdZMkZqWmpGaE1XVTJOaUo5LmZDLW5OMVI1cWg1TEVjcGM1dHJSQ2FnVkk0Uk9LUk4zSEE5N3paR3ZZX0JrbldOYVgwU1N3TnZrY2p4dndPOXdjU0F6dmw4OElZYkhlRWJib2lCT2Q4Nlp0dXloNExLU1BGNVBSenlENElyeWZyaC1iT3VWVFlfN3o2ZzIySERrRzdWNGxJdzFDVVBkUV9nWDFpaXpkd0Z3d2NKaEZ1RzZGU2dxSFdtd2dpc1l2NlF6cG9neUo4WnFZTl90YkNSUWNLa1F6ODRPSm4zd25pMHM1LWFXWGdQUHhiT3VibXBuUldZWXVpdHBRTHVPWUFUbXUwLUhXZGRpQnB5QVN5bHJuamVoTVQ4ZzBxdWpzQmJwU3FJSkx1LWljWUF4Y3RYTGxVZEstTWIxX3QxSjlZX3B5NEpCVl9JSTBsQmtCU2VDVlpFbjRJQnFwNk1nTzFmS2FmcEl3V3hnOHpvdVJ6RFBWdFI4OWExT1E4eEFaaGotQUQ3MUV5dm9YaWFJTDdzSWFfX0VudVFGcXhrUGdaWmZHRUY5OHh3QTBpYkExV1FzdWg2Skg4aF8xM1BrYndSZF9mWkRjelk5OTZobC1sQ1ZGMm5kQ29Oc19xNzExdUNDT1FOUUtOT19saDRlNTY5R0hMTUhMcWV4b2hyUzZOSWN1Z053U0NYNTRkWXlVRjhsbXhQMUhiX3lmdU1LZl9QOEF1TW54UGRTVHJXRWQ3dGJNX2JTMHBHNnRKQWktaDlHYnYwd2dSbnN1ZWNjZ3lyWHRJREhnZVZzenRFTWo3cHRnOGhYa2U1ODI3a01Wdmt4U3dlX1lZTnNfTlFFZ1BvQTZNYjgwX1IxNWxBdkpEN3NrVDFoeEpVaFJjWlZLRkZNcG1lTUJhSURKek9uaHNFZktIZHBZcDBzMDlB","email":"pswilso@us.ibm.com"}}} diff --git a/files/rhel-guest-image.txt b/files/rhel-guest-image.txt deleted file mode 100644 index 242bf2b8..00000000 --- a/files/rhel-guest-image.txt +++ /dev/null @@ -1 +0,0 @@ -# This file is a placeholder to identify the image file without uploading to github diff --git a/files/rhel83.iso b/files/rhel83.iso deleted file mode 100644 index 1bbf7176..00000000 --- a/files/rhel83.iso +++ /dev/null @@ -1 +0,0 @@ -Placeholder for ISO to be manually replaced for copy during install. Before running playbooks. Copy real ISO into this directory to replace this file. 
diff --git a/files/sudoers_zcts b/files/sudoers_zcts deleted file mode 100644 index 054d9c55..00000000 --- a/files/sudoers_zcts +++ /dev/null @@ -1,2 +0,0 @@ -zcts ALL=(ALL) NOPASSWD: ALL - diff --git a/files/test.txt b/files/test.txt deleted file mode 100644 index 16b14f5d..00000000 --- a/files/test.txt +++ /dev/null @@ -1 +0,0 @@ -test file diff --git a/group_vars/bastion/main.yaml b/group_vars/bastion/main.yaml deleted file mode 100644 index e69de29b..00000000 diff --git a/main.yaml b/main.yaml index 7e78360c..ee13f377 100644 --- a/main.yaml +++ b/main.yaml @@ -70,6 +70,7 @@ - create_control_nodes - create_compute_nodes +## IN DEVELOPMENT #- hosts: localhost #connection: local #become: false diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 07764748..4ab20e45 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -23,7 +23,7 @@ # command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G - name: Load in variables from env.yaml - tags: setup + tags: kvm_host, bastionvm include_vars: env.yaml - name: check if bastion already exists diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 5d026a17..8d2ea665 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Load in variables from env.yaml - tags: setup + tags: bootstrap include_vars: env.yaml - name: check if bootstrap already exists diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index 8c7908b2..8069a6d0 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Load in variables from env.yaml - tags: setup + tags: compute include_vars: env.yaml - name: check if compute-0 already exists diff --git 
a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index 3b5c50a7..4d182439 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Load in variables from env.yaml - tags: setup + tags: control include_vars: env.yaml - name: check if control-0 already exists diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index 3f42bcab..1c3bf19b 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -1,11 +1,20 @@ --- +- name: Load in variables from env.yaml + tags: haproxy,bastion + include_vars: env.yaml + - name: Change permissive domain for haproxy tags: selinux,haproxy,bastion selinux_permissive: name: haproxy_t permissive: true +- name: use template to create haproxy config file + template: + src: haproxy.cfg.j2 + dest: + - name: move haproxy config file to bastion tags: haproxy,bastion copy: From 2676d731e5aa16e7a8d626d49b66da257d2d47ba Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 19:01:10 -0500 Subject: [PATCH 411/885] Added templating for haproxy.cfg --- roles/haproxy/files/haproxy.cfg | 58 -------------------------- roles/haproxy/tasks/main.yaml | 9 +--- roles/haproxy/templates/haproxy.cfg.j2 | 58 ++++++++++++++++++++++++++ 3 files changed, 60 insertions(+), 65 deletions(-) delete mode 100644 roles/haproxy/files/haproxy.cfg create mode 100644 roles/haproxy/templates/haproxy.cfg.j2 diff --git a/roles/haproxy/files/haproxy.cfg b/roles/haproxy/files/haproxy.cfg deleted file mode 100644 index f7b1f7f0..00000000 --- a/roles/haproxy/files/haproxy.cfg +++ /dev/null @@ -1,58 +0,0 @@ -global - log 127.0.0.1 local2 - pidfile /var/run/haproxy.pid - maxconn 4000 - daemon -defaults - mode http - log global - option dontlognull - option http-server-close - option redispatch - retries 3 - timeout http-request 10s - timeout queue 1m - timeout connect 10s - timeout client 1m - timeout server 1m - 
timeout http-keep-alive 10s - timeout check 10s - maxconn 3000 -frontend stats - bind *:1936 - mode http - log global - maxconn 10 - stats enable - stats hide-version - stats refresh 30s - stats show-node - stats show-desc Stats for distribution cluster - stats auth admin:distribution - stats uri /stats -listen api-server-6443 - bind *:6443 - mode tcp - server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s backup - server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s - server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s - server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s -listen machine-config-server-22623 - bind *:22623 - mode tcp - server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s backup - server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s -listen ingress-router-443 - bind *:443 - mode tcp - balance source - server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s - server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s -listen ingress-router-80 - bind *:80 - mode tcp - balance source - server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s - server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index 1c3bf19b..7df039a4 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -11,17 +11,12 @@ permissive: true - name: use template to create haproxy config file + tags: haproxy,bastion template: src: haproxy.cfg.j2 - dest: - -- name: move 
haproxy config file to bastion - tags: haproxy,bastion - copy: - src: haproxy.cfg dest: /etc/haproxy/haproxy.cfg - force: yes backup: yes + force: yes - name: enable haproxy tags: haproxy,bastion diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/haproxy/templates/haproxy.cfg.j2 new file mode 100644 index 00000000..9199ff87 --- /dev/null +++ b/roles/haproxy/templates/haproxy.cfg.j2 @@ -0,0 +1,58 @@ +global + log 127.0.0.1 local2 + pidfile /var/run/haproxy.pid + maxconn 4000 + daemon +defaults + mode http + log global + option dontlognull + option http-server-close + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout http-keep-alive 10s + timeout check 10s + maxconn 3000 +frontend stats + bind *:1936 + mode http + log global + maxconn 10 + stats enable + stats hide-version + stats refresh 30s + stats show-node + stats show-desc Stats for {{env_metadata_name}} cluster + stats auth admin:{{env_metadata_name}} + stats uri /stats +listen api-server-6443 + bind *:6443 + mode tcp + server bootstrap bootstrap.{{env_metadata_name}}.{{env_baseDomain}}:6443 check inter 1s backup + server control-0 control-0.{{env_metadata_name}}.{{env_baseDomain}}:6443 check inter 1s + server control-1 control-1.{{env_metadata_name}}.{{env_baseDomain}}:6443 check inter 1s + server control-2 control-2.{{env_metadata_name}}.{{env_baseDomain}}:6443 check inter 1s +listen machine-config-server-22623 + bind *:22623 + mode tcp + server bootstrap bootstrap.{{env_metadata_name}}.{{env_baseDomain}}:22623 check inter 1s backup + server control-0 control-0.{{env_metadata_name}}.{{env_baseDomain}}:22623 check inter 1s + server control-1 control-1.{{env_metadata_name}}.{{env_baseDomain}}:22623 check inter 1s + server control-2 control-2.{{env_metadata_name}}.{{env_baseDomain}}:22623 check inter 1s +listen ingress-router-443 + bind *:443 + mode tcp + balance source + server compute-0 
compute-0.{{env_metadata_name}}.{{env_baseDomain}}:443 check inter 1s + server compute-1 compute-1.{{env_metadata_name}}.{{env_baseDomain}}:443 check inter 1s +listen ingress-router-80 + bind *:80 + mode tcp + balance source + server compute-0 compute-0.{{env_metadata_name}}.{{env_baseDomain}}:80 check inter 1s + server compute-1 compute-1.{{env_metadata_name}}.{{env_baseDomain}}:80 check inter 1s From 875aac15d9288fbb7b416e89babef5d9f2670903 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 19:01:10 -0500 Subject: [PATCH 412/885] Added templating for haproxy.cfg --- roles/haproxy/files/haproxy.cfg | 58 -------------------------- roles/haproxy/tasks/main.yaml | 9 +--- roles/haproxy/templates/haproxy.cfg.j2 | 58 ++++++++++++++++++++++++++ 3 files changed, 60 insertions(+), 65 deletions(-) delete mode 100644 roles/haproxy/files/haproxy.cfg create mode 100644 roles/haproxy/templates/haproxy.cfg.j2 diff --git a/roles/haproxy/files/haproxy.cfg b/roles/haproxy/files/haproxy.cfg deleted file mode 100644 index f7b1f7f0..00000000 --- a/roles/haproxy/files/haproxy.cfg +++ /dev/null @@ -1,58 +0,0 @@ -global - log 127.0.0.1 local2 - pidfile /var/run/haproxy.pid - maxconn 4000 - daemon -defaults - mode http - log global - option dontlognull - option http-server-close - option redispatch - retries 3 - timeout http-request 10s - timeout queue 1m - timeout connect 10s - timeout client 1m - timeout server 1m - timeout http-keep-alive 10s - timeout check 10s - maxconn 3000 -frontend stats - bind *:1936 - mode http - log global - maxconn 10 - stats enable - stats hide-version - stats refresh 30s - stats show-node - stats show-desc Stats for distribution cluster - stats auth admin:distribution - stats uri /stats -listen api-server-6443 - bind *:6443 - mode tcp - server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s backup - server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s - server control-1 
control-1.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s - server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s -listen machine-config-server-22623 - bind *:22623 - mode tcp - server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s backup - server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s -listen ingress-router-443 - bind *:443 - mode tcp - balance source - server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s - server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s -listen ingress-router-80 - bind *:80 - mode tcp - balance source - server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s - server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index 1c3bf19b..7df039a4 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -11,17 +11,12 @@ permissive: true - name: use template to create haproxy config file + tags: haproxy,bastion template: src: haproxy.cfg.j2 - dest: - -- name: move haproxy config file to bastion - tags: haproxy,bastion - copy: - src: haproxy.cfg dest: /etc/haproxy/haproxy.cfg - force: yes backup: yes + force: yes - name: enable haproxy tags: haproxy,bastion diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/haproxy/templates/haproxy.cfg.j2 new file mode 100644 index 00000000..9199ff87 --- /dev/null +++ b/roles/haproxy/templates/haproxy.cfg.j2 @@ -0,0 +1,58 @@ +global + log 127.0.0.1 local2 + pidfile /var/run/haproxy.pid + maxconn 4000 + daemon +defaults + mode http + log global + option dontlognull + option 
http-server-close + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout http-keep-alive 10s + timeout check 10s + maxconn 3000 +frontend stats + bind *:1936 + mode http + log global + maxconn 10 + stats enable + stats hide-version + stats refresh 30s + stats show-node + stats show-desc Stats for {{env_metadata_name}} cluster + stats auth admin:{{env_metadata_name}} + stats uri /stats +listen api-server-6443 + bind *:6443 + mode tcp + server bootstrap bootstrap.{{env_metadata_name}}.{{env_baseDomain}}:6443 check inter 1s backup + server control-0 control-0.{{env_metadata_name}}.{{env_baseDomain}}:6443 check inter 1s + server control-1 control-1.{{env_metadata_name}}.{{env_baseDomain}}:6443 check inter 1s + server control-2 control-2.{{env_metadata_name}}.{{env_baseDomain}}:6443 check inter 1s +listen machine-config-server-22623 + bind *:22623 + mode tcp + server bootstrap bootstrap.{{env_metadata_name}}.{{env_baseDomain}}:22623 check inter 1s backup + server control-0 control-0.{{env_metadata_name}}.{{env_baseDomain}}:22623 check inter 1s + server control-1 control-1.{{env_metadata_name}}.{{env_baseDomain}}:22623 check inter 1s + server control-2 control-2.{{env_metadata_name}}.{{env_baseDomain}}:22623 check inter 1s +listen ingress-router-443 + bind *:443 + mode tcp + balance source + server compute-0 compute-0.{{env_metadata_name}}.{{env_baseDomain}}:443 check inter 1s + server compute-1 compute-1.{{env_metadata_name}}.{{env_baseDomain}}:443 check inter 1s +listen ingress-router-80 + bind *:80 + mode tcp + balance source + server compute-0 compute-0.{{env_metadata_name}}.{{env_baseDomain}}:80 check inter 1s + server compute-1 compute-1.{{env_metadata_name}}.{{env_baseDomain}}:80 check inter 1s From fbcfe5a362b58951e12a889840b02b44e250376e Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 19:10:31 -0500 Subject: [PATCH 413/885] Deleted sections of main.yaml 
still in development and their respective roles folders. Created backups on local machine. --- inventory | 2 +- main.yaml | 29 +------- roles/connect_cluster/tasks/main.yaml | 96 ------------------------- roles/ssh_config_jump/tasks/main.yaml | 33 --------- roles/wait_for_bootkube/tasks/main.yaml | 26 ------- 5 files changed, 2 insertions(+), 184 deletions(-) delete mode 100644 roles/connect_cluster/tasks/main.yaml delete mode 100644 roles/ssh_config_jump/tasks/main.yaml delete mode 100644 roles/wait_for_bootkube/tasks/main.yaml diff --git a/inventory b/inventory index 0b3481a3..f914a36d 100755 --- a/inventory +++ b/inventory @@ -24,4 +24,4 @@ ansible_python_interpreter=/usr/bin/python3 [compute_nodes] 9.60.87.134 9.60.87.135 -# END ANSIBLE MANAGED BLOCK +# END ANSIBLE MANAGED BLOCK \ No newline at end of file diff --git a/main.yaml b/main.yaml index ee13f377..44f9dfb1 100644 --- a/main.yaml +++ b/main.yaml @@ -68,31 +68,4 @@ - prep_kvm_guests - create_bootstrap - create_control_nodes - - create_compute_nodes - -## IN DEVELOPMENT -#- hosts: localhost - #connection: local - #become: false - #gather_facts: yes - #roles: - #- ssh_config_jump - -#- hosts: bastion - #become: true - #gather_facts: no - #roles: - #- wait_for_bootstrap - -#- hosts: bastion - #tags: bastion,cluster - #become: true - #gather_facts: no - #roles: - #- connect_cluster - -#- hosts: bastion - #become: true - #gather_facts: no - #roles: - #- wait_for_bootkube \ No newline at end of file + - create_compute_nodes \ No newline at end of file diff --git a/roles/connect_cluster/tasks/main.yaml b/roles/connect_cluster/tasks/main.yaml deleted file mode 100644 index fb361d27..00000000 --- a/roles/connect_cluster/tasks/main.yaml +++ /dev/null @@ -1,96 +0,0 @@ ---- - -#- name: Add another bin dir to system-wide $PATH. 
-# copy: -# dest: /etc/profile.d/custom-path.sh -# content: 'PATH=$PATH:{{ my_custom_path_var }}' - -- name: echo path - tags: cluster - shell: "echo $PATH" - register: path_check - -- name: print results of path check - tags: cluster - debug: - var: path_check.stdout - -- name: check who the user is - tags: cluster - shell: "whoami" - register: root_check - -- name: print results of checking what user is running tasks - tags: cluster - debug: - var: root_check.stdout - -- name: export kube config file - tags: cluster - shell: "export KUBECONFIG=/ocpinst/auth/kubeconfig" - args: - chdir: / - -- name: export kube config file - tags: cluster - shell: "export KUBECONFIG=/ocpinst/auth/kubeconfig" - args: - chdir: / - -- name: check if system admin - tags: cluster - command: "oc whoami" - register: whoami_check - #until: whoami_check.stdout.find("system:admin") != -1 - #retries: 5 - #delay: 30 - -- name: print whoami_check results to terminal - tags: cluster - debug: - var: whoami_check.stdout - -- name: get csr info - tags: cluster - command: oc get csr - register: csr - -- name: print csr info to terminal - tags: cluster - debug: - var: csr.stdout - -- name: approve all pending certificates - tags: cluster - command: "for i in `oc get csr --no-headers | grep -i pending | awk '{ print $1 }'`; do oc adm certificate approve $i; done" - register: csr_approve - -- name: print results from csr approval - tags: cluster - debug: - var: csr_approve.stdout - -- name: wait 5 minutes - tags: cluster - pause: - minutes: 5 - -- name: get csr info - tags: cluster - command: oc get csr - register: csr - -- name: print csr info to terminal - tags: cluster - debug: - var: csr.stdout - -- name: approve all pending certificates - tags: cluster - command: for i in `oc get csr --no-headers | grep -i pending | awk '{ print $1 }'`; do oc adm certificate approve $i; done - register: csr_approve - -- name: print results from csr approval - tags: cluster - debug: - var: csr_approve.stdout \ No 
newline at end of file diff --git a/roles/ssh_config_jump/tasks/main.yaml b/roles/ssh_config_jump/tasks/main.yaml deleted file mode 100644 index 16a59fa9..00000000 --- a/roles/ssh_config_jump/tasks/main.yaml +++ /dev/null @@ -1,33 +0,0 @@ -## will not work as is. Need to research using ProxyCommands or SSHuttle to run commands on bootstrap via the bastion jump host - ---- - -- name: Check that the ssh_config exists - tags: keymastr - stat: - path: ~/.ssh/config - register: ssh_config - -- name: Create ssh config file, if it doesnt exist already - tags: keymastr - file: - path: ~/.ssh/config - state: touch - when: not ssh_config.stat.exists - -- name: Insert ssh keys for jump host configuration in /ssh/config - tags: keymastr - blockinfile: - path: ~/.ssh/config - block: | - Host bastion_server - HostName 9.60.87.139 - IdentityFile ~/.ssh/ansible.pub - User root - - Host bootstrap_server - HostName 9.60.87.133 - IdentityFile ~/.ssh/ansible.pub - User core - ProxyJump bastion_server - \ No newline at end of file diff --git a/roles/wait_for_bootkube/tasks/main.yaml b/roles/wait_for_bootkube/tasks/main.yaml deleted file mode 100644 index c4555da4..00000000 --- a/roles/wait_for_bootkube/tasks/main.yaml +++ /dev/null @@ -1,26 +0,0 @@ -#Will not work as is. Waiting to figure out sshuttle or ProxyCommands to run commands on the bootstrap from bastion jump host. - ---- - -#when waiting for bootstrap to come up for the first time, wait for "github.com/openshift/cluster-bootstrap/pkg/start/status.go:66:" - -- name: ssh to bootstrap from bastion - command: ssh core@9.60.87.133 - -#- name: Wait 30 minutes for bootkube to connect nodes. May take up to 45 minutes. - #pause: - #minutes: 1 - -#above is for testing. Use below when above works. -#1800 - -- name: Start checking for bootkube to complete connecting nodes. Checks every 2 minutes. 
- command: journalctl -u bootkube.service - register: bootkube_status - until: bootkube_status.stdout.find("bootkube.service complete") != -1 - retries: 15 - delay: 1 - -#above is for testing. Use below when above works. -#retries: 15 -#delay: 120 \ No newline at end of file From 459ad482e22c3ad79f8b903d0e743e7c9a751930 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 19:10:31 -0500 Subject: [PATCH 414/885] Deleted sections of main.yaml still in development and their respective roles folders. Created backups on local machine. --- inventory | 2 +- main.yaml | 29 +------- roles/connect_cluster/tasks/main.yaml | 96 ------------------------- roles/ssh_config_jump/tasks/main.yaml | 33 --------- roles/wait_for_bootkube/tasks/main.yaml | 26 ------- 5 files changed, 2 insertions(+), 184 deletions(-) delete mode 100644 roles/connect_cluster/tasks/main.yaml delete mode 100644 roles/ssh_config_jump/tasks/main.yaml delete mode 100644 roles/wait_for_bootkube/tasks/main.yaml diff --git a/inventory b/inventory index 0b3481a3..f914a36d 100755 --- a/inventory +++ b/inventory @@ -24,4 +24,4 @@ ansible_python_interpreter=/usr/bin/python3 [compute_nodes] 9.60.87.134 9.60.87.135 -# END ANSIBLE MANAGED BLOCK +# END ANSIBLE MANAGED BLOCK \ No newline at end of file diff --git a/main.yaml b/main.yaml index ee13f377..44f9dfb1 100644 --- a/main.yaml +++ b/main.yaml @@ -68,31 +68,4 @@ - prep_kvm_guests - create_bootstrap - create_control_nodes - - create_compute_nodes - -## IN DEVELOPMENT -#- hosts: localhost - #connection: local - #become: false - #gather_facts: yes - #roles: - #- ssh_config_jump - -#- hosts: bastion - #become: true - #gather_facts: no - #roles: - #- wait_for_bootstrap - -#- hosts: bastion - #tags: bastion,cluster - #become: true - #gather_facts: no - #roles: - #- connect_cluster - -#- hosts: bastion - #become: true - #gather_facts: no - #roles: - #- wait_for_bootkube \ No newline at end of file + - create_compute_nodes \ No newline at end of file diff --git 
a/roles/connect_cluster/tasks/main.yaml b/roles/connect_cluster/tasks/main.yaml deleted file mode 100644 index fb361d27..00000000 --- a/roles/connect_cluster/tasks/main.yaml +++ /dev/null @@ -1,96 +0,0 @@ ---- - -#- name: Add another bin dir to system-wide $PATH. -# copy: -# dest: /etc/profile.d/custom-path.sh -# content: 'PATH=$PATH:{{ my_custom_path_var }}' - -- name: echo path - tags: cluster - shell: "echo $PATH" - register: path_check - -- name: print results of path check - tags: cluster - debug: - var: path_check.stdout - -- name: check who the user is - tags: cluster - shell: "whoami" - register: root_check - -- name: print results of checking what user is running tasks - tags: cluster - debug: - var: root_check.stdout - -- name: export kube config file - tags: cluster - shell: "export KUBECONFIG=/ocpinst/auth/kubeconfig" - args: - chdir: / - -- name: export kube config file - tags: cluster - shell: "export KUBECONFIG=/ocpinst/auth/kubeconfig" - args: - chdir: / - -- name: check if system admin - tags: cluster - command: "oc whoami" - register: whoami_check - #until: whoami_check.stdout.find("system:admin") != -1 - #retries: 5 - #delay: 30 - -- name: print whoami_check results to terminal - tags: cluster - debug: - var: whoami_check.stdout - -- name: get csr info - tags: cluster - command: oc get csr - register: csr - -- name: print csr info to terminal - tags: cluster - debug: - var: csr.stdout - -- name: approve all pending certificates - tags: cluster - command: "for i in `oc get csr --no-headers | grep -i pending | awk '{ print $1 }'`; do oc adm certificate approve $i; done" - register: csr_approve - -- name: print results from csr approval - tags: cluster - debug: - var: csr_approve.stdout - -- name: wait 5 minutes - tags: cluster - pause: - minutes: 5 - -- name: get csr info - tags: cluster - command: oc get csr - register: csr - -- name: print csr info to terminal - tags: cluster - debug: - var: csr.stdout - -- name: approve all pending certificates 
- tags: cluster - command: for i in `oc get csr --no-headers | grep -i pending | awk '{ print $1 }'`; do oc adm certificate approve $i; done - register: csr_approve - -- name: print results from csr approval - tags: cluster - debug: - var: csr_approve.stdout \ No newline at end of file diff --git a/roles/ssh_config_jump/tasks/main.yaml b/roles/ssh_config_jump/tasks/main.yaml deleted file mode 100644 index 16a59fa9..00000000 --- a/roles/ssh_config_jump/tasks/main.yaml +++ /dev/null @@ -1,33 +0,0 @@ -## will not work as is. Need to research using ProxyCommands or SSHuttle to run commands on bootstrap via the bastion jump host - ---- - -- name: Check that the ssh_config exists - tags: keymastr - stat: - path: ~/.ssh/config - register: ssh_config - -- name: Create ssh config file, if it doesnt exist already - tags: keymastr - file: - path: ~/.ssh/config - state: touch - when: not ssh_config.stat.exists - -- name: Insert ssh keys for jump host configuration in /ssh/config - tags: keymastr - blockinfile: - path: ~/.ssh/config - block: | - Host bastion_server - HostName 9.60.87.139 - IdentityFile ~/.ssh/ansible.pub - User root - - Host bootstrap_server - HostName 9.60.87.133 - IdentityFile ~/.ssh/ansible.pub - User core - ProxyJump bastion_server - \ No newline at end of file diff --git a/roles/wait_for_bootkube/tasks/main.yaml b/roles/wait_for_bootkube/tasks/main.yaml deleted file mode 100644 index c4555da4..00000000 --- a/roles/wait_for_bootkube/tasks/main.yaml +++ /dev/null @@ -1,26 +0,0 @@ -#Will not work as is. Waiting to figure out sshuttle or ProxyCommands to run commands on the bootstrap from bastion jump host. - ---- - -#when waiting for bootstrap to come up for the first time, wait for "github.com/openshift/cluster-bootstrap/pkg/start/status.go:66:" - -- name: ssh to bootstrap from bastion - command: ssh core@9.60.87.133 - -#- name: Wait 30 minutes for bootkube to connect nodes. May take up to 45 minutes. - #pause: - #minutes: 1 - -#above is for testing. 
Use below when above works. -#1800 - -- name: Start checking for bootkube to complete connecting nodes. Checks every 2 minutes. - command: journalctl -u bootkube.service - register: bootkube_status - until: bootkube_status.stdout.find("bootkube.service complete") != -1 - retries: 15 - delay: 1 - -#above is for testing. Use below when above works. -#retries: 15 -#delay: 120 \ No newline at end of file From e9d86b47b9db61ae185d445323285f00413e3157 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 19:20:24 -0500 Subject: [PATCH 415/885] Created roles for setup playbook that runs before main.yaml --- files/haproxy.cfg | 58 ------------------- roles/defaults/main.yml | 12 ---- .../files}/ansible-setup-linux.sh | 0 .../files}/ansible-setup-mac.sh | 0 roles/install_dependencies/tasks/main.yaml | 9 +++ roles/setup_vault/tasks/main.yaml | 19 ++++++ setup.yaml | 33 +---------- 7 files changed, 31 insertions(+), 100 deletions(-) delete mode 100644 files/haproxy.cfg delete mode 100644 roles/defaults/main.yml rename {files/shell_scripts => roles/install_dependencies/files}/ansible-setup-linux.sh (100%) rename {files/shell_scripts => roles/install_dependencies/files}/ansible-setup-mac.sh (100%) create mode 100644 roles/install_dependencies/tasks/main.yaml create mode 100644 roles/setup_vault/tasks/main.yaml diff --git a/files/haproxy.cfg b/files/haproxy.cfg deleted file mode 100644 index f7b1f7f0..00000000 --- a/files/haproxy.cfg +++ /dev/null @@ -1,58 +0,0 @@ -global - log 127.0.0.1 local2 - pidfile /var/run/haproxy.pid - maxconn 4000 - daemon -defaults - mode http - log global - option dontlognull - option http-server-close - option redispatch - retries 3 - timeout http-request 10s - timeout queue 1m - timeout connect 10s - timeout client 1m - timeout server 1m - timeout http-keep-alive 10s - timeout check 10s - maxconn 3000 -frontend stats - bind *:1936 - mode http - log global - maxconn 10 - stats enable - stats hide-version - stats refresh 30s - stats 
show-node - stats show-desc Stats for distribution cluster - stats auth admin:distribution - stats uri /stats -listen api-server-6443 - bind *:6443 - mode tcp - server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s backup - server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s - server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s - server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s -listen machine-config-server-22623 - bind *:22623 - mode tcp - server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s backup - server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s -listen ingress-router-443 - bind *:443 - mode tcp - balance source - server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s - server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s -listen ingress-router-80 - bind *:80 - mode tcp - balance source - server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s - server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s diff --git a/roles/defaults/main.yml b/roles/defaults/main.yml deleted file mode 100644 index 06bce909..00000000 --- a/roles/defaults/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -# defaults file for playbooks/roles/kvm-vm -kvm_vm_hostname: [] -kvm_vm_public_ip: [] -kvm_vm_root_pwd: [] -kvm_vm_base_img: [] #NOTE: This should be the name of a base image in /var/lib/libvirt/images on your KVM host -kvm_vm_vcpus: "1" -kvm_vm_ram: "8196" -# kvm_vm_ram: "16384" -kvm_vm_os_disk_name: "{{ kvm_vm_hostname }}" -kvm_vm_os_disk_size: "70G" 
-kvm_vm_nics: [] #NOTE: see example playbook for structure diff --git a/files/shell_scripts/ansible-setup-linux.sh b/roles/install_dependencies/files/ansible-setup-linux.sh similarity index 100% rename from files/shell_scripts/ansible-setup-linux.sh rename to roles/install_dependencies/files/ansible-setup-linux.sh diff --git a/files/shell_scripts/ansible-setup-mac.sh b/roles/install_dependencies/files/ansible-setup-mac.sh similarity index 100% rename from files/shell_scripts/ansible-setup-mac.sh rename to roles/install_dependencies/files/ansible-setup-mac.sh diff --git a/roles/install_dependencies/tasks/main.yaml b/roles/install_dependencies/tasks/main.yaml new file mode 100644 index 00000000..d071c939 --- /dev/null +++ b/roles/install_dependencies/tasks/main.yaml @@ -0,0 +1,9 @@ +--- + +- name: install Ansible dependencies and packages + shell: ansible-setup-mac.sh + when: ansible_facts['os_family'] == "Darwin" + +- name: install Ansible dependencies and packages + shell: ansible-setup-linux.sh + when: ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Debian" \ No newline at end of file diff --git a/roles/setup_vault/tasks/main.yaml b/roles/setup_vault/tasks/main.yaml new file mode 100644 index 00000000..b25568da --- /dev/null +++ b/roles/setup_vault/tasks/main.yaml @@ -0,0 +1,19 @@ +--- + +- name: check to see if .vault_pass.txt exists already + stat: + path: .vault_pass.txt + register: vault_pass_check + +- name: delete .vault_pass.txt if it exists already to ensure idempotence + file: + path: .vault_pass.txt + state: absent + when: vault_pass_check.stat.exists + +- name: fill .vault_pass.txt with user-provided password + lineinfile: + path: .vault_pass.txt + state: present + create: yes + line: "{{ vault_pass }}" \ No newline at end of file diff --git a/setup.yaml b/setup.yaml index d1c038b0..4e19ae13 100644 --- a/setup.yaml +++ b/setup.yaml @@ -10,33 +10,6 @@ prompt: Please provide a secure password to be used for encrypting your 
sensitive files in Ansible private: yes unsafe: yes # this just means you can use special characters. The password is safe. - - tasks: - - name: check to see if .vault_pass.txt exists already - stat: - path: .vault_pass.txt - register: vault_pass_check - - - name: delete .vault_pass.txt if it exists already to ensure idempotence - file: - path: .vault_pass.txt - state: absent - when: vault_pass_check.stat.exists - - - name: fill .vault_pass.txt with user-provided password - lineinfile: - path: .vault_pass.txt - state: present - create: yes - line: "{{ vault_pass }}" - - - name: install Ansible dependencies and packages - shell: files/shell_scripts/ansible-setup-mac.sh - when: ansible_facts['os_family'] == "Darwin" - - - name: install Ansible dependencies and packages - shell: files/shell_scripts/ansible-setup-linux.sh - when: ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Debian" - - - + roles: + - setup_vault + - install_dependencies \ No newline at end of file From 840d98ba5e57b834432091bc741d522caf4814eb Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 19:20:24 -0500 Subject: [PATCH 416/885] Created roles for setup playbook that runs before main.yaml --- files/haproxy.cfg | 58 ------------------- roles/defaults/main.yml | 12 ---- .../files}/ansible-setup-linux.sh | 0 .../files}/ansible-setup-mac.sh | 0 roles/install_dependencies/tasks/main.yaml | 9 +++ roles/setup_vault/tasks/main.yaml | 19 ++++++ setup.yaml | 33 +---------- 7 files changed, 31 insertions(+), 100 deletions(-) delete mode 100644 files/haproxy.cfg delete mode 100644 roles/defaults/main.yml rename {files/shell_scripts => roles/install_dependencies/files}/ansible-setup-linux.sh (100%) rename {files/shell_scripts => roles/install_dependencies/files}/ansible-setup-mac.sh (100%) create mode 100644 roles/install_dependencies/tasks/main.yaml create mode 100644 roles/setup_vault/tasks/main.yaml diff --git a/files/haproxy.cfg b/files/haproxy.cfg deleted file mode 
100644 index f7b1f7f0..00000000 --- a/files/haproxy.cfg +++ /dev/null @@ -1,58 +0,0 @@ -global - log 127.0.0.1 local2 - pidfile /var/run/haproxy.pid - maxconn 4000 - daemon -defaults - mode http - log global - option dontlognull - option http-server-close - option redispatch - retries 3 - timeout http-request 10s - timeout queue 1m - timeout connect 10s - timeout client 1m - timeout server 1m - timeout http-keep-alive 10s - timeout check 10s - maxconn 3000 -frontend stats - bind *:1936 - mode http - log global - maxconn 10 - stats enable - stats hide-version - stats refresh 30s - stats show-node - stats show-desc Stats for distribution cluster - stats auth admin:distribution - stats uri /stats -listen api-server-6443 - bind *:6443 - mode tcp - server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s backup - server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s - server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s - server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:6443 check inter 1s -listen machine-config-server-22623 - bind *:22623 - mode tcp - server bootstrap bootstrap.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s backup - server control-0 control-0.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server control-1 control-1.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s - server control-2 control-2.distribution.ocpz.wsclab.endicott.ibm.com:22623 check inter 1s -listen ingress-router-443 - bind *:443 - mode tcp - balance source - server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s - server compute-1 compute-1.distribution.ocpz.wsclab.endicott.ibm.com:443 check inter 1s -listen ingress-router-80 - bind *:80 - mode tcp - balance source - server compute-0 compute-0.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s - server compute-1 
compute-1.distribution.ocpz.wsclab.endicott.ibm.com:80 check inter 1s diff --git a/roles/defaults/main.yml b/roles/defaults/main.yml deleted file mode 100644 index 06bce909..00000000 --- a/roles/defaults/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -# defaults file for playbooks/roles/kvm-vm -kvm_vm_hostname: [] -kvm_vm_public_ip: [] -kvm_vm_root_pwd: [] -kvm_vm_base_img: [] #NOTE: This should be the name of a base image in /var/lib/libvirt/images on your KVM host -kvm_vm_vcpus: "1" -kvm_vm_ram: "8196" -# kvm_vm_ram: "16384" -kvm_vm_os_disk_name: "{{ kvm_vm_hostname }}" -kvm_vm_os_disk_size: "70G" -kvm_vm_nics: [] #NOTE: see example playbook for structure diff --git a/files/shell_scripts/ansible-setup-linux.sh b/roles/install_dependencies/files/ansible-setup-linux.sh similarity index 100% rename from files/shell_scripts/ansible-setup-linux.sh rename to roles/install_dependencies/files/ansible-setup-linux.sh diff --git a/files/shell_scripts/ansible-setup-mac.sh b/roles/install_dependencies/files/ansible-setup-mac.sh similarity index 100% rename from files/shell_scripts/ansible-setup-mac.sh rename to roles/install_dependencies/files/ansible-setup-mac.sh diff --git a/roles/install_dependencies/tasks/main.yaml b/roles/install_dependencies/tasks/main.yaml new file mode 100644 index 00000000..d071c939 --- /dev/null +++ b/roles/install_dependencies/tasks/main.yaml @@ -0,0 +1,9 @@ +--- + +- name: install Ansible dependencies and packages + shell: ansible-setup-mac.sh + when: ansible_facts['os_family'] == "Darwin" + +- name: install Ansible dependencies and packages + shell: ansible-setup-linux.sh + when: ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Debian" \ No newline at end of file diff --git a/roles/setup_vault/tasks/main.yaml b/roles/setup_vault/tasks/main.yaml new file mode 100644 index 00000000..b25568da --- /dev/null +++ b/roles/setup_vault/tasks/main.yaml @@ -0,0 +1,19 @@ +--- + +- name: check to see if .vault_pass.txt exists already + 
stat: + path: .vault_pass.txt + register: vault_pass_check + +- name: delete .vault_pass.txt if it exists already to ensure idempotence + file: + path: .vault_pass.txt + state: absent + when: vault_pass_check.stat.exists + +- name: fill .vault_pass.txt with user-provided password + lineinfile: + path: .vault_pass.txt + state: present + create: yes + line: "{{ vault_pass }}" \ No newline at end of file diff --git a/setup.yaml b/setup.yaml index d1c038b0..4e19ae13 100644 --- a/setup.yaml +++ b/setup.yaml @@ -10,33 +10,6 @@ prompt: Please provide a secure password to be used for encrypting your sensitive files in Ansible private: yes unsafe: yes # this just means you can use special characters. The password is safe. - - tasks: - - name: check to see if .vault_pass.txt exists already - stat: - path: .vault_pass.txt - register: vault_pass_check - - - name: delete .vault_pass.txt if it exists already to ensure idempotence - file: - path: .vault_pass.txt - state: absent - when: vault_pass_check.stat.exists - - - name: fill .vault_pass.txt with user-provided password - lineinfile: - path: .vault_pass.txt - state: present - create: yes - line: "{{ vault_pass }}" - - - name: install Ansible dependencies and packages - shell: files/shell_scripts/ansible-setup-mac.sh - when: ansible_facts['os_family'] == "Darwin" - - - name: install Ansible dependencies and packages - shell: files/shell_scripts/ansible-setup-linux.sh - when: ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Debian" - - - + roles: + - setup_vault + - install_dependencies \ No newline at end of file From e95798ce92add7a803c3e89cae02c264199d1870 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 19:22:37 -0500 Subject: [PATCH 417/885] Added .vscode to gitignore so that the user's local configuration is not added to Git --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index fc60cbbb..25b53b2f 100644 --- a/.gitignore +++ b/.gitignore @@ 
-1,4 +1,5 @@ .DS_Store .iso +.vscode roles/get-ocp/files/ocp_ssh_pub .vault_pass.txt \ No newline at end of file From 6e08d7f277c5f5b7545f30e08fd00c2db759ca2f Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 19:22:37 -0500 Subject: [PATCH 418/885] Added .vscode to gitignore so that the user's local configuration is not added to Git --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index fc60cbbb..25b53b2f 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ .DS_Store .iso +.vscode roles/get-ocp/files/ocp_ssh_pub .vault_pass.txt \ No newline at end of file From fe2623b05f73c0c012074cdcadf3613a01070283 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 19:25:44 -0500 Subject: [PATCH 419/885] Deleted .vscode from git repo --- .vscode/settings.json | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index 106f856c..00000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "files.autoSave": "onFocusChange" -} \ No newline at end of file From a8ef96fd6d3058364afcfc1f997c23a318b8004c Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 19:25:44 -0500 Subject: [PATCH 420/885] Deleted .vscode from git repo --- .vscode/settings.json | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json deleted file mode 100644 index 106f856c..00000000 --- a/.vscode/settings.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "files.autoSave": "onFocusChange" -} \ No newline at end of file From e79e0dc117e4cefe0cf1e3277b654c1540b36325 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 19:55:29 -0500 Subject: [PATCH 421/885] Updated README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1986263a..268b6ad3 100644 --- a/README.md +++ b/README.md @@ 
-116,7 +116,7 @@ * Then run "export KUBECONFIG=/ocpinst/auth/kubeconfig" * Check that worked by running "oc whoami", which should return "system:admin" * **Step 11: Approve Certificates** - * Fromm the bastion, running as root user (as above) run "oc get csr". This will bring up a list of certificates that need approval. + * From the bastion, running as root user (as above) run "oc get csr". This will bring up a list of certificates that need approval. * To approve all certificates at the same time, run the following command: "for i in \`oc get csr --no-headers | grep -i pending | awk '{ print $1 }\'`; do oc adm certificate approve $i; done" * It may take some time for all the certificates that need approval to show up. Keep running "oc get csr" to check to make sure that From 70c00e64163bf52e5930e81744ea1fd0ea1940ad Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 19:55:29 -0500 Subject: [PATCH 422/885] Updated README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1986263a..268b6ad3 100644 --- a/README.md +++ b/README.md @@ -116,7 +116,7 @@ * Then run "export KUBECONFIG=/ocpinst/auth/kubeconfig" * Check that worked by running "oc whoami", which should return "system:admin" * **Step 11: Approve Certificates** - * Fromm the bastion, running as root user (as above) run "oc get csr". This will bring up a list of certificates that need approval. + * From the bastion, running as root user (as above) run "oc get csr". This will bring up a list of certificates that need approval. * To approve all certificates at the same time, run the following command: "for i in \`oc get csr --no-headers | grep -i pending | awk '{ print $1 }\'`; do oc adm certificate approve $i; done" * It may take some time for all the certificates that need approval to show up. 
Keep running "oc get csr" to check to make sure that From e788a24b4c268431181db15427ee771bd8e65412 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 19:57:38 -0500 Subject: [PATCH 423/885] Updated README --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 268b6ad3..ff95edfe 100644 --- a/README.md +++ b/README.md @@ -140,8 +140,8 @@ ## Teardown: * If you would like to teardown your VMs, first determine whether you would like to do a full or partial teardown. -* Full: to teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags full -* Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags partial +* Full: to teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags full" +* Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags partial" * If you have provisioned more than the minimum number of nodes for your installation, add them to the respective list found in roles/teardown_vms/tasks/main.yaml. * Once you run the full teardown, to start the main.yaml playbook back from that point, run: From 3daf9f26b98833b8c51b0cdb4f6d091a487eecfa Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 19:57:38 -0500 Subject: [PATCH 424/885] Updated README --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 268b6ad3..ff95edfe 100644 --- a/README.md +++ b/README.md @@ -140,8 +140,8 @@ ## Teardown: * If you would like to teardown your VMs, first determine whether you would like to do a full or partial teardown. 
-* Full: to teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags full -* Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags partial +* Full: to teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags full" +* Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags partial" * If you have provisioned more than the minimum number of nodes for your installation, add them to the respective list found in roles/teardown_vms/tasks/main.yaml. * Once you run the full teardown, to start the main.yaml playbook back from that point, run: From 350387bb9aff4ab70cce228f473ef8041cbe530c Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 20:04:35 -0500 Subject: [PATCH 425/885] Emptied env.yaml of development/test variables. Created backup on local computer --- env.yaml | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/env.yaml b/env.yaml index 20f3a2b4..8f62a1ea 100644 --- a/env.yaml +++ b/env.yaml @@ -1,40 +1,41 @@ - -# to populate install_config -env_baseDomain: ocpz.wsclab.endicott.ibm.com +# to populate install-config +env_baseDomain: env_compute_arch: s390x env_control_count: 3 env_control_arch: s390x -env_metadata_name: distribution +env_metadata_name: env_cidr: 10.128.0.0/14 env_host_prefix: 23 env_network_type: OpenShiftSDN env_service_network: 172.30.0.0/16 env_fips: "false" # "true" or "false" (include quotes) -env_pullSecret: 
'{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"registry.connect.redhat.com":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejlaUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"},"registry.redhat.io":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejl
aUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"}}}' #paste it into these single quotes here +env_pullSecret: '' #paste it into these single quotes here # to fill inventory -env_ip_kvm_host: 9.60.87.132 -env_ip_bastion: 9.60.87.139 -env_ip_bootstrap: 9.60.87.133 -env_ip_control_0: 9.60.87.136 -env_ip_control_1: 9.60.87.137 -env_ip_control_2: 9.60.87.138 -env_ip_compute_0: 9.60.87.134 -env_ip_compute_1: 9.60.87.135 +env_ip_kvm_host: +env_ip_bastion: +env_ip_bootstrap: +env_ip_control_0: +env_ip_control_1: +env_ip_control_2: +env_ip_compute_0: +env_ip_compute_1: # SSH -env_ssh_username: jacob #Username to use for SSH into KVM and host bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. -env_ssh_pass: ibmzrocks #Password to use for SSH into KVM host and bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. +#Username to use for SSH into KVM and host bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. +env_ssh_username: +#Password to use for SSH into KVM host and bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. 
+env_ssh_pass: # Ansible passwordless SSH env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) # OpenShift cluster's SSH key comment -env_ssh_ocp_comm: "ocpz_distribution" +env_ssh_ocp_comm: "ocpz" # networking -env_dns_nameserver: 9.60.87.139 -env_default_gateway: 9.60.86.1 -env_netmask: 255.255.254.0 -env_ftp: 9.60.86.81 \ No newline at end of file +env_dns_nameserver: +env_default_gateway: +env_netmask: +env_ftp: \ No newline at end of file From a9993d1874a170448ab1e2a9f517e3f5e07f88fa Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 20:08:25 -0500 Subject: [PATCH 426/885] Added env.yaml to .gitignore so that users don't upload their sensitive variables to Git --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 25b53b2f..d031c7a2 100644 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,5 @@ .iso .vscode roles/get-ocp/files/ocp_ssh_pub -.vault_pass.txt \ No newline at end of file +.vault_pass.txt +env.yaml \ No newline at end of file From fec144986d9285b923e21f557878c331a389be15 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 20:08:25 -0500 Subject: [PATCH 427/885] Added env.yaml to .gitignore so that users don't upload their sensitive variables to Git --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 25b53b2f..d031c7a2 100644 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,5 @@ .iso .vscode roles/get-ocp/files/ocp_ssh_pub -.vault_pass.txt \ No newline at end of file +.vault_pass.txt +env.yaml \ No newline at end of file From 0fc14e5a30a0d36cd0bdeb7b8417b601662933e3 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 20:17:59 -0500 Subject: [PATCH 428/885] Cleared env.yaml --- env.yaml | 52 ++++++++++++++++++++++++++-------------------------- 1 file changed, 26 insertions(+), 26 
deletions(-) diff --git a/env.yaml b/env.yaml index 8f62a1ea..31da6a94 100644 --- a/env.yaml +++ b/env.yaml @@ -1,41 +1,41 @@ -# to populate install-config -env_baseDomain: -env_compute_arch: s390x -env_control_count: 3 -env_control_arch: s390x + +# to populate install_config +env_baseDomain: +env_compute_arch: s390x +env_control_count: 3 +env_control_arch: s390x env_metadata_name: -env_cidr: 10.128.0.0/14 -env_host_prefix: 23 -env_network_type: OpenShiftSDN -env_service_network: 172.30.0.0/16 +env_cidr: 10.128.0.0/14 +env_host_prefix: 23 +env_network_type: OpenShiftSDN +env_service_network: 172.30.0.0/16 env_fips: "false" # "true" or "false" (include quotes) -env_pullSecret: '' #paste it into these single quotes here +env_pullSecret: '' #paste it into these single quotes # to fill inventory -env_ip_kvm_host: -env_ip_bastion: -env_ip_bootstrap: -env_ip_control_0: -env_ip_control_1: -env_ip_control_2: -env_ip_compute_0: -env_ip_compute_1: +env_ip_kvm_host: +env_ip_bastion: +env_ip_bootstrap: +env_ip_control_0: +env_ip_control_1: +env_ip_control_2: +env_ip_compute_0: +env_ip_compute_1: # SSH #Username to use for SSH into KVM and host bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. -env_ssh_username: +env_ssh_username: #Password to use for SSH into KVM host and bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. -env_ssh_pass: - +env_ssh_pass: # Ansible passwordless SSH env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. 
default=""(no password) # OpenShift cluster's SSH key comment -env_ssh_ocp_comm: "ocpz" +env_ssh_ocp_comm: "" # networking -env_dns_nameserver: -env_default_gateway: -env_netmask: -env_ftp: \ No newline at end of file +env_dns_nameserver: +env_default_gateway: +env_netmask: +env_ftp: From 5a1c17c0ea57273b1ff4eadbb05f5c75f023909e Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 20:32:17 -0500 Subject: [PATCH 429/885] Adding env.yaml to .gitignore --- .gitignore | 2 +- env.yaml | 41 ----------------------------------------- 2 files changed, 1 insertion(+), 42 deletions(-) delete mode 100644 env.yaml diff --git a/.gitignore b/.gitignore index d031c7a2..31e30fe4 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,4 @@ .vscode roles/get-ocp/files/ocp_ssh_pub .vault_pass.txt -env.yaml \ No newline at end of file +env.yaml diff --git a/env.yaml b/env.yaml deleted file mode 100644 index 31da6a94..00000000 --- a/env.yaml +++ /dev/null @@ -1,41 +0,0 @@ - -# to populate install_config -env_baseDomain: -env_compute_arch: s390x -env_control_count: 3 -env_control_arch: s390x -env_metadata_name: -env_cidr: 10.128.0.0/14 -env_host_prefix: 23 -env_network_type: OpenShiftSDN -env_service_network: 172.30.0.0/16 -env_fips: "false" # "true" or "false" (include quotes) -env_pullSecret: '' #paste it into these single quotes - -# to fill inventory -env_ip_kvm_host: -env_ip_bastion: -env_ip_bootstrap: -env_ip_control_0: -env_ip_control_1: -env_ip_control_2: -env_ip_compute_0: -env_ip_compute_1: - -# SSH -#Username to use for SSH into KVM and host bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. -env_ssh_username: -#Password to use for SSH into KVM host and bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. -env_ssh_pass: -# Ansible passwordless SSH -env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible -env_ssh_ans_pass: "" #Preferred Ansible ssh password. 
Recommended no password. default=""(no password) - -# OpenShift cluster's SSH key comment -env_ssh_ocp_comm: "" - -# networking -env_dns_nameserver: -env_default_gateway: -env_netmask: -env_ftp: From 33c1aeca78a9cb2f38bd31c0658241c6ef054edb Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 20:32:17 -0500 Subject: [PATCH 430/885] Adding env.yaml to .gitignore --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index d031c7a2..31e30fe4 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,4 @@ .vscode roles/get-ocp/files/ocp_ssh_pub .vault_pass.txt -env.yaml \ No newline at end of file +env.yaml From 4c7ec239466d3d1795463482e5d9095ee2727788 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 20:33:39 -0500 Subject: [PATCH 431/885] Added template for env.yaml but kept .gitignore rule for future use. --- env.yaml | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 env.yaml diff --git a/env.yaml b/env.yaml new file mode 100644 index 00000000..f645db53 --- /dev/null +++ b/env.yaml @@ -0,0 +1,41 @@ + +# to populate install_config +env_baseDomain: +env_compute_arch: s390x +env_control_count: 3 +env_control_arch: s390x +env_metadata_name: +env_cidr: 10.128.0.0/14 +env_host_prefix: 23 +env_network_type: OpenShiftSDN +env_service_network: 172.30.0.0/16 +env_fips: "false" # "true" or "false" (include quotes) +env_pullSecret: '' #paste it into these single quotes + +# to fill inventory +env_ip_kvm_host: +env_ip_bastion: +env_ip_bootstrap: +env_ip_control_0: +env_ip_control_1: +env_ip_control_2: +env_ip_compute_0: +env_ip_compute_1: + +# SSH +#Username to use for SSH into KVM and host bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. +env_ssh_username: +#Password to use for SSH into KVM host and bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. 
+env_ssh_pass: +# Ansible passwordless SSH +env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible +env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) + +# OpenShift cluster's SSH key comment +env_ssh_ocp_comm: "" + +# networking +env_dns_nameserver: +env_default_gateway: +env_netmask: +env_ftp: \ No newline at end of file From 4e980fc6c97db913b7f863fc65548a22bc8b3fdb Mon Sep 17 00:00:00 2001 From: jacobemery Date: Wed, 1 Sep 2021 20:35:10 -0500 Subject: [PATCH 432/885] env.yaml --- env.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env.yaml b/env.yaml index f645db53..20a90c5a 100644 --- a/env.yaml +++ b/env.yaml @@ -38,4 +38,4 @@ env_ssh_ocp_comm: "" env_dns_nameserver: env_default_gateway: env_netmask: -env_ftp: \ No newline at end of file +env_ftp: \ No newline at end of file From 5a19e4e74c78ccf7e8ec3ed5ecea7ed37d8d1744 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Fri, 3 Sep 2021 12:00:02 -0500 Subject: [PATCH 433/885] Consolidated setup into pre-main playbook setup.yaml in order to avoid confusion and group all setup tasks together. 
--- README.md | 2 +- ansible.cfg | 5 +-- env.yaml | 42 ++++++++++--------- inventory | 20 --------- main.yaml | 3 +- roles/encrypt_files/tasks/main.yaml | 6 +++ roles/install_dependencies/tasks/main.yaml | 4 ++ .../tasks/main.yaml | 17 +++----- roles/setup_vault/tasks/main.yaml | 10 ++++- roles/teardown_vms/tasks/main.yaml | 17 ++++++-- setup.yaml | 3 +- teardown.yaml | 4 +- 12 files changed, 70 insertions(+), 63 deletions(-) create mode 100644 roles/encrypt_files/tasks/main.yaml rename roles/{ansible_setup => set_inventory}/tasks/main.yaml (79%) diff --git a/README.md b/README.md index ff95edfe..4728d552 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ ### Provisioning * **Step 5: Running the Main Playbook** - * If you are not already there, navigate to the folder where you saved the Git Repository in your terminal + * If you are not already there, navigate to the folder where you saved the Git repository in your terminal * Execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Watch Ansible as it completes the installation, correcting errors if they arise. * If the process fails in error, you should be able to run the same shell command to start the process from the top. 
diff --git a/ansible.cfg b/ansible.cfg index a0a3609c..ace86cca 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,7 +1,6 @@ [defaults] inventory=inventory -private_key_file=~/.ssh/ansible -vault_password_file = .vault_pass.txt + [inventory] cache=True -private_key_file = ~/.ssh/ansible + diff --git a/env.yaml b/env.yaml index 20a90c5a..6814bdb1 100644 --- a/env.yaml +++ b/env.yaml @@ -1,41 +1,43 @@ + # to populate install_config -env_baseDomain: +env_baseDomain: ocpz.wsclab.endicott.ibm.com env_compute_arch: s390x env_control_count: 3 env_control_arch: s390x -env_metadata_name: +env_metadata_name: distribution env_cidr: 10.128.0.0/14 env_host_prefix: 23 env_network_type: OpenShiftSDN env_service_network: 172.30.0.0/16 env_fips: "false" # "true" or "false" (include quotes) -env_pullSecret: '' #paste it into these single quotes +env_pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"registry.connect.redhat.com":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejlaUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hU
Y2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"},"registry.redhat.io":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejlaUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"}}}' #paste it into these single quotes here + # to fill inventory -env_ip_kvm_host: -env_ip_bastion: -env_ip_bootstrap: -env_ip_control_0: -env_ip_control_1: -env_ip_control_2: -env_ip_compute_0: -env_ip_compute_1: +env_ip_kvm_host: 9.60.87.132 +env_ip_bastion: 9.60.87.139 +env_ip_bootstrap: 9.60.87.133 +env_ip_control_0: 9.60.87.136 +env_ip_control_1: 9.60.87.137 
+env_ip_control_2: 9.60.87.138 +env_ip_compute_0: 9.60.87.134 +env_ip_compute_1: 9.60.87.135 + # SSH -#Username to use for SSH into KVM and host bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. -env_ssh_username: -#Password to use for SSH into KVM host and bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. -env_ssh_pass: +env_ssh_username: jacob #Username to use for SSH into KVM and host bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. +env_ssh_pass: ibmzrocks #Password to use for SSH into KVM host and bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. + # Ansible passwordless SSH env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) # OpenShift cluster's SSH key comment -env_ssh_ocp_comm: "" +env_ssh_ocp_comm: "ocpz_distribution" # networking -env_dns_nameserver: -env_default_gateway: -env_netmask: -env_ftp: \ No newline at end of file +env_dns_nameserver: 9.60.87.139 +env_default_gateway: 9.60.86.1 +env_netmask: 255.255.254.0 +env_ftp: 9.60.86.81 diff --git a/inventory b/inventory index f914a36d..e82d1e18 100755 --- a/inventory +++ b/inventory @@ -5,23 +5,3 @@ [localhost:vars] ansible_python_interpreter=/usr/bin/python3 - -# BEGIN ANSIBLE MANAGED BLOCK -[kvm_host] -9.60.87.132 - -[bastion] -9.60.87.139 - -[bootstrap] -9.60.87.133 - -[control_nodes] -9.60.87.136 -9.60.87.137 -9.60.87.138 - -[compute_nodes] -9.60.87.134 -9.60.87.135 -# END ANSIBLE MANAGED BLOCK \ No newline at end of file diff --git a/main.yaml b/main.yaml index 44f9dfb1..dfa896db 100644 --- a/main.yaml +++ b/main.yaml @@ -9,8 +9,9 @@ - env.yaml vars: - ssh_target_ip: "{{ env_ip_kvm_host }}" + - files_to_encrypt: [ 'env.yaml' ] # add to this list as needed roles: - - ansible_setup + - 
encrypt_files - ssh_key_gen - ssh_copy_id diff --git a/roles/encrypt_files/tasks/main.yaml b/roles/encrypt_files/tasks/main.yaml new file mode 100644 index 00000000..5dbaf7b6 --- /dev/null +++ b/roles/encrypt_files/tasks/main.yaml @@ -0,0 +1,6 @@ +--- + +- name: encrypt provided files_to_encrypt from main playbook to protect sensitive data + tags: setup + shell: ansible-vault encrypt {{ items }} + loop: "{{ files_to_encrypt }}" \ No newline at end of file diff --git a/roles/install_dependencies/tasks/main.yaml b/roles/install_dependencies/tasks/main.yaml index d071c939..84540602 100644 --- a/roles/install_dependencies/tasks/main.yaml +++ b/roles/install_dependencies/tasks/main.yaml @@ -1,5 +1,9 @@ --- +- name: Gather facts to get OS family to see which setup script to run + tags: setup + ansible.builtin.gather_facts: + - name: install Ansible dependencies and packages shell: ansible-setup-mac.sh when: ansible_facts['os_family'] == "Darwin" diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml similarity index 79% rename from roles/ansible_setup/tasks/main.yaml rename to roles/set_inventory/tasks/main.yaml index 7a037c25..d787ea41 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -70,18 +70,13 @@ register: inv_check failed_when: inv_check.rc != 0 -# .vault_pass.txt is in the .gitignore file, and will therefore not be uploaded to Git should you do a git push. 
+- name: Gather facts to re-read inventory after changes made to inventory + tags: setup + ansible.builtin.gather_facts: + - name: fill ansible.cfg with provided variable ansible ssh key file name tags: setup ansible.builtin.lineinfile: path: ansible.cfg - regexp: '^private_key_file=' - line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} - -- name: encrypt env.yaml to protect sensitive data - tags: setup - shell: ansible-vault encrypt env.yaml - -- name: Ansible generic setup to re-read inventory file after populated in previous tasks - tags: setup - ansible.builtin.gather_facts: \ No newline at end of file + insertafter: "^[defaults]" + line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} \ No newline at end of file diff --git a/roles/setup_vault/tasks/main.yaml b/roles/setup_vault/tasks/main.yaml index b25568da..ddf4a6d0 100644 --- a/roles/setup_vault/tasks/main.yaml +++ b/roles/setup_vault/tasks/main.yaml @@ -11,9 +11,15 @@ state: absent when: vault_pass_check.stat.exists +# .vault_pass.txt is in the .gitignore file, and will therefore not be uploaded to Git should you do a push. 
- name: fill .vault_pass.txt with user-provided password lineinfile: path: .vault_pass.txt - state: present create: yes - line: "{{ vault_pass }}" \ No newline at end of file + line: "{{ vault_pass }}" + +- name: fill ansible.cfg with default location to find Ansible vault password file + ansible.builtin.lineinfile: + path: ansible.cfg + insertafter: "^[defaults]" + line: vault_password_file = .vault_pass.txt \ No newline at end of file diff --git a/roles/teardown_vms/tasks/main.yaml b/roles/teardown_vms/tasks/main.yaml index 97ace6a3..02bad35a 100644 --- a/roles/teardown_vms/tasks/main.yaml +++ b/roles/teardown_vms/tasks/main.yaml @@ -8,17 +8,28 @@ debug: var: "{{ vms | length }}" -- name: shutdown listed VMs +- name: register only running VMs + community.libvirt.virt: + command: list_vms + state: running + register: running_vms + +- name: print only running vms + debug: + var: running_vms.list_vms + +- name: shutdown running VMs community.libvirt.virt: name: "{{ item }}" command: shutdown - loop: "{{ vms }}" + loop: "{{ running_vms.list_vms }}" + when: - name: wait up to 5 minute for VMs to shutdown gracefully pause: minutes: 5 -- name: undefine listed VMs +- name: undefine VMs given from teardown.yaml community.libvirt.virt: name: "{{ item }}" command: undefine diff --git a/setup.yaml b/setup.yaml index 4e19ae13..8bd72e0c 100644 --- a/setup.yaml +++ b/setup.yaml @@ -12,4 +12,5 @@ unsafe: yes # this just means you can use special characters. The password is safe. roles: - setup_vault - - install_dependencies \ No newline at end of file + - install_dependencies + - set_inventory \ No newline at end of file diff --git a/teardown.yaml b/teardown.yaml index 5ce3da30..86f07a4f 100644 --- a/teardown.yaml +++ b/teardown.yaml @@ -1,5 +1,7 @@ --- + + # Use the "full" tag when running this playbook to reset your installation back to the point where no VMs were running on the KVM host. 
# If you have more nodes than what is present in the "vms" list below, feel free to add more to the list. # After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastionvm,bastion,create_nodes'" @@ -9,7 +11,7 @@ become: true gather_facts: no vars: - - vms: ['bastion', bootstrap', 'control-0', 'control-1', 'control-2', 'compute-0', 'compute-1'] + - vms: ['bastion', 'bootstrap', 'control-0', 'control-1', 'control-2', 'compute-0', 'compute-1'] roles: - teardown_vms From 463813eb4d29d907ebcab8f97ce5bfcb762065b1 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Fri, 3 Sep 2021 12:00:02 -0500 Subject: [PATCH 434/885] Consolidated setup into pre-main playbook setup.yaml in order to avoid confusion and group all setup tasks together. --- README.md | 2 +- ansible.cfg | 5 ++--- inventory | 20 ------------------- main.yaml | 3 ++- roles/encrypt_files/tasks/main.yaml | 6 ++++++ roles/install_dependencies/tasks/main.yaml | 4 ++++ .../tasks/main.yaml | 17 ++++++---------- roles/setup_vault/tasks/main.yaml | 10 ++++++++-- roles/teardown_vms/tasks/main.yaml | 17 +++++++++++++--- setup.yaml | 3 ++- teardown.yaml | 4 +++- 11 files changed, 48 insertions(+), 43 deletions(-) create mode 100644 roles/encrypt_files/tasks/main.yaml rename roles/{ansible_setup => set_inventory}/tasks/main.yaml (79%) diff --git a/README.md b/README.md index ff95edfe..4728d552 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ ### Provisioning * **Step 5: Running the Main Playbook** - * If you are not already there, navigate to the folder where you saved the Git Repository in your terminal + * If you are not already there, navigate to the folder where you saved the Git repository in your terminal * Execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Watch Ansible as it completes the installation, correcting errors if they arise. 
* If the process fails in error, you should be able to run the same shell command to start the process from the top. diff --git a/ansible.cfg b/ansible.cfg index a0a3609c..ace86cca 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,7 +1,6 @@ [defaults] inventory=inventory -private_key_file=~/.ssh/ansible -vault_password_file = .vault_pass.txt + [inventory] cache=True -private_key_file = ~/.ssh/ansible + diff --git a/inventory b/inventory index f914a36d..e82d1e18 100755 --- a/inventory +++ b/inventory @@ -5,23 +5,3 @@ [localhost:vars] ansible_python_interpreter=/usr/bin/python3 - -# BEGIN ANSIBLE MANAGED BLOCK -[kvm_host] -9.60.87.132 - -[bastion] -9.60.87.139 - -[bootstrap] -9.60.87.133 - -[control_nodes] -9.60.87.136 -9.60.87.137 -9.60.87.138 - -[compute_nodes] -9.60.87.134 -9.60.87.135 -# END ANSIBLE MANAGED BLOCK \ No newline at end of file diff --git a/main.yaml b/main.yaml index 44f9dfb1..dfa896db 100644 --- a/main.yaml +++ b/main.yaml @@ -9,8 +9,9 @@ - env.yaml vars: - ssh_target_ip: "{{ env_ip_kvm_host }}" + - files_to_encrypt: [ 'env.yaml' ] # add to this list as needed roles: - - ansible_setup + - encrypt_files - ssh_key_gen - ssh_copy_id diff --git a/roles/encrypt_files/tasks/main.yaml b/roles/encrypt_files/tasks/main.yaml new file mode 100644 index 00000000..5dbaf7b6 --- /dev/null +++ b/roles/encrypt_files/tasks/main.yaml @@ -0,0 +1,6 @@ +--- + +- name: encrypt provided files_to_encrypt from main playbook to protect sensitive data + tags: setup + shell: ansible-vault encrypt {{ items }} + loop: "{{ files_to_encrypt }}" \ No newline at end of file diff --git a/roles/install_dependencies/tasks/main.yaml b/roles/install_dependencies/tasks/main.yaml index d071c939..84540602 100644 --- a/roles/install_dependencies/tasks/main.yaml +++ b/roles/install_dependencies/tasks/main.yaml @@ -1,5 +1,9 @@ --- +- name: Gather facts to get OS family to see which setup script to run + tags: setup + ansible.builtin.gather_facts: + - name: install Ansible dependencies and 
packages shell: ansible-setup-mac.sh when: ansible_facts['os_family'] == "Darwin" diff --git a/roles/ansible_setup/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml similarity index 79% rename from roles/ansible_setup/tasks/main.yaml rename to roles/set_inventory/tasks/main.yaml index 7a037c25..d787ea41 100644 --- a/roles/ansible_setup/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -70,18 +70,13 @@ register: inv_check failed_when: inv_check.rc != 0 -# .vault_pass.txt is in the .gitignore file, and will therefore not be uploaded to Git should you do a git push. +- name: Gather facts to re-read inventory after changes made to inventory + tags: setup + ansible.builtin.gather_facts: + - name: fill ansible.cfg with provided variable ansible ssh key file name tags: setup ansible.builtin.lineinfile: path: ansible.cfg - regexp: '^private_key_file=' - line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} - -- name: encrypt env.yaml to protect sensitive data - tags: setup - shell: ansible-vault encrypt env.yaml - -- name: Ansible generic setup to re-read inventory file after populated in previous tasks - tags: setup - ansible.builtin.gather_facts: \ No newline at end of file + insertafter: "^[defaults]" + line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} \ No newline at end of file diff --git a/roles/setup_vault/tasks/main.yaml b/roles/setup_vault/tasks/main.yaml index b25568da..ddf4a6d0 100644 --- a/roles/setup_vault/tasks/main.yaml +++ b/roles/setup_vault/tasks/main.yaml @@ -11,9 +11,15 @@ state: absent when: vault_pass_check.stat.exists +# .vault_pass.txt is in the .gitignore file, and will therefore not be uploaded to Git should you do a push. 
- name: fill .vault_pass.txt with user-provided password lineinfile: path: .vault_pass.txt - state: present create: yes - line: "{{ vault_pass }}" \ No newline at end of file + line: "{{ vault_pass }}" + +- name: fill ansible.cfg with default location to find Ansible vault password file + ansible.builtin.lineinfile: + path: ansible.cfg + insertafter: "^[defaults]" + line: vault_password_file = .vault_pass.txt \ No newline at end of file diff --git a/roles/teardown_vms/tasks/main.yaml b/roles/teardown_vms/tasks/main.yaml index 97ace6a3..02bad35a 100644 --- a/roles/teardown_vms/tasks/main.yaml +++ b/roles/teardown_vms/tasks/main.yaml @@ -8,17 +8,28 @@ debug: var: "{{ vms | length }}" -- name: shutdown listed VMs +- name: register only running VMs + community.libvirt.virt: + command: list_vms + state: running + register: running_vms + +- name: print only running vms + debug: + var: running_vms.list_vms + +- name: shutdown running VMs community.libvirt.virt: name: "{{ item }}" command: shutdown - loop: "{{ vms }}" + loop: "{{ running_vms.list_vms }}" + when: - name: wait up to 5 minute for VMs to shutdown gracefully pause: minutes: 5 -- name: undefine listed VMs +- name: undefine VMs given from teardown.yaml community.libvirt.virt: name: "{{ item }}" command: undefine diff --git a/setup.yaml b/setup.yaml index 4e19ae13..8bd72e0c 100644 --- a/setup.yaml +++ b/setup.yaml @@ -12,4 +12,5 @@ unsafe: yes # this just means you can use special characters. The password is safe. roles: - setup_vault - - install_dependencies \ No newline at end of file + - install_dependencies + - set_inventory \ No newline at end of file diff --git a/teardown.yaml b/teardown.yaml index 5ce3da30..86f07a4f 100644 --- a/teardown.yaml +++ b/teardown.yaml @@ -1,5 +1,7 @@ --- + + # Use the "full" tag when running this playbook to reset your installation back to the point where no VMs were running on the KVM host. 
# If you have more nodes than what is present in the "vms" list below, feel free to add more to the list. # After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastionvm,bastion,create_nodes'" @@ -9,7 +11,7 @@ become: true gather_facts: no vars: - - vms: ['bastion', bootstrap', 'control-0', 'control-1', 'control-2', 'compute-0', 'compute-1'] + - vms: ['bastion', 'bootstrap', 'control-0', 'control-1', 'control-2', 'compute-0', 'compute-1'] roles: - teardown_vms From ae57a58fa719c3a61762863588b9460aceccdf8e Mon Sep 17 00:00:00 2001 From: jacobemery Date: Fri, 3 Sep 2021 12:59:26 -0500 Subject: [PATCH 435/885] removed setup scripts and incorporated them into install dependencies' main playbook for simplicity --- ansible.cfg | 3 +-- .../files/ansible-setup-linux.sh | 8 ------- .../files/ansible-setup-mac.sh | 7 ------ roles/install_dependencies/tasks/main.yaml | 24 +++++++++++++++---- roles/set_inventory/tasks/main.yaml | 2 +- roles/setup_vault/tasks/main.yaml | 2 +- 6 files changed, 23 insertions(+), 23 deletions(-) delete mode 100755 roles/install_dependencies/files/ansible-setup-linux.sh delete mode 100755 roles/install_dependencies/files/ansible-setup-mac.sh diff --git a/ansible.cfg b/ansible.cfg index ace86cca..80a4489f 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -2,5 +2,4 @@ inventory=inventory [inventory] -cache=True - +cache=True \ No newline at end of file diff --git a/roles/install_dependencies/files/ansible-setup-linux.sh b/roles/install_dependencies/files/ansible-setup-linux.sh deleted file mode 100755 index 64ac4cf6..00000000 --- a/roles/install_dependencies/files/ansible-setup-linux.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -ansible-galaxy collection install community.general -ansible-galaxy collection install community.crypto -ansible-galaxy collection install ansible.posix -ansible-galaxy collection install community.libvirt -sudo dnf install sshpass -y -sudo dnf install openssh -y \ No newline 
at end of file diff --git a/roles/install_dependencies/files/ansible-setup-mac.sh b/roles/install_dependencies/files/ansible-setup-mac.sh deleted file mode 100755 index f918d37a..00000000 --- a/roles/install_dependencies/files/ansible-setup-mac.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -ansible-galaxy collection install community.general -ansible-galaxy collection install community.crypto -ansible-galaxy collection install ansible.posix -ansible-galaxy collection install community.libvirt -brew install openssh \ No newline at end of file diff --git a/roles/install_dependencies/tasks/main.yaml b/roles/install_dependencies/tasks/main.yaml index 84540602..c280aead 100644 --- a/roles/install_dependencies/tasks/main.yaml +++ b/roles/install_dependencies/tasks/main.yaml @@ -1,13 +1,29 @@ --- - name: Gather facts to get OS family to see which setup script to run - tags: setup + tags: t ansible.builtin.gather_facts: - name: install Ansible dependencies and packages - shell: ansible-setup-mac.sh + tags: t + shell: "{{ item }}" + loop: + - ansible-galaxy collection install community.general + - ansible-galaxy collection install community.crypto + - ansible-galaxy collection install ansible.posix + - ansible-galaxy collection install community.libvirt + - brew install openssh when: ansible_facts['os_family'] == "Darwin" - name: install Ansible dependencies and packages - shell: ansible-setup-linux.sh - when: ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Debian" \ No newline at end of file + tags: t + shell: "{{ item }}" + loop: + - ansible-galaxy collection install community.general + - ansible-galaxy collection install community.crypto + - ansible-galaxy collection install ansible.posix + - ansible-galaxy collection install community.libvirt + - sudo dnf install sshpass -y + - sudo dnf install openssh -y + when: ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Debian" + \ No newline at end of file diff --git 
a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml index d787ea41..75c1f946 100644 --- a/roles/set_inventory/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -78,5 +78,5 @@ tags: setup ansible.builtin.lineinfile: path: ansible.cfg - insertafter: "^[defaults]" + insertafter: '\[defaults\]' line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} \ No newline at end of file diff --git a/roles/setup_vault/tasks/main.yaml b/roles/setup_vault/tasks/main.yaml index ddf4a6d0..2d472c09 100644 --- a/roles/setup_vault/tasks/main.yaml +++ b/roles/setup_vault/tasks/main.yaml @@ -21,5 +21,5 @@ - name: fill ansible.cfg with default location to find Ansible vault password file ansible.builtin.lineinfile: path: ansible.cfg - insertafter: "^[defaults]" + insertafter: '\[defaults\]' line: vault_password_file = .vault_pass.txt \ No newline at end of file From e40034d3a743bba792fb64900c46e96eda06d463 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Fri, 3 Sep 2021 12:59:26 -0500 Subject: [PATCH 436/885] removed setup scripts and incorporated them into install dependencies' main playbook for simplicity --- ansible.cfg | 3 +-- .../files/ansible-setup-linux.sh | 8 ------- .../files/ansible-setup-mac.sh | 7 ------ roles/install_dependencies/tasks/main.yaml | 24 +++++++++++++++---- roles/set_inventory/tasks/main.yaml | 2 +- roles/setup_vault/tasks/main.yaml | 2 +- 6 files changed, 23 insertions(+), 23 deletions(-) delete mode 100755 roles/install_dependencies/files/ansible-setup-linux.sh delete mode 100755 roles/install_dependencies/files/ansible-setup-mac.sh diff --git a/ansible.cfg b/ansible.cfg index ace86cca..80a4489f 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -2,5 +2,4 @@ inventory=inventory [inventory] -cache=True - +cache=True \ No newline at end of file diff --git a/roles/install_dependencies/files/ansible-setup-linux.sh b/roles/install_dependencies/files/ansible-setup-linux.sh deleted file mode 100755 index 64ac4cf6..00000000 --- 
a/roles/install_dependencies/files/ansible-setup-linux.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -ansible-galaxy collection install community.general -ansible-galaxy collection install community.crypto -ansible-galaxy collection install ansible.posix -ansible-galaxy collection install community.libvirt -sudo dnf install sshpass -y -sudo dnf install openssh -y \ No newline at end of file diff --git a/roles/install_dependencies/files/ansible-setup-mac.sh b/roles/install_dependencies/files/ansible-setup-mac.sh deleted file mode 100755 index f918d37a..00000000 --- a/roles/install_dependencies/files/ansible-setup-mac.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -ansible-galaxy collection install community.general -ansible-galaxy collection install community.crypto -ansible-galaxy collection install ansible.posix -ansible-galaxy collection install community.libvirt -brew install openssh \ No newline at end of file diff --git a/roles/install_dependencies/tasks/main.yaml b/roles/install_dependencies/tasks/main.yaml index 84540602..c280aead 100644 --- a/roles/install_dependencies/tasks/main.yaml +++ b/roles/install_dependencies/tasks/main.yaml @@ -1,13 +1,29 @@ --- - name: Gather facts to get OS family to see which setup script to run - tags: setup + tags: t ansible.builtin.gather_facts: - name: install Ansible dependencies and packages - shell: ansible-setup-mac.sh + tags: t + shell: "{{ item }}" + loop: + - ansible-galaxy collection install community.general + - ansible-galaxy collection install community.crypto + - ansible-galaxy collection install ansible.posix + - ansible-galaxy collection install community.libvirt + - brew install openssh when: ansible_facts['os_family'] == "Darwin" - name: install Ansible dependencies and packages - shell: ansible-setup-linux.sh - when: ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Debian" \ No newline at end of file + tags: t + shell: "{{ item }}" + loop: + - ansible-galaxy collection install 
community.general + - ansible-galaxy collection install community.crypto + - ansible-galaxy collection install ansible.posix + - ansible-galaxy collection install community.libvirt + - sudo dnf install sshpass -y + - sudo dnf install openssh -y + when: ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Debian" + \ No newline at end of file diff --git a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml index d787ea41..75c1f946 100644 --- a/roles/set_inventory/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -78,5 +78,5 @@ tags: setup ansible.builtin.lineinfile: path: ansible.cfg - insertafter: "^[defaults]" + insertafter: '\[defaults\]' line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} \ No newline at end of file diff --git a/roles/setup_vault/tasks/main.yaml b/roles/setup_vault/tasks/main.yaml index ddf4a6d0..2d472c09 100644 --- a/roles/setup_vault/tasks/main.yaml +++ b/roles/setup_vault/tasks/main.yaml @@ -21,5 +21,5 @@ - name: fill ansible.cfg with default location to find Ansible vault password file ansible.builtin.lineinfile: path: ansible.cfg - insertafter: "^[defaults]" + insertafter: '\[defaults\]' line: vault_password_file = .vault_pass.txt \ No newline at end of file From f424846a45839c7216f2d3d4e57deafadf2a947d Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Sep 2021 13:01:25 -0500 Subject: [PATCH 437/885] Update env.yaml emptied env.yaml --- env.yaml | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/env.yaml b/env.yaml index 6814bdb1..1f262588 100644 --- a/env.yaml +++ b/env.yaml @@ -1,43 +1,43 @@ # to populate install_config -env_baseDomain: ocpz.wsclab.endicott.ibm.com +env_baseDomain: env_compute_arch: s390x env_control_count: 3 env_control_arch: s390x -env_metadata_name: distribution +env_metadata_name: env_cidr: 10.128.0.0/14 env_host_prefix: 23 env_network_type: OpenShiftSDN env_service_network: 172.30.0.0/16 env_fips: "false" 
# "true" or "false" (include quotes) -env_pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"quay.io":{"auth":"b3BlbnNoaWZ0LXJlbGVhc2UtZGV2K29jbV9hY2Nlc3NfYzRjMGVlNDZmNmI0NDcyN2E5ZmZiZWVmMDgyNTYzNzg6VTgzMlYxSDdJMTBQM0cxVzNESkRWVlhXMVVTVjVUUjdVNjVGNzkwSldJVEEzSTcxOUlLTEJSWjI2MjVOQ0FDSg==","email":"jacob.emery@ibm.com"},"registry.connect.redhat.com":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xOWtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejlaUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"},"registry.redhat.io":{"auth":"fHVoYy1wb29sLTMyMzRiMTdjLWIyOGQtNDYxMS1iNGE0LTc2MjAwY2YwYTNiYzpleUpoYkdjaU9pSlNVelV4TWlKOS5leUp6ZFdJaU9pSXhOREl5WmprM00yVXpZemMwTVRRek9UYzNNRGMzT0dNNE9XSTJNRGRqT1NKOS5WMW5sd1ptV1JueVJqTU9BLXRtR2dOVlJBQXk0VGs3RlVWazdmS2Nta2s2UVB3VFk0eWRNX2NNLXl5cWNZdGJUdjdLUkpNTjYyRG1yaThuc2QyY3Z4NU1xO
WtKSnR1dHAyNjVEdDZZemxwNjctenEwWnZtcFJFUG82V3RrSWFyejlaUDB4X1ZWcGF6TS12aGVxTWpkUXNvMHZieXFZVGU5ZDIyTWUtTHVKSFJPWGY1c296NWdhQkw3UGVOelkyUGJpb3p5Zk5PZGhNb3ZIRlkwUXF6dmpXWHM2WUxMc21KeVo3amxmbDNhOHhKWnJkT0NNRUFXQzg2N3ljc01xWGNVVnNUSUZBRHdHTHNUSFJ0cG0wakFMdDNYZVpfNEJRQnp0UFVtMksxQVMyRE0zN2pJbGplVjBWelV0N2E1XzVrcjVOTmlQc0R0VGlOWmRXX1ZKREtWc0FVQ2hUY2JFT1RYUGtmTGpZR05aQmJ6dGVYLUlMWWIwc2dncEZqX3BzMDU1SUpwQ2NDWnB4dXZPZEk5Y01ZajdRbFl2TzV2c0RxQUZ2dXVUdGhPMzQ2SzNRUHdQLXNxUmFqU2NoOEVSSTZSaFZKaE53eVRPMGx0ellvRjZrMU0yUHhjX3VFZzFPVm1QSWxmZTNuWnFJUmczcExOM3cybnBqYXlkMnJxc2N2WU85NXRsbkFOa3B4YjJnVjVyYXZ5STBwMmx3VjZha2VXTGN4MlU1dGZWRHgzWW44VmtwdFlOeFUwQ016d2RWeWN6RlV6aFc2eUlvNmtMR09DQ3ExSVZGcVFKd191TjlWYVZlWjZHajVBcGlvc20wTllJd1QtVGFHbjNRUlB1bVBuSElIZlJGUUYzSF95SjhyUnNSRFJualpZT0ZvWmstQkNjTmdpOW1nb04yQQ==","email":"jacob.emery@ibm.com"}}}' #paste it into these single quotes here +env_pullSecret: '' #paste it into these single quotes here # to fill inventory -env_ip_kvm_host: 9.60.87.132 -env_ip_bastion: 9.60.87.139 -env_ip_bootstrap: 9.60.87.133 -env_ip_control_0: 9.60.87.136 -env_ip_control_1: 9.60.87.137 -env_ip_control_2: 9.60.87.138 -env_ip_compute_0: 9.60.87.134 -env_ip_compute_1: 9.60.87.135 +env_ip_kvm_host: +env_ip_bastion: +env_ip_bootstrap: +env_ip_control_0: +env_ip_control_1: +env_ip_control_2: +env_ip_compute_0: +env_ip_compute_1: # SSH -env_ssh_username: jacob #Username to use for SSH into KVM and host bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. -env_ssh_pass: ibmzrocks #Password to use for SSH into KVM host and bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. +env_ssh_username: #Username to use for SSH into KVM and host bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. +env_ssh_pass: #Password to use for SSH into KVM host and bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. 
# Ansible passwordless SSH env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) # OpenShift cluster's SSH key comment -env_ssh_ocp_comm: "ocpz_distribution" +env_ssh_ocp_comm: "" # networking -env_dns_nameserver: 9.60.87.139 -env_default_gateway: 9.60.86.1 -env_netmask: 255.255.254.0 -env_ftp: 9.60.86.81 +env_dns_nameserver: +env_default_gateway: +env_netmask: +env_ftp: From 66095d9328d2a957bdb6bbde67e8dbb41145f7ee Mon Sep 17 00:00:00 2001 From: jacobemery Date: Fri, 3 Sep 2021 14:46:08 -0500 Subject: [PATCH 438/885] Fixed bugs in setup playbook --- .gitignore | 2 ++ roles/install_dependencies/tasks/main.yaml | 3 --- roles/set_inventory/tasks/main.yaml | 1 - roles/setup_vault/tasks/main.yaml | 2 +- 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index 31e30fe4..663b903f 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,5 @@ roles/get-ocp/files/ocp_ssh_pub .vault_pass.txt env.yaml +inventory +ansible.cfg diff --git a/roles/install_dependencies/tasks/main.yaml b/roles/install_dependencies/tasks/main.yaml index c280aead..cfef5772 100644 --- a/roles/install_dependencies/tasks/main.yaml +++ b/roles/install_dependencies/tasks/main.yaml @@ -1,11 +1,9 @@ --- - name: Gather facts to get OS family to see which setup script to run - tags: t ansible.builtin.gather_facts: - name: install Ansible dependencies and packages - tags: t shell: "{{ item }}" loop: - ansible-galaxy collection install community.general @@ -16,7 +14,6 @@ when: ansible_facts['os_family'] == "Darwin" - name: install Ansible dependencies and packages - tags: t shell: "{{ item }}" loop: - ansible-galaxy collection install community.general diff --git a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml index 75c1f946..9c4ec745 100644 --- a/roles/set_inventory/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -62,7 
+62,6 @@ {{ env_ip_compute_0 }} {{ env_ip_compute_1 }} state: present - backup: yes - name: check inventory setup tags: setup diff --git a/roles/setup_vault/tasks/main.yaml b/roles/setup_vault/tasks/main.yaml index 2d472c09..5c5dc6f6 100644 --- a/roles/setup_vault/tasks/main.yaml +++ b/roles/setup_vault/tasks/main.yaml @@ -22,4 +22,4 @@ ansible.builtin.lineinfile: path: ansible.cfg insertafter: '\[defaults\]' - line: vault_password_file = .vault_pass.txt \ No newline at end of file + line: vault_password_file=.vault_pass.txt \ No newline at end of file From 1528dd6e4e6e973e4d857cafce64a2a41b839dfc Mon Sep 17 00:00:00 2001 From: jacobemery Date: Fri, 3 Sep 2021 14:46:08 -0500 Subject: [PATCH 439/885] Fixed bugs in setup playbook --- .gitignore | 2 ++ roles/install_dependencies/tasks/main.yaml | 3 --- roles/set_inventory/tasks/main.yaml | 1 - roles/setup_vault/tasks/main.yaml | 2 +- 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index 31e30fe4..663b903f 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,5 @@ roles/get-ocp/files/ocp_ssh_pub .vault_pass.txt env.yaml +inventory +ansible.cfg diff --git a/roles/install_dependencies/tasks/main.yaml b/roles/install_dependencies/tasks/main.yaml index c280aead..cfef5772 100644 --- a/roles/install_dependencies/tasks/main.yaml +++ b/roles/install_dependencies/tasks/main.yaml @@ -1,11 +1,9 @@ --- - name: Gather facts to get OS family to see which setup script to run - tags: t ansible.builtin.gather_facts: - name: install Ansible dependencies and packages - tags: t shell: "{{ item }}" loop: - ansible-galaxy collection install community.general @@ -16,7 +14,6 @@ when: ansible_facts['os_family'] == "Darwin" - name: install Ansible dependencies and packages - tags: t shell: "{{ item }}" loop: - ansible-galaxy collection install community.general diff --git a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml index 75c1f946..9c4ec745 100644 --- 
a/roles/set_inventory/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -62,7 +62,6 @@ {{ env_ip_compute_0 }} {{ env_ip_compute_1 }} state: present - backup: yes - name: check inventory setup tags: setup diff --git a/roles/setup_vault/tasks/main.yaml b/roles/setup_vault/tasks/main.yaml index 2d472c09..5c5dc6f6 100644 --- a/roles/setup_vault/tasks/main.yaml +++ b/roles/setup_vault/tasks/main.yaml @@ -22,4 +22,4 @@ ansible.builtin.lineinfile: path: ansible.cfg insertafter: '\[defaults\]' - line: vault_password_file = .vault_pass.txt \ No newline at end of file + line: vault_password_file=.vault_pass.txt \ No newline at end of file From 1e842be8294e33fdab925c0a08ea803b7a5378e1 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 16:52:14 -0500 Subject: [PATCH 440/885] Debugged SSH issues. Implemented the use of an ssh-agent. --- roles/check_ssh/tasks/main.yaml | 6 +----- roles/ssh_copy_id/tasks/main.yaml | 14 +++++++++----- roles/ssh_key_gen/tasks/main.yaml | 18 ++---------------- 3 files changed, 12 insertions(+), 26 deletions(-) diff --git a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml index f185a71a..ce7d56bf 100644 --- a/roles/check_ssh/tasks/main.yaml +++ b/roles/check_ssh/tasks/main.yaml @@ -9,8 +9,4 @@ - name: print the connectivity test results tags: keymastr debug: - var: ssh_connection_test.stdout_lines - -- name: pause for you to do any necessary setup - pause: - minutes: 60 \ No newline at end of file + var: ssh_connection_test.stdout_lines \ No newline at end of file diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index f9fb4a29..2930e673 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,11 +1,15 @@ --- -- name: distribute the ssh key to a remote host - tags: ssh,ssh-copy-id - shell: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" - register: 
ssh_copy_id_execution +- name: Load in variables from env.yaml + tags: ssh-copy-id + include_vars: env.yaml + +- name: ssh copy id to remote host + tags: ssh-copy-id + command: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 '{{ env_ssh_username }}@{{ ssh_target_ip }}'" + register: ssh_copy - name: Print results of copying ssh id to remote host. tags: ssh,ssh-copy-id debug: - var: ssh_copy_id_execution \ No newline at end of file + var: ssh_copy \ No newline at end of file diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index c60f4a70..92bc3493 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -16,7 +16,7 @@ file: path: "~/.ssh" state: directory - mode: "0700" + mode: '700' register: ssh_directory_creation when: ssh_directory_exists_check is defined and ssh_directory_exists_check.stat.exists == false @@ -49,18 +49,4 @@ - name: Print results of ssh key pair creation tags: keymastr debug: - var: ssh_key_creation - -- name: add the new ssh key to the ansible.cfg file - tags: keymastr - lineinfile: - path: ansible.cfg - line: "private_key_file = ~/.ssh/{{env_ssh_ans_name}}" - state: present - backup: yes - register: ssh_config_file_key_addition - -- name: Print results of adding ssh key to ansible.cfg file - tags: keymastr - debug: - var: ssh_config_file_key_addition + var: ssh_key_creation \ No newline at end of file From 472501113acaa575f6a0116d1ede1baeaa33507f Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 16:52:14 -0500 Subject: [PATCH 441/885] Debugged SSH issues. Implemented the use of an ssh-agent. 
--- roles/check_ssh/tasks/main.yaml | 6 +----- roles/ssh_copy_id/tasks/main.yaml | 14 +++++++++----- roles/ssh_key_gen/tasks/main.yaml | 18 ++---------------- 3 files changed, 12 insertions(+), 26 deletions(-) diff --git a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml index f185a71a..ce7d56bf 100644 --- a/roles/check_ssh/tasks/main.yaml +++ b/roles/check_ssh/tasks/main.yaml @@ -9,8 +9,4 @@ - name: print the connectivity test results tags: keymastr debug: - var: ssh_connection_test.stdout_lines - -- name: pause for you to do any necessary setup - pause: - minutes: 60 \ No newline at end of file + var: ssh_connection_test.stdout_lines \ No newline at end of file diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index f9fb4a29..2930e673 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,11 +1,15 @@ --- -- name: distribute the ssh key to a remote host - tags: ssh,ssh-copy-id - shell: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 \"{{ env_ssh_username }}@{{ ssh_target_ip }}\"" - register: ssh_copy_id_execution +- name: Load in variables from env.yaml + tags: ssh-copy-id + include_vars: env.yaml + +- name: ssh copy id to remote host + tags: ssh-copy-id + command: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 '{{ env_ssh_username }}@{{ ssh_target_ip }}'" + register: ssh_copy - name: Print results of copying ssh id to remote host. 
tags: ssh,ssh-copy-id debug: - var: ssh_copy_id_execution \ No newline at end of file + var: ssh_copy \ No newline at end of file diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index c60f4a70..92bc3493 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -16,7 +16,7 @@ file: path: "~/.ssh" state: directory - mode: "0700" + mode: '700' register: ssh_directory_creation when: ssh_directory_exists_check is defined and ssh_directory_exists_check.stat.exists == false @@ -49,18 +49,4 @@ - name: Print results of ssh key pair creation tags: keymastr debug: - var: ssh_key_creation - -- name: add the new ssh key to the ansible.cfg file - tags: keymastr - lineinfile: - path: ansible.cfg - line: "private_key_file = ~/.ssh/{{env_ssh_ans_name}}" - state: present - backup: yes - register: ssh_config_file_key_addition - -- name: Print results of adding ssh key to ansible.cfg file - tags: keymastr - debug: - var: ssh_config_file_key_addition + var: ssh_key_creation \ No newline at end of file From c8ef2775023e56e7c4e72fe22a252f9c7b0b6e37 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 17:01:28 -0500 Subject: [PATCH 442/885] Added ignore_errors to encrypt files main playbook in order to ensure idempotency. The task was failing the entire process when env.yaml was already encrypted. 
--- roles/encrypt_files/tasks/main.yaml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/roles/encrypt_files/tasks/main.yaml b/roles/encrypt_files/tasks/main.yaml index 5dbaf7b6..1fe8c80d 100644 --- a/roles/encrypt_files/tasks/main.yaml +++ b/roles/encrypt_files/tasks/main.yaml @@ -1,6 +1,8 @@ --- -- name: encrypt provided files_to_encrypt from main playbook to protect sensitive data +- name: encrypt env.yaml, skip if already encrypted tags: setup - shell: ansible-vault encrypt {{ items }} - loop: "{{ files_to_encrypt }}" \ No newline at end of file + command: ansible-vault encrypt "{{ item }}" + loop: "{{ files_to_encrypt }}" + register: encrypt_files + ignore_errors: yes \ No newline at end of file From ce44666e5df1af8e48572163dc9e97d80dc33c8d Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 17:01:28 -0500 Subject: [PATCH 443/885] Added ignore_errors to encrypt files main playbook in order to ensure idempotency. The task was failing the entire process when env.yaml was already encrypted. 
--- roles/encrypt_files/tasks/main.yaml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/roles/encrypt_files/tasks/main.yaml b/roles/encrypt_files/tasks/main.yaml index 5dbaf7b6..1fe8c80d 100644 --- a/roles/encrypt_files/tasks/main.yaml +++ b/roles/encrypt_files/tasks/main.yaml @@ -1,6 +1,8 @@ --- -- name: encrypt provided files_to_encrypt from main playbook to protect sensitive data +- name: encrypt env.yaml, skip if already encrypted tags: setup - shell: ansible-vault encrypt {{ items }} - loop: "{{ files_to_encrypt }}" \ No newline at end of file + command: ansible-vault encrypt "{{ item }}" + loop: "{{ files_to_encrypt }}" + register: encrypt_files + ignore_errors: yes \ No newline at end of file From 2e04218a6a23ef6d4a0958c7f9ac58226993d6b8 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 17:08:15 -0500 Subject: [PATCH 444/885] Removed sshpass from list of dependencies to install --- roles/install_dependencies/tasks/main.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/install_dependencies/tasks/main.yaml b/roles/install_dependencies/tasks/main.yaml index cfef5772..fde850c2 100644 --- a/roles/install_dependencies/tasks/main.yaml +++ b/roles/install_dependencies/tasks/main.yaml @@ -20,7 +20,6 @@ - ansible-galaxy collection install community.crypto - ansible-galaxy collection install ansible.posix - ansible-galaxy collection install community.libvirt - - sudo dnf install sshpass -y - sudo dnf install openssh -y when: ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Debian" \ No newline at end of file From 2135dc3e5dbfa9981d2d54164a526d314f5934b9 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 17:08:15 -0500 Subject: [PATCH 445/885] Removed sshpass from list of dependencies to install --- roles/install_dependencies/tasks/main.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/roles/install_dependencies/tasks/main.yaml b/roles/install_dependencies/tasks/main.yaml index 
cfef5772..fde850c2 100644 --- a/roles/install_dependencies/tasks/main.yaml +++ b/roles/install_dependencies/tasks/main.yaml @@ -20,7 +20,6 @@ - ansible-galaxy collection install community.crypto - ansible-galaxy collection install ansible.posix - ansible-galaxy collection install community.libvirt - - sudo dnf install sshpass -y - sudo dnf install openssh -y when: ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Debian" \ No newline at end of file From 1084b4e4e6bb2a4f5c113d65a40cd86d80613620 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 17:09:40 -0500 Subject: [PATCH 446/885] Moved creation of ocp_ssh_pub file to set_inventory to separate setup tasks in a way that makes more sense --- roles/set_inventory/tasks/main.yaml | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml index 9c4ec745..ba03b158 100644 --- a/roles/set_inventory/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -78,4 +78,16 @@ ansible.builtin.lineinfile: path: ansible.cfg insertafter: '\[defaults\]' - line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} \ No newline at end of file + line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} + +- name: delete ocp_ssh_pub file if it exists already to ensure idempotence + tags: getocp, bastion + file: + state: absent + path: roles/get-ocp/files/ocp_ssh_pub + +- name: create ocp_ssh_pub if it needs to be + file: + path: roles/get-ocp/files/ocp_ssh_pub + mode: '0755' + state: touch \ No newline at end of file From 01e01f43201b442040457588aa1735b6cde3c71b Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 17:09:40 -0500 Subject: [PATCH 447/885] Moved creation of ocp_ssh_pub file to set_inventory to separate setup tasks in a way that makes more sense --- roles/set_inventory/tasks/main.yaml | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git 
a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml index 9c4ec745..ba03b158 100644 --- a/roles/set_inventory/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -78,4 +78,16 @@ ansible.builtin.lineinfile: path: ansible.cfg insertafter: '\[defaults\]' - line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} \ No newline at end of file + line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} + +- name: delete ocp_ssh_pub file if it exists already to ensure idempotence + tags: getocp, bastion + file: + state: absent + path: roles/get-ocp/files/ocp_ssh_pub + +- name: create ocp_ssh_pub if it needs to be + file: + path: roles/get-ocp/files/ocp_ssh_pub + mode: '0755' + state: touch \ No newline at end of file From d131500f2358dc792a2eb66d1e17cd78aef9bd0a Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 17:10:38 -0500 Subject: [PATCH 448/885] Fixed small bug in get-ocp playbook --- roles/get-ocp/tasks/main.yaml | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index f6143050..17959d8d 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -85,27 +85,6 @@ group: root mode: '0755' -- name: check to see if ocp_ssh_pub file exists - tags: getocp,bastion - stat: - path: roles/get-ocp/files/ocp_ssh_pub - register: ocp_ssh_pub - -- name: delete ocp_ssh_pub file - tags: getocp, bastion - file: - state: absent - path: roles/get-ocp/files/ocp_ssh_pub - when: ocp_ssh_pub.stat.exists == true - register: ocp_ssh_pub_del - -- name: create ocp_ssh_pub if it needs to be - file: - path: roles/get-ocp/files/ocp_ssh_pub - mode: '0755' - state: touch - when: ocp_ssh_pub_del.changed == true or ocp_ssh_pub.stat.exists == false - - name: Fetch ssh key from bastion for use in install-config tags: getocp,bastion ansible.builtin.fetch: From 8c9c5685f1581b241cc77a4e6bff00c186153bef Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 
17:10:38 -0500 Subject: [PATCH 449/885] Fixed small bug in get-ocp playbook --- roles/get-ocp/tasks/main.yaml | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index f6143050..17959d8d 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -85,27 +85,6 @@ group: root mode: '0755' -- name: check to see if ocp_ssh_pub file exists - tags: getocp,bastion - stat: - path: roles/get-ocp/files/ocp_ssh_pub - register: ocp_ssh_pub - -- name: delete ocp_ssh_pub file - tags: getocp, bastion - file: - state: absent - path: roles/get-ocp/files/ocp_ssh_pub - when: ocp_ssh_pub.stat.exists == true - register: ocp_ssh_pub_del - -- name: create ocp_ssh_pub if it needs to be - file: - path: roles/get-ocp/files/ocp_ssh_pub - mode: '0755' - state: touch - when: ocp_ssh_pub_del.changed == true or ocp_ssh_pub.stat.exists == false - - name: Fetch ssh key from bastion for use in install-config tags: getocp,bastion ansible.builtin.fetch: From f949d67eaa6e58a28cf2b2f4de53ab06d390b536 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 17:13:49 -0500 Subject: [PATCH 450/885] Added ssh agent to store ssh key passwords to reduce number of times a password is prompted during installation. --- roles/ssh_agent/tasks/main.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 roles/ssh_agent/tasks/main.yaml diff --git a/roles/ssh_agent/tasks/main.yaml b/roles/ssh_agent/tasks/main.yaml new file mode 100644 index 00000000..f3910dfc --- /dev/null +++ b/roles/ssh_agent/tasks/main.yaml @@ -0,0 +1,11 @@ +--- + +- name: add ansible ssh key to ssh-agent. See README Step 5 note for additional details. 
+ tags: ssh-agent + shell: eval $(ssh-agent) && ssh-add ~/.ssh/{{env_ssh_ans_name}} + register: ssh_agent_setup + +- name: print results from setting up ssh agent + tags: ssh-agent + debug: + var: ssh_agent_setup From 15cb8de5656309ba68da2c2f8d908325dada1179 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 17:13:49 -0500 Subject: [PATCH 451/885] Added ssh agent to store ssh key passwords to reduce number of times a password is prompted during installation. --- roles/ssh_agent/tasks/main.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 roles/ssh_agent/tasks/main.yaml diff --git a/roles/ssh_agent/tasks/main.yaml b/roles/ssh_agent/tasks/main.yaml new file mode 100644 index 00000000..f3910dfc --- /dev/null +++ b/roles/ssh_agent/tasks/main.yaml @@ -0,0 +1,11 @@ +--- + +- name: add ansible ssh key to ssh-agent. See README Step 5 note for additional details. + tags: ssh-agent + shell: eval $(ssh-agent) && ssh-add ~/.ssh/{{env_ssh_ans_name}} + register: ssh_agent_setup + +- name: print results from setting up ssh agent + tags: ssh-agent + debug: + var: ssh_agent_setup From e031db1e712cca8d2d83f9b8148e84c5bb1cf2eb Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 17:15:50 -0500 Subject: [PATCH 452/885] Updated README as I went along with most recent installation to reflect updates and to be even more detailed. --- README.md | 58 +++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 43 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 4728d552..eca25c2d 100644 --- a/README.md +++ b/README.md @@ -66,25 +66,52 @@ * If you are not already there, navigate to the folder where you saved the Git repository in your terminal * Execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Watch Ansible as it completes the installation, correcting errors if they arise. 
- * If the process fails in error, you should be able to run the same shell command to start the process from the top. - * Alternatively, use tags to run only the tasks that have that tag. See main.yaml to determine what you would like - to run. There is also a list of all the tags at the bottom of this page for reference. + * To look at what is running in detail, from the main directory open roles/'task-you-want-to-inspect'/tasks/main.yaml + * If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of the main playbook run, use tags. See main.yaml to determine what you part you would like to run and use those tags when running the main playbook. There is also a list of all the tags at the bottom of this page for reference. Example: "ansible-playbook main.yaml --ask-become-pass -- tags 'bastion,get-ocp' + * Note: we chose to not edit the user's .bash_profile/.bashrc with an automatic ssh-add command because that would change the user's local workstation set-up in a way that was undesirable. Therefore, if you close out your terminal session in the middle of provisioning, you will need to run "ansible-playbook main.yaml --tags ssh-agent" before doing anything else. * **Step 6: Bastion Configuration** * Once the create_bastion task runs, it will pause the playbook to give you time to configure it. * Use a web browser to open the cockpit by going to: "https://your-KVM-host-IP-address-here:9090" - * Click on the "Virtual Machines" tab, then click on bastion from the list, click on the black terminal - screen and press Enter. 
Complete its installation with these options enabled: - * server - * hardware monitoring utilities - * networking file system client - * remote management for linux - * headless mgmt - * system tools - * basic web server - * network servers + * Click on the "Virtual Machines" tab, then click on bastion from the list, click on the black terminal screen and press Enter. Wait until you see it asking for you to make a selection. + * To finish the bastion's installation, you will need to configure the VM by doing the following: + * Press 2 to enter Text Mode (hit the Enter key after every step) + * From the main menu, press 3 to configure the Installation Source + * Press 3 to use Network + * Type in the URL that points to your RHEL ISO + * From the main menu, press 4 to go to Software Selection + * Press 2 for Server, Enter, then "c" to continue + * From this list, press the number and then hit Enter for each: + * 1 - Hardware Monitoring utilities + * 8 - Networking File System Client2 + * 9 - Network Servers + * 11 - Remote Management for Linux + * 13 - Basic Web Server + * 17 - Headless Management + * 21 - System Tools + * Press "c" to continue, and 'c' again to get back to the main menu + * From the main menu, press 5 to set the installation destination + * If there is a disk already checked, press "c" to use the continue. If not, select the disk you would like to use. + * If it is not already selected, press 2 to use all free space, otherwise, press "c" to continue. 
+ * Select "LVM" from the list and press "c" to continue + * From the main menu, press 7 to set Network Configurations + * Press 2 to configure device enc1 + * Press 1 and enter the bastion's IP address + * Press 2 and enter the netmask + * Press 3 and enter the default gateway + * Press 4 and type "ignore" + * Press 6 and enter DNS nameservers + * Press 'c' to continue + * From the main menu, press 9 to set the Root Password + * From the main menu, press 10 to create a user + * Press 1 to create user + * Press 2 to set full name + * Press 3 to set a username + * Press 5 to set a password + * Press 6 to give the user root access (optional) + * From the main menu, double check that all check boxes have an X * Once you fill out all the required configuration settings, press "b" to begin installation. - * Once you see "bastion login", come back to the terminal to continue your run by pressing "ctrl+c" and then - "c". If there was a problem and you need to stop the playbook, press "ctrl+c" and then "a". + * Wait for the installation to complete, this may take some time. Monitor its progress, it may need you to press 'Enter' to continue. Once the installation completes, you will have to press the 'Run' button on the cockpit for it to start up and finish configuration. + * Once you see "bastion login", come back to the terminal to continue your run by pressing "ctrl+c" and then "c". If there was a problem and you need to stop the playbook, press "ctrl+c" and then "a". If configuration and installation took longer than the pause and the playbook continued and then failed, continue the playbook by running the following command: "ansible-playbook main.yaml --ask-become-pass --tags 'bastion,create_nodes'" * **Step 7: Starting Up Bootstrap and Control Nodes** * The playbook will continue to run, preparing the bootstrap and control nodes. 
* To monitor the nodes as they come up, watch them on the cockpit at: "https://your-KVM-host-IP-address-here:9090" @@ -171,4 +198,5 @@ * prep = run all setup playbooks * selinux = for tasks related to SELinux settings * setup = first-time setup of ansible +* ssh-agent = setting up ansible ssh-agent * ssh-copy-id = for copying ssh id \ No newline at end of file From e8f3ea1ce6d0dd7a0c594bbb848e32fb3b3989e3 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 17:15:50 -0500 Subject: [PATCH 453/885] Updated README as I went along with most recent installation to reflect updates and to be even more detailed. --- README.md | 58 +++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 43 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index 4728d552..eca25c2d 100644 --- a/README.md +++ b/README.md @@ -66,25 +66,52 @@ * If you are not already there, navigate to the folder where you saved the Git repository in your terminal * Execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Watch Ansible as it completes the installation, correcting errors if they arise. - * If the process fails in error, you should be able to run the same shell command to start the process from the top. - * Alternatively, use tags to run only the tasks that have that tag. See main.yaml to determine what you would like - to run. There is also a list of all the tags at the bottom of this page for reference. + * To look at what is running in detail, from the main directory open roles/'task-you-want-to-inspect'/tasks/main.yaml + * If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of the main playbook run, use tags. See main.yaml to determine what you part you would like to run and use those tags when running the main playbook. There is also a list of all the tags at the bottom of this page for reference. 
Example: "ansible-playbook main.yaml --ask-become-pass -- tags 'bastion,get-ocp' + * Note: we chose to not edit the user's .bash_profile/.bashrc with an automatic ssh-add command because that would change the user's local workstation set-up in a way that was undesirable. Therefore, if you close out your terminal session in the middle of provisioning, you will need to run "ansible-playbook main.yaml --tags ssh-agent" before doing anything else. * **Step 6: Bastion Configuration** * Once the create_bastion task runs, it will pause the playbook to give you time to configure it. * Use a web browser to open the cockpit by going to: "https://your-KVM-host-IP-address-here:9090" - * Click on the "Virtual Machines" tab, then click on bastion from the list, click on the black terminal - screen and press Enter. Complete its installation with these options enabled: - * server - * hardware monitoring utilities - * networking file system client - * remote management for linux - * headless mgmt - * system tools - * basic web server - * network servers + * Click on the "Virtual Machines" tab, then click on bastion from the list, click on the black terminal screen and press Enter. Wait until you see it asking for you to make a selection. 
+ * To finish the bastion's installation, you will need to configure the VM by doing the following: + * Press 2 to enter Text Mode (hit the Enter key after every step) + * From the main menu, press 3 to configure the Installation Source + * Press 3 to use Network + * Type in the URL that points to your RHEL ISO + * From the main menu, press 4 to go to Software Selection + * Press 2 for Server, Enter, then "c" to continue + * From this list, press the number and then hit Enter for each: + * 1 - Hardware Monitoring utilities + * 8 - Networking File System Client2 + * 9 - Network Servers + * 11 - Remote Management for Linux + * 13 - Basic Web Server + * 17 - Headless Management + * 21 - System Tools + * Press "c" to continue, and 'c' again to get back to the main menu + * From the main menu, press 5 to set the installation destination + * If there is a disk already checked, press "c" to use the continue. If not, select the disk you would like to use. + * If it is not already selected, press 2 to use all free space, otherwise, press "c" to continue. + * Select "LVM" from the list and press "c" to continue + * From the main menu, press 7 to set Network Configurations + * Press 2 to configure device enc1 + * Press 1 and enter the bastion's IP address + * Press 2 and enter the netmask + * Press 3 and enter the default gateway + * Press 4 and type "ignore" + * Press 6 and enter DNS nameservers + * Press 'c' to continue + * From the main menu, press 9 to set the Root Password + * From the main menu, press 10 to create a user + * Press 1 to create user + * Press 2 to set full name + * Press 3 to set a username + * Press 5 to set a password + * Press 6 to give the user root access (optional) + * From the main menu, double check that all check boxes have an X * Once you fill out all the required configuration settings, press "b" to begin installation. - * Once you see "bastion login", come back to the terminal to continue your run by pressing "ctrl+c" and then - "c". 
If there was a problem and you need to stop the playbook, press "ctrl+c" and then "a". + * Wait for the installation to complete, this may take some time. Monitor its progress, it may need you to press 'Enter' to continue. Once the installation completes, you will have to press the 'Run' button on the cockpit for it to start up and finish configuration. + * Once you see "bastion login", come back to the terminal to continue your run by pressing "ctrl+c" and then "c". If there was a problem and you need to stop the playbook, press "ctrl+c" and then "a". If configuration and installation took longer than the pause and the playbook continued and then failed, continue the playbook by running the following command: "ansible-playbook main.yaml --ask-become-pass --tags 'bastion,create_nodes'" * **Step 7: Starting Up Bootstrap and Control Nodes** * The playbook will continue to run, preparing the bootstrap and control nodes. * To monitor the nodes as they come up, watch them on the cockpit at: "https://your-KVM-host-IP-address-here:9090" @@ -171,4 +198,5 @@ * prep = run all setup playbooks * selinux = for tasks related to SELinux settings * setup = first-time setup of ansible +* ssh-agent = setting up ansible ssh-agent * ssh-copy-id = for copying ssh id \ No newline at end of file From a412333354854e8ca7fe231e010e5f4a47b2743e Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 17:16:32 -0500 Subject: [PATCH 454/885] added ssh_agent role to main.yaml --- main.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/main.yaml b/main.yaml index dfa896db..f4f8e163 100644 --- a/main.yaml +++ b/main.yaml @@ -1,7 +1,7 @@ --- - hosts: localhost - tags: localhost, prep + tags: setup connection: local become: false gather_facts: no @@ -14,6 +14,7 @@ - encrypt_files - ssh_key_gen - ssh_copy_id + - ssh_agent - hosts: kvm_host tags: kvm_host,kvm_prep @@ -38,6 +39,8 @@ gather_facts: no vars: - ssh_target_ip: "{{ env_ip_bastion }}" + vars_files: + - env.yaml 
roles: - ssh_copy_id # to connect to bastion From cccb810626d91b56ad4447ec2b54bb4ad0a22496 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 17:16:32 -0500 Subject: [PATCH 455/885] added ssh_agent role to main.yaml --- main.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/main.yaml b/main.yaml index dfa896db..f4f8e163 100644 --- a/main.yaml +++ b/main.yaml @@ -1,7 +1,7 @@ --- - hosts: localhost - tags: localhost, prep + tags: setup connection: local become: false gather_facts: no @@ -14,6 +14,7 @@ - encrypt_files - ssh_key_gen - ssh_copy_id + - ssh_agent - hosts: kvm_host tags: kvm_host,kvm_prep @@ -38,6 +39,8 @@ gather_facts: no vars: - ssh_target_ip: "{{ env_ip_bastion }}" + vars_files: + - env.yaml roles: - ssh_copy_id # to connect to bastion From 733d304f45f4862a4b4eb0bab2e31eb470b27381 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 17:19:51 -0500 Subject: [PATCH 456/885] Removed roles/get-ocp/files/ocp_ssh_pub to avoid pushing it to Github --- roles/get-ocp/files/ocp_ssh_pub | 1 - 1 file changed, 1 deletion(-) delete mode 100644 roles/get-ocp/files/ocp_ssh_pub diff --git a/roles/get-ocp/files/ocp_ssh_pub b/roles/get-ocp/files/ocp_ssh_pub deleted file mode 100644 index 6f580fed..00000000 --- a/roles/get-ocp/files/ocp_ssh_pub +++ /dev/null @@ -1 +0,0 @@ -ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC9ZoxqTm4Jwopmrj61dyvolXcMVo0ebGknuKntIN+oUPuuyT0PA/2SMagE2HkaEACoO+6WRfy2uohvlsIkEsqNyfmwx7AXcEnf/jtvRdI6421rgd4hTyihlt4S9MEHVIOvtQpNfB4bA+7e+QGcWQBa66JOB9BTYYt80et0bXBwtpSQbimSRXohaisvqfUYvhXaMM8Gx+QnoN22atCw8hgA4pSztXzZBM6zSraUl7YgbFziAFdseFXlyZ3CkdvB+Ma7t5A1SszoHqFCpvIt6dCQqqg6CdQne/k789vFx+Pj5aQ+FZqfO6KNfEhEzZ7qLmyTC1M2VeXMEyWSJRpRTXUJQ9ag3z0GWzGZDUcSaz7vrJOEspuTMn4YuppwOHEVbyzesKdQdergf4g3bj6aIRnYHrYkh36CdKC+DB3+G9GBr94wDZfpClM00dGeRHsFg0GYl9btAGEvOckDdfz9kP4ND2S9fiSfVJh3WIqyU2QkRuJXZJHOu2iIpK4X2+dhv+1giavNIxBy54vJXob0hHM/Tw4YzYfQsLZjvuEkM6Z7OcSNsfpNYo4Q2izJzXI89W4obcU6obZzzDJDZR49Jtc6jUS9fkvNTpuANwLupCrVjvS46i1Y1cbJejosHlQ+zDg8U7zsy30nMAcYqrHytD7tx6fwyVO9fem2q0Ta5P2CCQ== ocpz_distribution From 669d5cb69fe5b53e5d34a8a055b1a0247943ae43 Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 7 Sep 2021 17:19:51 -0500 Subject: [PATCH 457/885] Removed roles/get-ocp/files/ocp_ssh_pub to avoid pushing it to Github --- roles/get-ocp/files/ocp_ssh_pub | 1 - 1 file changed, 1 deletion(-) delete mode 100644 roles/get-ocp/files/ocp_ssh_pub diff --git a/roles/get-ocp/files/ocp_ssh_pub b/roles/get-ocp/files/ocp_ssh_pub deleted file mode 100644 index 6f580fed..00000000 --- a/roles/get-ocp/files/ocp_ssh_pub +++ /dev/null @@ -1 +0,0 @@ -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC9ZoxqTm4Jwopmrj61dyvolXcMVo0ebGknuKntIN+oUPuuyT0PA/2SMagE2HkaEACoO+6WRfy2uohvlsIkEsqNyfmwx7AXcEnf/jtvRdI6421rgd4hTyihlt4S9MEHVIOvtQpNfB4bA+7e+QGcWQBa66JOB9BTYYt80et0bXBwtpSQbimSRXohaisvqfUYvhXaMM8Gx+QnoN22atCw8hgA4pSztXzZBM6zSraUl7YgbFziAFdseFXlyZ3CkdvB+Ma7t5A1SszoHqFCpvIt6dCQqqg6CdQne/k789vFx+Pj5aQ+FZqfO6KNfEhEzZ7qLmyTC1M2VeXMEyWSJRpRTXUJQ9ag3z0GWzGZDUcSaz7vrJOEspuTMn4YuppwOHEVbyzesKdQdergf4g3bj6aIRnYHrYkh36CdKC+DB3+G9GBr94wDZfpClM00dGeRHsFg0GYl9btAGEvOckDdfz9kP4ND2S9fiSfVJh3WIqyU2QkRuJXZJHOu2iIpK4X2+dhv+1giavNIxBy54vJXob0hHM/Tw4YzYfQsLZjvuEkM6Z7OcSNsfpNYo4Q2izJzXI89W4obcU6obZzzDJDZR49Jtc6jUS9fkvNTpuANwLupCrVjvS46i1Y1cbJejosHlQ+zDg8U7zsy30nMAcYqrHytD7tx6fwyVO9fem2q0Ta5P2CCQ== 
ocpz_distribution From 2ca9d52f317de41c969f0b181aaae53e170a49fe Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 8 Sep 2021 09:48:37 -0500 Subject: [PATCH 458/885] Deleted variable values in env.yaml --- env.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/env.yaml b/env.yaml index 1f262588..b841b5d1 100644 --- a/env.yaml +++ b/env.yaml @@ -6,10 +6,10 @@ env_compute_arch: s390x env_control_count: 3 env_control_arch: s390x env_metadata_name: -env_cidr: 10.128.0.0/14 +env_cidr: env_host_prefix: 23 env_network_type: OpenShiftSDN -env_service_network: 172.30.0.0/16 +env_service_network: env_fips: "false" # "true" or "false" (include quotes) env_pullSecret: '' #paste it into these single quotes here From d5b9d5218737745039a1af8517661bbb51527289 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 8 Sep 2021 09:50:02 -0500 Subject: [PATCH 459/885] Deleted variable values in env.yaml --- env.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env.yaml b/env.yaml index b841b5d1..a102f2d7 100644 --- a/env.yaml +++ b/env.yaml @@ -7,7 +7,7 @@ env_control_count: 3 env_control_arch: s390x env_metadata_name: env_cidr: -env_host_prefix: 23 +env_host_prefix: env_network_type: OpenShiftSDN env_service_network: env_fips: "false" # "true" or "false" (include quotes) From a83d9f72ee05da50884008f60cd3bc92c8e737ba Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 9 Sep 2021 09:58:56 -0500 Subject: [PATCH 460/885] Added YAML to .gitattributes so that YAML files are detected. 
--- .gitattributes | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitattributes b/.gitattributes index dfe07704..165cd6be 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,2 +1,4 @@ # Auto detect text files and perform LF normalization * text=auto +*.yaml linguist-detectable=true +*.yaml linguist-language=YAML From 8e749aafb2929641b8756d2bfd44d453f68d8ac1 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 9 Sep 2021 09:58:56 -0500 Subject: [PATCH 461/885] Added YAML to .gitattributes so that YAML files are detected. --- .gitattributes | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitattributes b/.gitattributes index dfe07704..165cd6be 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,2 +1,4 @@ # Auto detect text files and perform LF normalization * text=auto +*.yaml linguist-detectable=true +*.yaml linguist-language=YAML From 951a9d28f59a005797398169292193332d0badcb Mon Sep 17 00:00:00 2001 From: jacobemery Date: Tue, 14 Sep 2021 15:26:12 -0500 Subject: [PATCH 462/885] Cleared env.yaml cache --- .gitignore | 3 +-- env.yaml | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 2 deletions(-) create mode 100644 env.yaml diff --git a/.gitignore b/.gitignore index 663b903f..a96314c1 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,5 @@ .vscode roles/get-ocp/files/ocp_ssh_pub .vault_pass.txt -env.yaml inventory -ansible.cfg +ansible.cfg \ No newline at end of file diff --git a/env.yaml b/env.yaml new file mode 100644 index 00000000..c85c8109 --- /dev/null +++ b/env.yaml @@ -0,0 +1,41 @@ + +# to populate install_config +env_baseDomain: +env_compute_arch: s390x +env_control_count: 3 +env_control_arch: s390x +env_metadata_name: +env_cidr: +env_host_prefix: 23 +env_network_type: OpenShiftSDN +env_service_network: +env_fips: "false" # "true" or "false" (include quotes) +env_pullSecret: '' #paste it into these single quotes + +# to fill inventory +env_ip_kvm_host: +env_ip_bastion: +env_ip_bootstrap: +env_ip_control_0: 
+env_ip_control_1: +env_ip_control_2: +env_ip_compute_0: +env_ip_compute_1: + +# SSH +#Username to use for SSH into KVM and host bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. +env_ssh_username: +#Password to use for SSH into KVM host and bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. +env_ssh_pass: +# Ansible passwordless SSH +env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible +env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) + +# OpenShift cluster's SSH key comment +env_ssh_ocp_comm: "" + +# networking +env_dns_nameserver: +env_default_gateway: +env_netmask: +env_ftp: \ No newline at end of file From a6b9be1bd50975210625e48433d32f455a51e2cd Mon Sep 17 00:00:00 2001 From: pswilso2017 Date: Wed, 15 Sep 2021 15:54:37 -0500 Subject: [PATCH 463/885] Edited Readme --- README.md | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/README.md b/README.md index eca25c2d..ebed310a 100644 --- a/README.md +++ b/README.md @@ -20,8 +20,8 @@ * homebrew package manager installed (how-to: https://brew.sh/) * Updated software for command line tools (run "softwareupdate --all --install" in your terminal) * Access to a logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: - * 6 Integrated Facilities for Linux (IFLs) - * 75 GB of RAM + * 6 Dedicated Integrated Facilities for Linux (IFLs) + * 85 GB of RAM * 1 TB of disk space * On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed with the following options enabled: @@ -46,15 +46,15 @@ * In a web browser, navigate to https://console.redhat.com/openshift/install/ibmz/user-provisioned * Download your local command line tools (oc and kubectl) * Copy the OpenShift pull secret (for use in the next step) -* **Step 2: Set Variables** +* **Step 3: Set 
Variables** * In a text editor of your choice, open env.yaml, found in the main directory of this repository * Fill out all of the required variables for your specific installation -* **Step 3: DNS Configuration** +* **Step 4: DNS Configuration** * Get DNS configuration files (forward (.db), reverse (.rev), and named.conf), or have them pre-defined by your networking team. * Place them in the roles/dns/files folder * Please leave the named.conf the same name. * Rename the .db and .rev files with the same name you set for "env_metadata_name" in env.yaml (i.e. distribution.rev) -* **Step 4: Setup Script** +* **Step 5: Setup Script** * Navigate to the folder where you saved the Git Repository * Run "ansible-playbook setup.yaml --ask-become-pass" * When the setup playbook starts, it will prompt you for a password to use for encrypting Ansible vault files @@ -62,14 +62,14 @@ * If you would like to decrypt a file protected by Ansible vault, run: "ansible-vault decrypt file-name-here" ### Provisioning -* **Step 5: Running the Main Playbook** +* **Step 6: Running the Main Playbook** * If you are not already there, navigate to the folder where you saved the Git repository in your terminal * Execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Watch Ansible as it completes the installation, correcting errors if they arise. * To look at what is running in detail, from the main directory open roles/'task-you-want-to-inspect'/tasks/main.yaml * If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of the main playbook run, use tags. See main.yaml to determine what you part you would like to run and use those tags when running the main playbook. There is also a list of all the tags at the bottom of this page for reference. 
Example: "ansible-playbook main.yaml --ask-become-pass -- tags 'bastion,get-ocp' * Note: we chose to not edit the user's .bash_profile/.bashrc with an automatic ssh-add command because that would change the user's local workstation set-up in a way that was undesirable. Therefore, if you close out your terminal session in the middle of provisioning, you will need to run "ansible-playbook main.yaml --tags ssh-agent" before doing anything else. -* **Step 6: Bastion Configuration** +* **Step 7: Bastion Configuration** * Once the create_bastion task runs, it will pause the playbook to give you time to configure it. * Use a web browser to open the cockpit by going to: "https://your-KVM-host-IP-address-here:9090" * Click on the "Virtual Machines" tab, then click on bastion from the list, click on the black terminal screen and press Enter. Wait until you see it asking for you to make a selection. @@ -112,7 +112,7 @@ * Once you fill out all the required configuration settings, press "b" to begin installation. * Wait for the installation to complete, this may take some time. Monitor its progress, it may need you to press 'Enter' to continue. Once the installation completes, you will have to press the 'Run' button on the cockpit for it to start up and finish configuration. * Once you see "bastion login", come back to the terminal to continue your run by pressing "ctrl+c" and then "c". If there was a problem and you need to stop the playbook, press "ctrl+c" and then "a". If configuration and installation took longer than the pause and the playbook continued and then failed, continue the playbook by running the following command: "ansible-playbook main.yaml --ask-become-pass --tags 'bastion,create_nodes'" -* **Step 7: Starting Up Bootstrap and Control Nodes** +* **Step 8: Starting Up Bootstrap and Control Nodes** * The playbook will continue to run, preparing the bootstrap and control nodes. 
* To monitor the nodes as they come up, watch them on the cockpit at: "https://your-KVM-host-IP-address-here:9090" * Click on the "Virtual Machines" tab and then click on the VM you want to monitor. Click on the black @@ -120,7 +120,7 @@ * Once you see "node-name login" prompt come back to the terminal where you ran Ansible and press "ctrl+c" and then "c" to continue running the playbook. * If you encounter an error that does not resolve with time, press "ctrl+c" and then "a" to stop the process and debug. -* **Step 8: Bootkube Verification** +* **Step 9: Bootkube Verification** * SSH into the bastion (run "ssh your-bastion-IP-address-here" in the terminal) * From there, change to root user (run "su root") and type in the root password that you set during configuration * Then SSH into the bootstrap as core ("ssh core@your-bootstrap-IP-address-here") @@ -130,36 +130,36 @@ * This may take some time, 30 minutes or more. Check in occassionally by running "journalctl -u bootkube.service" again to update the log. Remember to hold the spacebar to go to the bottom, press "q" to quit. * Once all control nodes are connected, the bootkube log will read "bootkube.service complete". -* **Step 9: Starting Up Compute Nodes** +* **Step 10: Starting Up Compute Nodes** * Repeat Step 7 with the Compute nodes. * Monitor their status at the cockpit, found at "https://your-KVM-host-IP-address:9090" * They are ready once their terminal screen shows a login prompt * Once all your compute nodes are up and running, and bootkube is complete, you are ready for cluster verification ### Verification -* **Step 10: Export Kube Config** +* **Step 11: Export Kube Config** * SSH into the bastion (run "ssh your-bastion-IP-address-here") * Change to root user (run "su root") and type in your password from when you configured the bastion. 
* Then run "export KUBECONFIG=/ocpinst/auth/kubeconfig" * Check that worked by running "oc whoami", which should return "system:admin" -* **Step 11: Approve Certificates** +* **Step 12: Approve Certificates** * From the bastion, running as root user (as above) run "oc get csr". This will bring up a list of certificates that need approval. * To approve all certificates at the same time, run the following command: "for i in \`oc get csr --no-headers | grep -i pending | awk '{ print $1 }\'`; do oc adm certificate approve $i; done" * It may take some time for all the certificates that need approval to show up. Keep running "oc get csr" to check to make sure that no new certificates have appeared since you last approved them. * Once all certificates read "Approved, Issued". You're ready for the next step. -* **Step 11: Wait for Cluster To Become Operational** +* **Step 13: Wait for Cluster To Become Operational** * From the bastion, as root user (as above) check node status by running: "oc get nodes". All nodes need to be "Ready" in the "Status" column. * From the bastion, as root user (as above) run "oc get clusteroperators". All cluster operators need to be "True" in the "Available" column. * It may take hours, especially the cluster operators. Run the above two bullets' commmands to check in occasionally. * Once all nodes are ready and cluster operators are available, you are ready to continue to the next step. -* **Step 12: Verify OpenShift Installation** +* **Step 14: Verify OpenShift Installation** * From the bastion as root user (as above), navigate to /ocpinst ("cd /ocpinst") * Run "./openshift-install --dir=/ocpinst wait-for install-complete" * If installation is ready, running the above command will give you some information about how to log into the OpenShift cluster's dashboard. * Copy the provided URL into a web browser and use "kubeadmin" as login and the provided password for first time sign-on. 
-* **Step 14: Celebrate!** +* **Step 15: Celebrate!** * Your OpenShift cluster provisioning and installation is now complete. * Optional: Leave the bootstrap running as is, shut it down and destroy it, or convert it into a compute node. From 9ae65aefd3ae0ae352b0420cb83c364ff8ce1635 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 29 Oct 2021 15:36:32 -0500 Subject: [PATCH 464/885] Fixed types in README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ebed310a..8f17b2a7 100644 --- a/README.md +++ b/README.md @@ -82,7 +82,7 @@ * Press 2 for Server, Enter, then "c" to continue * From this list, press the number and then hit Enter for each: * 1 - Hardware Monitoring utilities - * 8 - Networking File System Client2 + * 8 - Networking File System Client * 9 - Network Servers * 11 - Remote Management for Linux * 13 - Basic Web Server From aba9e03dda5fbe862e85276eddb51deada9bf991 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 29 Oct 2021 15:37:38 -0500 Subject: [PATCH 465/885] Added variables for use in bastion configuration automation --- env.yaml | 53 +++++++++++++++++++++-------- roles/set_inventory/tasks/main.yaml | 41 ++++++++++++++++++---- 2 files changed, 72 insertions(+), 22 deletions(-) diff --git a/env.yaml b/env.yaml index c85c8109..05dd72d3 100644 --- a/env.yaml +++ b/env.yaml @@ -1,18 +1,30 @@ +# Red Hat account with license to regsiter VMs +env_rh_email: +env_rh_pass: -# to populate install_config -env_baseDomain: +# to populate OpenShift install config file. 
+env_apiVersion: v1 +env_baseDomain: +env_hyperthreading_compute: Enabled +env_compute_name: compute +env_compute_count: 2 env_compute_arch: s390x +env_hyperthreading_control: Enabled +env_control_name: control env_control_count: 3 env_control_arch: s390x env_metadata_name: -env_cidr: +env_cidr: 10.128.0.0/14 env_host_prefix: 23 env_network_type: OpenShiftSDN -env_service_network: +env_service_network: 172.30.0.0/16 env_fips: "false" # "true" or "false" (include quotes) env_pullSecret: '' #paste it into these single quotes +# OpenShift SSH key is generated via Ansible in ssh_ocp_key_gen role -# to fill inventory +# IP addresses for the nodes that Ansible will be run against. This will automatically fill out the inventory file when setup.yaml is run. +# If you would like to add more control/compute nodes, add a line below following the same naming conventions. +# Double check that the env_control/compute_count variables above match the total number of corresponding nodes below (which are 0 indexed). env_ip_kvm_host: env_ip_bastion: env_ip_bootstrap: @@ -22,20 +34,31 @@ env_ip_control_2: env_ip_compute_0: env_ip_compute_1: -# SSH -#Username to use for SSH into KVM and host bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. -env_ssh_username: -#Password to use for SSH into KVM host and bastion for first-time set-up of ansible passwordless SSH. Assumes same for both kvm and bastion. -env_ssh_pass: -# Ansible passwordless SSH -env_ssh_ans_name: ansible #Ansible ssh key pair filename. default=ansible -env_ssh_ans_pass: "" #Preferred Ansible ssh password. Recommended no password. default=""(no password) +# Ansible passwordless SSH setup. Pre-filled with recommended values. +#Ansible ssh key pair filename +env_ssh_ans_name: ansible +#Ansible SSH password. Strongly recommended to leave as is (no password). 
+env_ssh_ans_pass: "" # OpenShift cluster's SSH key comment env_ssh_ocp_comm: "" -# networking +# Networking env_dns_nameserver: env_default_gateway: env_netmask: -env_ftp: \ No newline at end of file +env_ftp: + +# Compute node configuration, will also be used for bootstrap. Pre-filled values are minimum requirements. +env_comp_disk_size: 120 +env_comp_ram: 1600 +env_comp_cpu: host +env_comp_vcpu: 4 +env_comp_os_variant: rhel8.0 + +# Control node configuration. Pre-filled values are minimum requirements. +env_cont_disk_size: 120 +env_cont_ram: 1600 +env_cont_cpu: host +env_cont_vcpu: 4 +env_cont_os_variant: rhel8.0 diff --git a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml index ba03b158..7d5338ab 100644 --- a/roles/set_inventory/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -10,8 +10,16 @@ msg: Required variable "{{item}}" has not been provided in env.yaml file. when: vars[item] is undefined loop: + - env_rh_email + - env_rh_pass + - env_apiVersion - env_baseDomain + - env_hyperthreading_compute + - env_compute_name + - env_compute_count - env_compute_arch + - env_hyperthreading_control + - env_control_name - env_control_count - env_control_arch - env_metadata_name @@ -29,8 +37,6 @@ - env_ip_control_2 - env_ip_compute_0 - env_ip_compute_1 - - env_ssh_username - - env_ssh_pass - env_ssh_ans_name - env_ssh_ans_pass - env_ssh_ocp_comm @@ -38,6 +44,16 @@ - env_default_gateway - env_netmask - env_ftp + - env_comp_disk_size + - env_comp_ram + - env_comp_cpu + - env_comp_vcpu + - env_comp_os_variant + - env_cont_disk_size + - env_cont_ram + - env_cont_cpu + - env_cont_vcpu + - env_cont_os_variant - name: Populate inventory file with ip variables from env.yaml tags: setup @@ -54,15 +70,26 @@ {{ env_ip_bootstrap }} [control_nodes] - {{ env_ip_control_0 }} - {{ env_ip_control_1 }} - {{ env_ip_control_2 }} [compute_nodes] - {{ env_ip_compute_0 }} - {{ env_ip_compute_1 }} state: present + +- name: Populate inventory file with 
control IPs from env.yaml, the number of which depends on env_control_count + tags: setup + lineinfile: + path: inventory + line: "{{env_ip_control_{{ item }}}}" + insertafter: '\[control_nodes]' + with_sequence: count={{env_control_count}} +- name: Populate inventory file with compute IPs from env.yaml, the number of which depends on env_compute_count + tags: setup + lineinfile: + path: inventory + line: "{{ env_ip_compute_{{ item }} }}" + insertafter: '\[compute_nodes]' + with_sequence: count={{env_compute_count}} + - name: check inventory setup tags: setup command: ansible-inventory --list From 5e8c58cdf0804873f7b5e06962b23be741b583b8 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 29 Oct 2021 15:38:39 -0500 Subject: [PATCH 466/885] Deleted test-specific DNS config files --- roles/dns/files/distribution.db | 48 -------------------- roles/dns/files/distribution.rev | 24 ---------- roles/dns/files/named.conf | 78 -------------------------------- 3 files changed, 150 deletions(-) delete mode 100644 roles/dns/files/distribution.db delete mode 100644 roles/dns/files/distribution.rev delete mode 100644 roles/dns/files/named.conf diff --git a/roles/dns/files/distribution.db b/roles/dns/files/distribution.db deleted file mode 100644 index 35be68ba..00000000 --- a/roles/dns/files/distribution.db +++ /dev/null @@ -1,48 +0,0 @@ -$TTL 86400 -@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com.( - 2020021821 ;Serial - 3600 ;Refresh - 1800 ;Retry - 604800 ;Expire - 86400 ;Minimum TTL -) - -;Name Server Information -@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. - -;IP Address for Name Server -bastion IN A 9.60.87.139 - -;entry for bootstrap host. 
-;bootstrap IN A 9.60.87.133 -bootstrap.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.133 - -;entries for the master nodes -control-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.138 -;control-0 IN A 9.60.87.138 -control-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.137 -;control-1 IN A 9.60.87.137 -control-2.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.136 -;control-2 IN A 9.60.87.136 - -;entry for the bastion host -bastion IN A 9.60.87.139 - -;entries for the worker nodes -compute-0.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.135 -;compute-0 IN A 9.60.87.135 -compute-1.distribution.ocpz.wsclab.endicott.ibm.com IN A 9.60.87.134 -;compute-1 IN A 9.60.87.134 - -;entry of your load balancer -haproxy IN A 9.60.87.139 - -;The api identifies the IP of your load balancer. -api IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. -api-int IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. - -;The wildcard also identifies the load balancer. -apps IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. -*.apps IN CNAME haproxy.distribution.ocpz.wsclab.endicott.ibm.com. - -;EOF diff --git a/roles/dns/files/distribution.rev b/roles/dns/files/distribution.rev deleted file mode 100644 index 51ff3eb9..00000000 --- a/roles/dns/files/distribution.rev +++ /dev/null @@ -1,24 +0,0 @@ -$TTL 86400 -@ IN SOA bastion.distribution.ocpz.wsclab.endicott.ibm.com. admin.distribution.ocpz.wsclab.endicott.ibm.com ( - 2020011800 ;Serial - 3600 ;Refresh - 1800 ;Retry - 604800 ;Expire - 86400 ;Minimum TTL -) -;Name Server Information -@ IN NS bastion.distribution.ocpz.wsclab.endicott.ibm.com. -bastion IN A 9.60.87.139 - -;Reverse lookup for Name Server -139 IN PTR bastion.distribution.ocpz.wsclab.endicott.ibm.com. - -;PTR Record IP address to Hostname -138 IN PTR control-0.distribution.ocpz.wsclab.endicott.ibm.com. -137 IN PTR control-1.distribution.ocpz.wsclab.endicott.ibm.com. 
-136 IN PTR control-2.distribution.ocpz.wsclab.endicott.ibm.com. -135 IN PTR compute-0.distribution.ocpz.wsclab.endicott.ibm.com. -134 IN PTR compute-1.distribution.ocpz.wsclab.endicott.ibm.com. -133 IN PTR bootstrap.distribution.ocpz.wsclab.endicott.ibm.com. -139 IN PTR api-int.distribution.ocpz.wsclab.endicott.ibm.com. -139 IN PTR api.distribution.ocpz.wsclab.endicott.ibm.com. diff --git a/roles/dns/files/named.conf b/roles/dns/files/named.conf deleted file mode 100644 index b07a27be..00000000 --- a/roles/dns/files/named.conf +++ /dev/null @@ -1,78 +0,0 @@ -// -// named.conf -// -// Provided by Red Hat bind package to configure the ISC BIND named(8) DNS -// server as a caching only nameserver (as a localhost DNS resolver only). -// -// See /usr/share/doc/bind*/sample/ for example named configuration files. -// - -options { -// listen-on port 53 { 127.0.0.1; }; - listen-on port 53 { any; }; - listen-on-v6 port 53 { ::1; }; - directory "/var/named"; - dump-file "/var/named/data/cache_dump.db"; - statistics-file "/var/named/data/named_stats.txt"; - memstatistics-file "/var/named/data/named_mem_stats.txt"; - secroots-file "/var/named/data/named.secroots"; - recursing-file "/var/named/data/named.recursing"; - allow-query { any; }; - - /* - - If you are building an AUTHORITATIVE DNS server, do NOT enable recursion. - - If you are building a RECURSIVE (caching) DNS server, you need to enable - recursion. - - If your recursive DNS server has a public IP address, you MUST enable access - control to limit queries to your legitimate users. Failing to do so will - cause your server to become part of large scale DNS amplification - attacks. 
Implementing BCP38 within your network would greatly - reduce such attack surface - */ - recursion yes; - - dnssec-enable no; - dnssec-validation no; - - managed-keys-directory "/var/named/dynamic"; - - pid-file "/run/named/named.pid"; - session-keyfile "/run/named/session.key"; - - /* https://fedoraproject.org/wiki/Changes/CryptoPolicy */ - include "/etc/crypto-policies/back-ends/bind.config"; -}; - -logging { - channel default_debug { - file "data/named.run"; - severity dynamic; - }; -}; - -zone "." IN { - type forward; - forwarders { 9.60.70.82; }; -// type hint; -// file "named.ca"; -}; - -include "/etc/named.rfc1912.zones"; -include "/etc/named.root.key"; - -//forward zone -zone "distribution.ocpz.wsclab.endicott.ibm.com" IN { - type master; - file "distribution.db"; - allow-update { any; }; - allow-query { any; }; -}; - -//backward zone -zone "87.60.9.in-addr.arpa" IN { - type master; - file "distribution.rev"; - allow-update { any; }; - allow-query { any; }; -}; - From d2e0ce16b9e76d62df51293e8a80c92c45172002 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 2 Nov 2021 16:40:09 -0500 Subject: [PATCH 467/885] Added links to README for easier navigation and reference. Signed-off-by: Jacob Emery --- README.md | 52 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 33 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index 8f17b2a7..e665ab38 100644 --- a/README.md +++ b/README.md @@ -1,21 +1,30 @@ # Ansible-OpenShift-Provisioning -## Scope +## Table of Contents +[Scope](#Scope) +[Supported Operating Systems](#Supported-Operating-Systems) +[Pre-Requisites](#Pre-Requisites) +[Instructions](#Installation-Instructions) +[Setup](#Setup) +[Provisioning](#Provisioning) +[Verification] (#Verification) +[Teardown](#Teardown) +[Tags](#Tags) -* The goal of this playbook is to setup and deploy a User Provisioned Infrastructure (UPI) OpenShift cluster on an - IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. 
+## Scope +* The goal of this playbook is to setup and deploy a User Provisioned Infrastructure (UPI) OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. * This README file gives extremely detailed step-by-step instructions for you to use as a reference. It assumes near zero experience with Ansible. -## Supported Operating Systems (for local workstation): - +## Supported Operating Systems +for local workstation running Ansible * Linux (RedHat and Debian) * MacOS X ## Pre-requisites: - -* Red Hat Enterprise Linux (RHEL) license +* Red Hat Enterprise Linux (RHEL) license or free trial +* Red Hat OpenShift Container Platform license or free trial * Python3 intalled on your local computer (how-to: https://realpython.com/installing-python/) -* Ansible installed on your local computer (how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) +* Ansible installed on your local computer (how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) * If you are using Mac OS X for your localhost workstation to run Ansible, you also need to have: * homebrew package manager installed (how-to: https://brew.sh/) * Updated software for command line tools (run "softwareupdate --all --install" in your terminal) @@ -47,18 +56,18 @@ * Download your local command line tools (oc and kubectl) * Copy the OpenShift pull secret (for use in the next step) * **Step 3: Set Variables** - * In a text editor of your choice, open env.yaml, found in the main directory of this repository + * In a text editor of your choice, open [env.yaml](env.yaml) * Fill out all of the required variables for your specific installation * **Step 4: DNS Configuration** * Get DNS configuration files (forward (.db), reverse (.rev), and named.conf), or have them pre-defined by your networking team. 
- * Place them in the roles/dns/files folder + * Place them in the [roles/dns/files folder](roles/dns/files) * Please leave the named.conf the same name. - * Rename the .db and .rev files with the same name you set for "env_metadata_name" in env.yaml (i.e. distribution.rev) + * Rename the .db and .rev files with the same name you set for "env_metadata_name" in [env.yaml](env.yaml) (i.e. distribution.rev) * **Step 5: Setup Script** * Navigate to the folder where you saved the Git Repository * Run "ansible-playbook setup.yaml --ask-become-pass" * When the setup playbook starts, it will prompt you for a password to use for encrypting Ansible vault files - * No files are encrypted until you run the main playbook in step 5 below + * No files are encrypted until you run the [main playbook](main.yaml) in step 5 below * If you would like to decrypt a file protected by Ansible vault, run: "ansible-vault decrypt file-name-here" ### Provisioning @@ -67,10 +76,10 @@ * Execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Watch Ansible as it completes the installation, correcting errors if they arise. * To look at what is running in detail, from the main directory open roles/'task-you-want-to-inspect'/tasks/main.yaml - * If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of the main playbook run, use tags. See main.yaml to determine what you part you would like to run and use those tags when running the main playbook. There is also a list of all the tags at the bottom of this page for reference. Example: "ansible-playbook main.yaml --ask-become-pass -- tags 'bastion,get-ocp' + * If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of the main playbook run, use [tags](#Tags). 
See the [main playbook](main.yaml) to determine what part you would like to run and use those tags when running the [main playbook](main.yaml). Example: "ansible-playbook main.yaml --ask-become-pass -- tags 'bastion,get-ocp' * Note: we chose to not edit the user's .bash_profile/.bashrc with an automatic ssh-add command because that would change the user's local workstation set-up in a way that was undesirable. Therefore, if you close out your terminal session in the middle of provisioning, you will need to run "ansible-playbook main.yaml --tags ssh-agent" before doing anything else. * **Step 7: Bastion Configuration** - * Once the create_bastion task runs, it will pause the playbook to give you time to configure it. + * Once the [create_bastion](roles/create_bastion/tasks/main.yaml) task runs, it will pause the playbook to give you time to configure it. * Use a web browser to open the cockpit by going to: "https://your-KVM-host-IP-address-here:9090" * Click on the "Virtual Machines" tab, then click on bastion from the list, click on the black terminal screen and press Enter. Wait until you see it asking for you to make a selection. * To finish the bastion's installation, you will need to configure the VM by doing the following: @@ -111,18 +120,19 @@ * From the main menu, double check that all check boxes have an X * Once you fill out all the required configuration settings, press "b" to begin installation. * Wait for the installation to complete, this may take some time. Monitor its progress, it may need you to press 'Enter' to continue. Once the installation completes, you will have to press the 'Run' button on the cockpit for it to start up and finish configuration. - * Once you see "bastion login", come back to the terminal to continue your run by pressing "ctrl+c" and then "c". If there was a problem and you need to stop the playbook, press "ctrl+c" and then "a". 
If configuration and installation took longer than the pause and the playbook continued and then failed, continue the playbook by running the following command: "ansible-playbook main.yaml --ask-become-pass --tags 'bastion,create_nodes'" + * Once you see "bastion login", come back to the terminal to continue your run by pressing "ctrl+c" and then "c". + * If there was a problem and you need to stop the playbook, press "ctrl+c" and then "a" to Abort. If configuration and installation took longer than the pause and the playbook continued and then failed, continue the playbook by running the following command: "ansible-playbook main.yaml --ask-become-pass --tags 'bastion,create_nodes'" * **Step 8: Starting Up Bootstrap and Control Nodes** * The playbook will continue to run, preparing the bootstrap and control nodes. * To monitor the nodes as they come up, watch them on the cockpit at: "https://your-KVM-host-IP-address-here:9090" * Click on the "Virtual Machines" tab and then click on the VM you want to monitor. Click on the black terminal screen and press Enter. - * Once you see "node-name login" prompt come back to the terminal where you ran Ansible and press "ctrl+c" and + * Once you see "'node-name' login" prompt come back to the terminal where you ran Ansible and press "ctrl+c" and then "c" to continue running the playbook. * If you encounter an error that does not resolve with time, press "ctrl+c" and then "a" to stop the process and debug. 
* **Step 9: Bootkube Verification** * SSH into the bastion (run "ssh your-bastion-IP-address-here" in the terminal) - * From there, change to root user (run "su root") and type in the root password that you set during configuration + * From there, change to root user (run "su root") and type in the root password that you set during bastion configuration * Then SSH into the bootstrap as core ("ssh core@your-bootstrap-IP-address-here") * Run "journalctl -u bootkube.service" to watch the bootstrap connect to the control nodes (hold spacebar to get to the bottom of the log). Press "q" to exit the log. @@ -165,7 +175,6 @@ * Optional: Leave the bootstrap running as is, shut it down and destroy it, or convert it into a compute node. ## Teardown: - * If you would like to teardown your VMs, first determine whether you would like to do a full or partial teardown. * Full: to teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags full" * Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags partial" @@ -175,8 +184,13 @@ "run ansible-playbook main.yaml --ask-become-pass --tags "bastionvm,bastion,create_nodes" * Once you run the partial teardown, to start the main.yaml playbook back from that point, run main.yaml with the tags "bastion,create_nodes". -## Tags (in alphabetical order): +## Tags +* If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of playbooks run, use tags. Open a playbook and look at the "tags: " section under hosts for each play to determine what you part you would like to run and then use those tags when running the main playbook. 
+Examples: +ansible-playbook main.yaml --ask-become-pass --tags getocp (for one tag), or +ansible-playbook main.yaml --ask-become-pass --tags 'bastion,get-ocp' (for multiple tags) +In alphabetical order: * bastion = configuration of bastion for OCP * bastionvm = creation of Bastion KVM guest * boostrap = creation of Boostrap KVM guest From 631b7f013989c0d01314615eee0737ff966d019b Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 2 Nov 2021 16:47:55 -0500 Subject: [PATCH 468/885] Made README table of contents into a list Signed-off-by: Jacob Emery --- README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index e665ab38..8e450fc6 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,15 @@ # Ansible-OpenShift-Provisioning ## Table of Contents -[Scope](#Scope) -[Supported Operating Systems](#Supported-Operating-Systems) -[Pre-Requisites](#Pre-Requisites) -[Instructions](#Installation-Instructions) -[Setup](#Setup) -[Provisioning](#Provisioning) -[Verification] (#Verification) -[Teardown](#Teardown) -[Tags](#Tags) +* [Scope](#Scope) +* [Supported Operating Systems](#Supported-Operating-Systems) +* [Pre-Requisites](#Pre-Requisites) +* [Instructions](#Installation-Instructions) +* [Setup](#Setup) +* [Provisioning](#Provisioning) +* [Verification] (#Verification) +* [Teardown](#Teardown) +* [Tags](#Tags) ## Scope * The goal of this playbook is to setup and deploy a User Provisioned Infrastructure (UPI) OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. 
From d793990a09042571f1ffaa5644e7cbab8e9a377a Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 2 Nov 2021 16:56:54 -0500 Subject: [PATCH 469/885] Fixed Verification in Table of Contents Signed-off-by: Jacob Emery --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 8e450fc6..6568656e 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ * [Instructions](#Installation-Instructions) * [Setup](#Setup) * [Provisioning](#Provisioning) -* [Verification] (#Verification) +* [Verification](#Verification) * [Teardown](#Teardown) * [Tags](#Tags) @@ -20,7 +20,7 @@ for local workstation running Ansible * Linux (RedHat and Debian) * MacOS X -## Pre-requisites: +## Pre-Requisites: * Red Hat Enterprise Linux (RHEL) license or free trial * Red Hat OpenShift Container Platform license or free trial * Python3 intalled on your local computer (how-to: https://realpython.com/installing-python/) From 4a6b77f23cea2d0435ff5a5bb42ae87b0570396f Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 2 Nov 2021 16:59:45 -0500 Subject: [PATCH 470/885] Fixed tags section in README Signed-off-by: Jacob Emery --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 6568656e..1f804bab 100644 --- a/README.md +++ b/README.md @@ -185,10 +185,10 @@ for local workstation running Ansible * Once you run the partial teardown, to start the main.yaml playbook back from that point, run main.yaml with the tags "bastion,create_nodes". ## Tags -* If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of playbooks run, use tags. Open a playbook and look at the "tags: " section under hosts for each play to determine what you part you would like to run and then use those tags when running the main playbook. 
-Examples: -ansible-playbook main.yaml --ask-become-pass --tags getocp (for one tag), or -ansible-playbook main.yaml --ask-become-pass --tags 'bastion,get-ocp' (for multiple tags) +If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of playbooks run, use tags. Open a playbook and look at the "tags: " section under hosts for each play to determine what you part you would like to run and then use those tags when running the main playbook. +* Examples: +* ansible-playbook main.yaml --ask-become-pass --tags getocp (for one tag), or +* ansible-playbook main.yaml --ask-become-pass --tags 'bastion,get-ocp' (for multiple tags) In alphabetical order: * bastion = configuration of bastion for OCP From e6ae6a04ef28464c47a5a0db2f5ebe525a70ecca Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 2 Nov 2021 17:00:35 -0500 Subject: [PATCH 471/885] Edited Tags section in README Signed-off-by: Jacob Emery --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1f804bab..d3906594 100644 --- a/README.md +++ b/README.md @@ -186,7 +186,7 @@ for local workstation running Ansible ## Tags If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of playbooks run, use tags. Open a playbook and look at the "tags: " section under hosts for each play to determine what you part you would like to run and then use those tags when running the main playbook. 
-* Examples: +Examples: * ansible-playbook main.yaml --ask-become-pass --tags getocp (for one tag), or * ansible-playbook main.yaml --ask-become-pass --tags 'bastion,get-ocp' (for multiple tags) From 3e0e8ea1ee5ae9c3e2719fccaaf6476f43d737b8 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 2 Nov 2021 17:01:36 -0500 Subject: [PATCH 472/885] Changed Tags section in README back Signed-off-by: Jacob Emery --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d3906594..1f804bab 100644 --- a/README.md +++ b/README.md @@ -186,7 +186,7 @@ for local workstation running Ansible ## Tags If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of playbooks run, use tags. Open a playbook and look at the "tags: " section under hosts for each play to determine what you part you would like to run and then use those tags when running the main playbook. -Examples: +* Examples: * ansible-playbook main.yaml --ask-become-pass --tags getocp (for one tag), or * ansible-playbook main.yaml --ask-become-pass --tags 'bastion,get-ocp' (for multiple tags) From bb04fa992767d58c5f71adf71e0e6c20cdba80b1 Mon Sep 17 00:00:00 2001 From: Phillip Wilson Date: Tue, 2 Nov 2021 17:03:41 -0500 Subject: [PATCH 473/885] Placeholder inventory file to begin to conform to community guidelines Signed-off-by: Phillip Wilson --- inventories/inventory.yml | 1 + 1 file changed, 1 insertion(+) create mode 100644 inventories/inventory.yml diff --git a/inventories/inventory.yml b/inventories/inventory.yml new file mode 100644 index 00000000..570cff6d --- /dev/null +++ b/inventories/inventory.yml @@ -0,0 +1 @@ +#placeholder as we move to community directory structure From f256cc51c9d2a8a11ae5fe8822dade8b1cd722be Mon Sep 17 00:00:00 2001 From: Phillip Wilson Date: Tue, 2 Nov 2021 17:15:38 -0500 Subject: [PATCH 474/885] Adding punctuation to comment Signed-off-by: Phillip
Wilson --- inventories/inventory.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inventories/inventory.yml b/inventories/inventory.yml index 570cff6d..e290e64f 100644 --- a/inventories/inventory.yml +++ b/inventories/inventory.yml @@ -1 +1 @@ -#placeholder as we move to community directory structure +# Placeholder as we move to community directory structure. From 33a2465cbdc8493b5d3f15d55c0d3617dd7fea2f Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 13:55:52 -0600 Subject: [PATCH 475/885] Removed .vault_pass.txt from .gitignore because encryption role was removed Signed-off-by: Jacob Emery --- .gitignore | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index a96314c1..c3c27d86 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,6 @@ .DS_Store .iso .vscode -roles/get-ocp/files/ocp_ssh_pub -.vault_pass.txt inventory +env.yaml ansible.cfg \ No newline at end of file From bee36e9fe51b943709605002009510968dce77cf Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 13:57:56 -0600 Subject: [PATCH 476/885] Removed manual bastion configuration step because that is automated now. Also removed a couple steps in the provisioning section because those were automated too. Changed the names of some tags. Fixed some typos. Signed-off-by: Jacob Emery --- README.md | 187 ++++++++++++++++-------------------------------------- 1 file changed, 56 insertions(+), 131 deletions(-) diff --git a/README.md b/README.md index 1f804bab..06c42984 100644 --- a/README.md +++ b/README.md @@ -12,8 +12,8 @@ * [Tags](#Tags) ## Scope -* The goal of this playbook is to setup and deploy a User Provisioned Infrastructure (UPI) OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing KVM as the virtualization method. -* This README file gives extremely detailed step-by-step instructions for you to use as a reference. It assumes near zero experience with Ansible. 
+* The goal of this playbook is to automate the setup and deployment of a User Provisioned Infrastructure (UPI) OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing Kernel Virtual Machine (KVM) as the virtualization method. +* This README file gives extremely detailed step-by-step instructions for you to use as a reference. It assumes near-zero experience with Ansible. ## Supported Operating Systems for local workstation running Ansible @@ -21,29 +21,19 @@ for local workstation running Ansible * MacOS X ## Pre-Requisites: -* Red Hat Enterprise Linux (RHEL) license or free trial -* Red Hat OpenShift Container Platform license or free trial +* Red Hat OpenShift Container Platform license or free trial (includes licenses for RHEL and CoreOS) * Python3 intalled on your local computer (how-to: https://realpython.com/installing-python/) * Ansible installed on your local computer (how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) * If you are using Mac OS X for your localhost workstation to run Ansible, you also need to have: * homebrew package manager installed (how-to: https://brew.sh/) * Updated software for command line tools (run "softwareupdate --all --install" in your terminal) * Access to a logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: - * 6 Dedicated Integrated Facilities for Linux (IFLs) + * 6 Integrated Facilities for Linux (IFLs) with SMT2 enabled * 85 GB of RAM * 1 TB of disk space -* On that LPAR, bare-metal Red Hat Enterprise Linux (RHEL) 8.4 with Kernel Virtual Machine (KVM) installed with - the following options enabled: - * server - * hardware monitoring utilities - * networking file system client - * remote management for linux - * virtualization hypervisor - * headless management - * system tools +* On that LPAR, Red Hat Enterprise Linux (RHEL) with networking configured and a root password set * On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 
addresses -* Fully Qualified Domain Names (FQDN) names for all IPv4 addresses -* DNS configuration files (forward (.db), reverse (.rev), and named.conf). +* Fully Qualified Domain Names (FQDN) names for all IPv4 addresses ## Installation Instructions: @@ -53,164 +43,99 @@ for local workstation running Ansible * Run "git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git" * **Step 2: Get OpenShift Information** * In a web browser, navigate to https://console.redhat.com/openshift/install/ibmz/user-provisioned - * Download your local command line tools (oc and kubectl) * Copy the OpenShift pull secret (for use in the next step) * **Step 3: Set Variables** * In a text editor of your choice, open [env.yaml](env.yaml) - * Fill out all of the required variables for your specific installation -* **Step 4: DNS Configuration** - * Get DNS configuration files (forward (.db), reverse (.rev), and named.conf), or have them pre-defined by your networking team. - * Place them in the [roles/dns/files folder](roles/dns/files) - * Please leave the named.conf the same name. - * Rename the .db and .rev files with the same name you set for "env_metadata_name" in [env.yaml](env.yaml) (i.e. distribution.rev) -* **Step 5: Setup Script** - * Navigate to the folder where you saved the Git Repository + * Fill out the variables to match your specific installation. Many variables are pre-filled with defaults. For a default installation, you only need to fill in the empty variables. 
+* **Step 4: Setup Script** + * Navigate to the folder where you cloned the Git Repository * Run "ansible-playbook setup.yaml --ask-become-pass" - * When the setup playbook starts, it will prompt you for a password to use for encrypting Ansible vault files - * No files are encrypted until you run the [main playbook](main.yaml) in step 5 below - * If you would like to decrypt a file protected by Ansible vault, run: "ansible-vault decrypt file-name-here" ### Provisioning -* **Step 6: Running the Main Playbook** - * If you are not already there, navigate to the folder where you saved the Git repository in your terminal - * Execute the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" +* **Step 5: Running the Main Playbook** + * If you are not already there, navigate to the folder where you cloned the Git repository in your terminal. + * Run the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Watch Ansible as it completes the installation, correcting errors if they arise. - * To look at what is running in detail, from the main directory open roles/'task-you-want-to-inspect'/tasks/main.yaml - * If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of the main playbook run, use [tags](#Tags). See the [main playbook](main.yaml) to determine what part you would like to run and use those tags when running the [main playbook](main.yaml). Example: "ansible-playbook main.yaml --ask-become-pass -- tags 'bastion,get-ocp' - * Note: we chose to not edit the user's .bash_profile/.bashrc with an automatic ssh-add command because that would change the user's local workstation set-up in a way that was undesirable. Therefore, if you close out your terminal session in the middle of provisioning, you will need to run "ansible-playbook main.yaml --tags ssh-agent" before doing anything else. 
-* **Step 7: Bastion Configuration** - * Once the [create_bastion](roles/create_bastion/tasks/main.yaml) task runs, it will pause the playbook to give you time to configure it. - * Use a web browser to open the cockpit by going to: "https://your-KVM-host-IP-address-here:9090" - * Click on the "Virtual Machines" tab, then click on bastion from the list, click on the black terminal screen and press Enter. Wait until you see it asking for you to make a selection. - * To finish the bastion's installation, you will need to configure the VM by doing the following: - * Press 2 to enter Text Mode (hit the Enter key after every step) - * From the main menu, press 3 to configure the Installation Source - * Press 3 to use Network - * Type in the URL that points to your RHEL ISO - * From the main menu, press 4 to go to Software Selection - * Press 2 for Server, Enter, then "c" to continue - * From this list, press the number and then hit Enter for each: - * 1 - Hardware Monitoring utilities - * 8 - Networking File System Client - * 9 - Network Servers - * 11 - Remote Management for Linux - * 13 - Basic Web Server - * 17 - Headless Management - * 21 - System Tools - * Press "c" to continue, and 'c' again to get back to the main menu - * From the main menu, press 5 to set the installation destination - * If there is a disk already checked, press "c" to use the continue. If not, select the disk you would like to use. - * If it is not already selected, press 2 to use all free space, otherwise, press "c" to continue. 
- * Select "LVM" from the list and press "c" to continue - * From the main menu, press 7 to set Network Configurations - * Press 2 to configure device enc1 - * Press 1 and enter the bastion's IP address - * Press 2 and enter the netmask - * Press 3 and enter the default gateway - * Press 4 and type "ignore" - * Press 6 and enter DNS nameservers - * Press 'c' to continue - * From the main menu, press 9 to set the Root Password - * From the main menu, press 10 to create a user - * Press 1 to create user - * Press 2 to set full name - * Press 3 to set a username - * Press 5 to set a password - * Press 6 to give the user root access (optional) - * From the main menu, double check that all check boxes have an X - * Once you fill out all the required configuration settings, press "b" to begin installation. - * Wait for the installation to complete, this may take some time. Monitor its progress, it may need you to press 'Enter' to continue. Once the installation completes, you will have to press the 'Run' button on the cockpit for it to start up and finish configuration. - * Once you see "bastion login", come back to the terminal to continue your run by pressing "ctrl+c" and then "c". - * If there was a problem and you need to stop the playbook, press "ctrl+c" and then "a" to Abort. If configuration and installation took longer than the pause and the playbook continued and then failed, continue the playbook by running the following command: "ansible-playbook main.yaml --ask-become-pass --tags 'bastion,create_nodes'" -* **Step 8: Starting Up Bootstrap and Control Nodes** - * The playbook will continue to run, preparing the bootstrap and control nodes. - * To monitor the nodes as they come up, watch them on the cockpit at: "https://your-KVM-host-IP-address-here:9090" - * Click on the "Virtual Machines" tab and then click on the VM you want to monitor. Click on the black - terminal screen and press Enter. 
- * Once you see "'node-name' login" prompt come back to the terminal where you ran Ansible and press "ctrl+c" and - then "c" to continue running the playbook. - * If you encounter an error that does not resolve with time, press "ctrl+c" and then "a" to stop the process and debug. -* **Step 9: Bootkube Verification** - * SSH into the bastion (run "ssh your-bastion-IP-address-here" in the terminal) - * From there, change to root user (run "su root") and type in the root password that you set during bastion configuration - * Then SSH into the bootstrap as core ("ssh core@your-bootstrap-IP-address-here") - * Run "journalctl -u bootkube.service" to watch the bootstrap connect to the control nodes (hold spacebar to - get to the bottom of the log). Press "q" to exit the log. - * Expect lots of errors, as the control nodes may not be entirely up yet. - * This may take some time, 30 minutes or more. Check in occassionally by running "journalctl -u bootkube.service" again - to update the log. Remember to hold the spacebar to go to the bottom, press "q" to quit. - * Once all control nodes are connected, the bootkube log will read "bootkube.service complete". -* **Step 10: Starting Up Compute Nodes** - * Repeat Step 7 with the Compute nodes. - * Monitor their status at the cockpit, found at "https://your-KVM-host-IP-address:9090" - * They are ready once their terminal screen shows a login prompt - * Once all your compute nodes are up and running, and bootkube is complete, you are ready for cluster verification + * If all goes smoothly, this will take approximately 25 minutes. + * To look at what is running in detail, open roles/'task-you-want-to-inspect'/tasks/main.yaml + * If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of the main playbook run, use [tags](#Tags). 
See the [main playbook](main.yaml) to determine what part you would like to run and use those tags when running the [main playbook](main.yaml). Example: "ansible-playbook main.yaml --ask-become-pass --tags 'get-ocp,create_nodes'" + * Note: we chose to not edit the user's .bash_profile/.bashrc with an automatic ssh-add command because that would change the user's local workstation set-up in a way that was potentially undesirable. Therefore, if you close out your terminal session in the middle of provisioning, you will need to run "ansible-playbook main.yaml --tags ssh-agent" before doing anything else. ### Verification -* **Step 11: Export Kube Config** - * SSH into the bastion (run "ssh your-bastion-IP-address-here") - * Change to root user (run "su root") and type in your password from when you configured the bastion. +* **Step 6: Bootkube Verification** + * SSH into the bastion (run "ssh root@your-bastion-IP-address-here" in the terminal) + * Then SSH into the bootstrap as core ("ssh core@your-bootstrap-IP-address-here") + * Run "journalctl -u bootkube.service" to watch the bootstrap connect to the control nodes (hold spacebar to get to the bottom of the log). Press "q" to exit the log. + * Expect lots of errors in this log, as the control nodes may not be entirely up yet. + * This may take some time. Check in occassionally by running the above command again to update the log. + * Once all control nodes are connected, the end of the bootkube log will read "bootkube.service complete". +* **Step 7: Export Kube Config** + * Disconnect from bootstrap (Press "Ctrl+d") + * Make sure you are connected to the bastion as root (if not, run "ssh root@your-bastion-IP-address-here") * Then run "export KUBECONFIG=/ocpinst/auth/kubeconfig" * Check that worked by running "oc whoami", which should return "system:admin" -* **Step 12: Approve Certificates** - * From the bastion, running as root user (as above) run "oc get csr". 
This will bring up a list of certificates that need approval. + * If this doesn't work, just give it some time for the control nodes to connect and try again. + * If you are getting "oc: command not found", disconnect from the bastion (press "Ctrl+d") and repeat this step. +* **Step 8: Approve Certificates** + * From the bastion as root user (as above) run "oc get csr". This will bring up a list of certificates that need approval. * To approve all certificates at the same time, run the following command: "for i in \`oc get csr --no-headers | grep -i pending | awk '{ print $1 }\'`; do oc adm certificate approve $i; done" - * It may take some time for all the certificates that need approval to show up. Keep running "oc get csr" to check to make sure that - no new certificates have appeared since you last approved them. + * If you are viewing this file outside of GitHub, remove \ characters in the above command before running. The slashes are escape characters for formatting on GitHub. + * It may take some time for all the certificates that need approval to show up. Keep running "oc get csr" to check to make sure that no new certificates have appeared since you last approved them. * Once all certificates read "Approved, Issued". You're ready for the next step. -* **Step 13: Wait for Cluster To Become Operational** +* **Step 9: Wait for Cluster To Become Operational** * From the bastion, as root user (as above) check node status by running: "oc get nodes". All nodes need to be "Ready" in the "Status" column. - * From the bastion, as root user (as above) run "oc get clusteroperators". All cluster operators need to be "True" in the "Available" column. - * It may take hours, especially the cluster operators. Run the above two bullets' commmands to check in occasionally. - * Once all nodes are ready and cluster operators are available, you are ready to continue to the next step. 
-* **Step 14: Verify OpenShift Installation** - * From the bastion as root user (as above), navigate to /ocpinst ("cd /ocpinst") - * Run "./openshift-install --dir=/ocpinst wait-for install-complete" - * If installation is ready, running the above command will give you some information about how to log into the OpenShift cluster's dashboard. - * Copy the provided URL into a web browser and use "kubeadmin" as login and the provided password for first time sign-on. -* **Step 15: Celebrate!** - * Your OpenShift cluster provisioning and installation is now complete. + * From the bastion, as root user (as above) run "oc get clusteroperators". All cluster operators need to be "True" in the "Available" column. If there are messages regarding revisions, give it some time and check back in a few minutes by running the same command again. + * This may take some time, especially the cluster operators. Run the above two bullets' commmands to check-in occasionally. + * Once all nodes are ready and cluster operators are available with no messages, you are ready to continue to the next step. +* **Step 10: Verify OpenShift Installation** + * Run "/ocpinst/openshift-install --dir=/ocpinst wait-for install-complete" + * If installation is ready, running the above command will give you some information about how to log into the OpenShift cluster's dashboard. + * Copy the provided URL into a web browser using the provided username (kubeadmin) and password for first time sign-on. + * Congratulations! Your OpenShift cluster provisioning and installation is now complete. -* Optional: Leave the bootstrap running as is, shut it down and destroy it, or convert it into a compute node. +* Optional: Leave the bootstrap running as is, shut it down and destroy it (Run "ansible-playbook teardown.yaml --ask-become-pass --tags boot_teardown"), or convert it into another compute node. 
## Teardown: -* If you would like to teardown your VMs, first determine whether you would like to do a full or partial teardown. -* Full: to teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags full" -* Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags partial" +* If you would like to teardown your VMs, first determine whether you would like to do a full, partial, or bootstrap teardown, specified below. +* Full: to teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags full_teardown" +* Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags partial_teardown" +* Bootstrap: The bootstrap is not needed after OpenShift fully installs. To easily tear it down, run: "ansible-playbook teardown.yaml --ask-become-pass --tags boot_teardown" * If you have provisioned more than the minimum number of nodes for your installation, add them to the respective list found in roles/teardown_vms/tasks/main.yaml. * Once you run the full teardown, to start the main.yaml playbook back from that point, run: "run ansible-playbook main.yaml --ask-become-pass --tags "bastionvm,bastion,create_nodes" -* Once you run the partial teardown, to start the main.yaml playbook back from that point, run main.yaml with the tags "bastion,create_nodes". +* Once you run the partial teardown, to start the main.yaml playbook back from that point, run main.yaml with "--tags 'getocp,create_nodes'" ## Tags -If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of playbooks run, use tags. Open a playbook and look at the "tags: " section under hosts for each play to determine what you part you would like to run and then use those tags when running the main playbook. 
+If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of playbooks run, use tags. To determine what you part of a playbook or role you would like to run, open the file (either main.yaml or a role/tasks/main.yaml file) and look at the "tags: " section for a task and then use those tags when running the main playbook (examples below). * Examples: * ansible-playbook main.yaml --ask-become-pass --tags getocp (for one tag), or * ansible-playbook main.yaml --ask-become-pass --tags 'bastion,get-ocp' (for multiple tags) -In alphabetical order: +List of Tags (in alphabetical order): * bastion = configuration of bastion for OCP * bastionvm = creation of Bastion KVM guest -* boostrap = creation of Boostrap KVM guest +* bootstrap = creation of Boostrap KVM guest +* boot_teardown = for use with teardown.yaml to bring down the bootstrap * compute = creation of the Compute nodes KVM guests * control = creation of the Control nodes KVM guests * create_nodes = tasks from the second set of kvm plays * dns = configuration of DNS server on bastion * firewall = for tasks related to firewall settings -* full = for use with teardown.yaml to bring down all VMs +* full_teardown = for use with teardown.yaml to bring down all VMs * getocp = download of OCP installer and http server configuration * haproxy = configuration of haproxy on bastion kvm guest -* httpconf = configuration of httpd server on bastion kvm guest +* httpd = configuration of httpd server on bastion kvm guest * keymastr = ssh key configuration and testing * kvm_host = tasks to apply to KVM host for OCP cluster * kvm_prep = tasks from the first set of kvm plays * localhost = for tasks that apply to the local machine running Ansible -* partial = for use with teardown.yaml to bring down VMs except bastion +* partial_teardown = for use with teardown.yaml to bring down all VMs except the bastion * pkg = install and update all 
packages * prep = run all setup playbooks * selinux = for tasks related to SELinux settings * setup = first-time setup of ansible * ssh-agent = setting up ansible ssh-agent -* ssh-copy-id = for copying ssh id \ No newline at end of file +* ssh-copy-id = for copying ssh id +* subscription = Attach Red Hat Subscription \ No newline at end of file From df978a926b7334cae53f1a3f53f3e8f46a737333 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 13:58:52 -0600 Subject: [PATCH 477/885] Added location of private key file to ansible.cfg Signed-off-by: Jacob Emery --- ansible.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible.cfg b/ansible.cfg index 80a4489f..d432c9f8 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,4 +1,5 @@ [defaults] +private_key_file=~/.ssh/ansible inventory=inventory [inventory] From 7395f2cac79195cd08b553aeee187b410ebc36e4 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:02:14 -0600 Subject: [PATCH 478/885] Added custom host names, DNS on bastion option, auto-attach rhel subscription option, Red Hat username and password variables, qcow2 URL variable instead of FTP, variable for network interface name, variable for DNS forwarder, more variables for bastion, bootstrap, control and compute resource allocations, variables for ocp client and installer, variables for mirrors of CoreOS kernel, initramfs and rootfs for version controlling. Signed-off-by: Jacob Emery --- env.yaml | 116 ++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 84 insertions(+), 32 deletions(-) diff --git a/env.yaml b/env.yaml index 05dd72d3..a523e1f2 100644 --- a/env.yaml +++ b/env.yaml @@ -1,8 +1,13 @@ -# Red Hat account with license to regsiter VMs -env_rh_email: -env_rh_pass: +# Red Hat account with license to regsiter VMs. +# If you do not want to automatically attach RHEL subscription: +# comment out env_rh_username and env_rh_passwd variables below, and +# change auto_attach_rhel_sub variable to False. 
+# Note: If no subscription is attached manually, an error will occur when the install_packages role runs. +env_rh_username: +env_rh_passwd: +auto_attach_rhel_sub: True # make sure to also comment out the above variables if False -# to populate OpenShift install config file. +# To populate OpenShift install config file. env_apiVersion: v1 env_baseDomain: env_hyperthreading_compute: Enabled @@ -22,43 +27,90 @@ env_fips: "false" # "true" or "false" (include quotes) env_pullSecret: '' #paste it into these single quotes # OpenShift SSH key is generated via Ansible in ssh_ocp_key_gen role +# RHEL KVM Guest Image link. Please ensure it is for System Z s390x architecture. +# The address will start with https://access.cdn.redhat.com/content/origin/files/... +# Link will expire after a few hours. Copy the link again if you have waited a while since pasting here. +# If you don't want to download via ephemeral URL, download the qcow2 file from Red Hat and place it in the KVM host at /var/lib/libvirt/images/bastion_base.qcow2 +env_rhel_qcow2: + # IP addresses for the nodes that Ansible will be run against. This will automatically fill out the inventory file when setup.yaml is run. -# If you would like to add more control/compute nodes, add a line below following the same naming conventions. -# Double check that the env_control/compute_count variables above match the total number of corresponding nodes below (which are 0 indexed). -env_ip_kvm_host: -env_ip_bastion: -env_ip_bootstrap: -env_ip_control_0: -env_ip_control_1: -env_ip_control_2: -env_ip_compute_0: -env_ip_compute_1: +env_ip_kvm_host: +env_ip_bastion: +env_ip_bootstrap: +env_ip_control_0: +env_ip_control_1: +env_ip_control_2: +env_ip_compute_0: +env_ip_compute_1: + +# Node names, leave as is or change to custom names. 
+env_kvm_host_name: kvm_host +env_bastion_name: bastion +env_bootstrap_name: bootstrap +env_control_0_name: control-0 +env_control_1_name: control-1 +env_control_2_name: control-2 +env_compute_0_name: compute-0 +env_compute_1_name: compute-1 + +# Networking +env_dns_nameserver: #If you are using the bastion as a DNS server (and have set the below variable env_dns_on_bastion to True) this variable is the same as env_ip_bastion. +env_default_gateway: +env_netmask: +env_net_int_name: #KVM network interface name: i.e. enc1 +env_dns_forwarder: #Upstream DNS server, can use 8.8.8.8 as a default +env_dns_on_bastion: True #Set to False if you do not want to setup a DNS server on the bastion because you already have a DNS server. + +# Username and password for user on VMs +env_vm_uid: +env_vm_passwd: + +# Root password for VMs +env_vm_root_passwd: # Ansible passwordless SSH setup. Pre-filled with recommended values. -#Ansible ssh key pair filename +# Ansible ssh key pair filename (don't include file extension) env_ssh_ans_name: ansible -#Ansible SSH password. Strongly recommended to leave as is (no password). +# Ansible SSH password. Keep quotes. Strongly recommended to leave as is (no password). env_ssh_ans_pass: "" -# OpenShift cluster's SSH key comment +# OpenShift cluster's SSH key comment. Keep quotes. Can leave as is (no comment). env_ssh_ocp_comm: "" -# Networking -env_dns_nameserver: -env_default_gateway: -env_netmask: -env_ftp: +# Bastion configuration. Pre-filled values are minimum requirements. +env_bastion_disk_size: 30 +env_bastion_ram: 4096 +env_bastion_vcpu: 4 +env_bastion_os_variant: 8.4 #RHEL version. Make sure this matches the version you copied for RHEL iso. -# Compute node configuration, will also be used for bootstrap. Pre-filled values are minimum requirements. -env_comp_disk_size: 120 -env_comp_ram: 1600 -env_comp_cpu: host -env_comp_vcpu: 4 -env_comp_os_variant: rhel8.0 +# Bootstrap node configuration. Pre-filled values are minimum requirements. 
+env_boot_disk_size: 100 #120 preferred +env_boot_ram: 16384 +env_boot_cpu: host +env_boot_vcpu: 4 +env_boot_os_variant: 8.4 #RH CoreOS version. Make sure this matches the version you copied for RHEL iso. # Control node configuration. Pre-filled values are minimum requirements. -env_cont_disk_size: 120 -env_cont_ram: 1600 +env_cont_disk_size: 100 #120 preferred +env_cont_ram: 16384 env_cont_cpu: host -env_cont_vcpu: 4 -env_cont_os_variant: rhel8.0 +env_cont_vcpu: 4 #8 preferred +env_cont_os_variant: 8.4 #RH CoreOS version. Make sure this matches the version you copied for RHEL iso. + +# Compute node configuration. Pre-filled values are minimum requirements. +env_comp_disk_size: 100 #120 preferred +env_comp_ram: 8192 +env_comp_cpu: host +env_comp_vcpu: 2 #6 preferred +env_comp_os_variant: 8.4 #RH CoreOS version. Make sure this matches the version you copied for RHEL iso + +# If you would like to download the latest stable version of OpenShift, leave as is. +# Otherwise, replace these links with preferred versions. Used in get-ocp role. +env_ocp_client: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz +env_ocp_installer: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + +# This version of Red Hat CoreOS works. Feel free to replace these links with preferred versions. +# Used in prep_kvm_guests and get-ocp roles. 
+env_rhcos_kernel: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.7/latest/rhcos-4.7.33-s390x-live-kernel-s390x +env_rhcos_initramfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.7/latest/rhcos-4.7.33-s390x-live-initramfs.s390x.img +env_rhcos_rootfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.7/latest/rhcos-4.7.33-s390x-live-rootfs.s390x.img \ No newline at end of file From 51c82955d5fe53fdde0b78d83abe545bd36e4cbe Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:03:22 -0600 Subject: [PATCH 479/885] Added ansible_connection=local for localhost in inventory Signed-off-by: Jacob Emery --- inventory | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inventory b/inventory index e82d1e18..a7ceda3d 100755 --- a/inventory +++ b/inventory @@ -1,7 +1,7 @@ # will populate from ansible_setup playbook [localhost] -127.0.0.1 +127.0.0.1 ansible_connection=local [localhost:vars] ansible_python_interpreter=/usr/bin/python3 From 6fe9b886f3b677996088c3ce95fdfaeb2491ff9e Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:05:12 -0600 Subject: [PATCH 480/885] Removed encrypt files roles because it was unnecessary. Added attach_subscription role for KVM host and bastion. Removed unnecessary packages from bastion. Added packages for KVM host. 
Signed-off-by: Jacob Emery --- main.yaml | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/main.yaml b/main.yaml index f4f8e163..762f72e3 100644 --- a/main.yaml +++ b/main.yaml @@ -9,40 +9,40 @@ - env.yaml vars: - ssh_target_ip: "{{ env_ip_kvm_host }}" - - files_to_encrypt: [ 'env.yaml' ] # add to this list as needed roles: - - encrypt_files - ssh_key_gen - ssh_copy_id - ssh_agent - + - hosts: kvm_host tags: kvm_host,kvm_prep become: true vars_files: - env.yaml vars: # feel free to add more packages as needed - - packages: [ 'libvirt-devel', 'libvirt-daemon-kvm', 'qemu-kvm', 'virt-manager', 'libvirt-daemon-config-network', 'libvirt-client', 'qemu-img', 'libvirt' ] + - packages: ['@server-product-environment','@hardware-monitoring','@network-file-system-client','@remote-system-management', + '@headless-management','@system-tools','libvirt-devel','libvirt-daemon-kvm','qemu-kvm','virt-manager','genisoimage', + 'libvirt-daemon-config-network','libvirt-client','qemu-img','virt-install','virt-viewer','libvirt-daemon-kvm','libvirt'] roles: - check_ssh + - attach_subscription - install_packages - set_selinux_permissive - enable_packages - macvtap - - mount_rhel - create_bastion - hosts: localhost - tags: localhost,bastion + tags: bastion connection: local become: false gather_facts: no - vars: - - ssh_target_ip: "{{ env_ip_bastion }}" vars_files: - env.yaml + vars: + - ssh_target_ip: "{{ env_ip_bastion }}" roles: - - ssh_copy_id # to connect to bastion + - ssh_copy_id - hosts: bastion tags: bastion @@ -50,10 +50,11 @@ vars_files: - env.yaml vars: # feel free to add more packages as needed - - packages: [ 'haproxy', 'httpd', 'mod_ssl', 'bind', 'bind-utils', 'openssh' ] + - packages: ['haproxy','httpd','bind','bind-utils','expect','firewalld','mod_ssl'] roles: - check_ssh - - install_packages + - attach_subscription + - install_packages #RHEL subscription already attached - ssh-ocp-key-gen # SSH key for bastion to connect to nodes - 
set_selinux_permissive - set_firewall From 8af011792dc70286668fbad5c60d8e23f9944574 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:06:21 -0600 Subject: [PATCH 481/885] Added boot_teardown option to teardown.yaml to automate tearing down bootstrap after successful installation. Signed-off-by: Jacob Emery --- teardown.yaml | 41 ++++++++++++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/teardown.yaml b/teardown.yaml index 86f07a4f..a02b1919 100644 --- a/teardown.yaml +++ b/teardown.yaml @@ -1,17 +1,19 @@ --- - - # Use the "full" tag when running this playbook to reset your installation back to the point where no VMs were running on the KVM host. # If you have more nodes than what is present in the "vms" list below, feel free to add more to the list. # After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastionvm,bastion,create_nodes'" - hosts: kvm_host - tags: full + tags: full_teardown become: true gather_facts: no - vars: - - vms: ['bastion', 'bootstrap', 'control-0', 'control-1', 'control-2', 'compute-0', 'compute-1'] + vars_files: + - env.yaml + pre_tasks: + - name: Create list of VMs to teardown. + set_fact: + vms: ['{{env_bastion_name}}', '{{env_bootstrap_name}}', '{{env_control_0_name}}', '{{env_control_1_name}}', '{{env_control_2_name}}', '{{env_compute_0_name}}', '{{env_compute_1_name}}'] roles: - teardown_vms @@ -20,17 +22,38 @@ # After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastion,create_nodes'" - hosts: bastion - tags: partial + tags: partial_teardown become: true gather_facts: no + vars_files: + - env.yaml roles: - reset_files - hosts: kvm_host - tags: partial + tags: partial_teardown + become: true + gather_facts: no + vars_files: + - env.yaml + pre_tasks: + - name: Create list of VMs to teardown. 
+ set_fact: + vms: ['{{env_bootstrap_name}}','{{env_control_0_name}}','{{env_control_1_name}}','{{env_control_2_name}}','{{env_compute_0_name}}','{{env_compute_1_name}}'] + roles: + - teardown_vms + +# Use the "boot_teardown" tag when running this playbook to reset your installation back to the point where just the bastion node was running on the KVM host. + +- hosts: kvm_host + tags: boot_teardown become: true gather_facts: no - vars: - - vms: ['bootstrap', 'control-0', 'control-1', 'control-2', 'compute-0', 'compute-1'] + vars_files: + - env.yaml + pre_tasks: + - name: Create list of VMs to teardown. + set_fact: + vms: ['{{env_bootstrap_name}}'] roles: - teardown_vms From aaef20b19d5d216212d8f02f98bdaac0672a6390 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:06:56 -0600 Subject: [PATCH 482/885] Added a role to automate attaching a Red Hat subscription Signed-off-by: Jacob Emery --- roles/attach_subscription/tasks/main.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 roles/attach_subscription/tasks/main.yaml diff --git a/roles/attach_subscription/tasks/main.yaml b/roles/attach_subscription/tasks/main.yaml new file mode 100644 index 00000000..a0421602 --- /dev/null +++ b/roles/attach_subscription/tasks/main.yaml @@ -0,0 +1,10 @@ +--- + +- name: Attach RHEL subscription + tags: subscription + community.general.redhat_subscription: + state: present + username: "{{env_rh_username}}" + password: "{{env_rh_passwd}}" + auto_attach: yes + force_register: yes \ No newline at end of file From 8f901591679cdf5697e97153da4c92f51491357a Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:07:55 -0600 Subject: [PATCH 483/885] Added functionality to template out the forward, reverse and named.conf DNS configuration files. 
Signed-off-by: Jacob Emery --- roles/dns/tasks/main.yaml | 39 +++++++++----- roles/dns/templates/dns-named.conf.j2 | 77 +++++++++++++++++++++++++++ roles/dns/templates/dns.db.j2 | 38 +++++++++++++ roles/dns/templates/dns.rev.j2 | 24 +++++++++ 4 files changed, 166 insertions(+), 12 deletions(-) create mode 100644 roles/dns/templates/dns-named.conf.j2 create mode 100644 roles/dns/templates/dns.db.j2 create mode 100644 roles/dns/templates/dns.rev.j2 diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml index 164fe609..2925eea0 100644 --- a/roles/dns/tasks/main.yaml +++ b/roles/dns/tasks/main.yaml @@ -1,5 +1,9 @@ --- +- name: Load in variables from env.yaml + tags: dns,setup + include_vars: env.yaml + - name: enable named tags: dns,bastion ansible.builtin.systemd: @@ -12,31 +16,42 @@ name: named state: started -- name: Copy named.conf file to bastion +- name: split IP addresses for use in templates + tags: dns,bastion + set_fact: + bastion_split_ip: "{{ env_ip_bastion.split('.') }}" + bootstrap_split_ip: "{{ env_ip_bootstrap.split('.') }}" + cont_0_split_ip: "{{ env_ip_control_0.split('.') }}" + cont_1_split_ip: "{{ env_ip_control_1.split('.') }}" + cont_2_split_ip: "{{ env_ip_control_2.split('.') }}" + comp_0_split_ip: "{{ env_ip_compute_0.split('.') }}" + comp_1_split_ip: "{{ env_ip_compute_1.split('.') }}" + +- name: Template named.conf file to bastion tags: dns,bastion - ansible.builtin.copy: - src: named.conf - dest: /etc/ + template: + src: dns-named.conf.j2 + dest: /etc/named.conf owner: root group: root mode: '0755' backup: yes -- name: Copy DNS .db file to bastion +- name: Template DNS .db file to bastion tags: dns,bastion - ansible.builtin.copy: - src: "{{ env_metadata_name }}.db" - dest: /var/named + template: + src: dns.db.j2 + dest: /var/named/{{env_metadata_name}}.db owner: named group: named mode: '0755' backup: yes -- name: Copy DNS .rev file to bastion +- name: Template DNS .rev file to bastion tags: dns,bastion - ansible.builtin.copy: - 
src: "{{ env_metadata_name }}.rev" - dest: /var/named + template: + src: dns.rev.j2 + dest: /var/named/{{env_metadata_name}}.rev owner: named group: named mode: '0755' diff --git a/roles/dns/templates/dns-named.conf.j2 b/roles/dns/templates/dns-named.conf.j2 new file mode 100644 index 00000000..4baf30e5 --- /dev/null +++ b/roles/dns/templates/dns-named.conf.j2 @@ -0,0 +1,77 @@ +// +// named.conf +// +// Provided by Red Hat bind package to configure the ISC BIND named(8) DNS +// server as a caching only nameserver (as a localhost DNS resolver only). +// +// See /usr/share/doc/bind*/sample/ for example named configuration files. +// + +options { +// listen-on port 53 { 127.0.0.1; }; + listen-on port 53 { any; }; + listen-on-v6 port 53 { ::1; }; + directory "/var/named"; + dump-file "/var/named/data/cache_dump.db"; + statistics-file "/var/named/data/named_stats.txt"; + memstatistics-file "/var/named/data/named_mem_stats.txt"; + secroots-file "/var/named/data/named.secroots"; + recursing-file "/var/named/data/named.recursing"; + allow-query { any; }; + forwarders { {{ env_dns_forwarder }}; }; + + /* + - If you are building an AUTHORITATIVE DNS server, do NOT enable recursion. + - If you are building a RECURSIVE (caching) DNS server, you need to enable + recursion. + - If your recursive DNS server has a public IP address, you MUST enable access + control to limit queries to your legitimate users. Failing to do so will + cause your server to become part of large scale DNS amplification + attacks. 
Implementing BCP38 within your network would greatly + reduce such attack surface + */ + recursion yes; + + dnssec-enable no; + dnssec-validation no; + + managed-keys-directory "/var/named/dynamic"; + + pid-file "/run/named/named.pid"; + session-keyfile "/run/named/session.key"; + + /* https://fedoraproject.org/wiki/Changes/CryptoPolicy */ + include "/etc/crypto-policies/back-ends/bind.config"; +}; + +logging { + channel default_debug { + file "data/named.run"; + severity dynamic; + }; +}; + +zone "." IN { + type hint; + file "named.ca"; +}; + +include "/etc/named.rfc1912.zones"; +include "/etc/named.root.key"; + +//forward zone +zone "{{ env_baseDomain }}" IN { + type master; + file "/var/named/{{ env_metadata_name }}.db"; + allow-update { any; }; + allow-query { any; }; +}; + +//backward zone +zone "{{ bastion_split_ip.2 }}.{{ bastion_split_ip.1 }}.{{ bastion_split_ip.0 }}.in-addr.arpa" IN { + type master; + file "/var/named/{{ env_metadata_name }}.rev"; + allow-update { any; }; + allow-query { any; }; +}; + diff --git a/roles/dns/templates/dns.db.j2 b/roles/dns/templates/dns.db.j2 new file mode 100644 index 00000000..e965d6bd --- /dev/null +++ b/roles/dns/templates/dns.db.j2 @@ -0,0 +1,38 @@ +$TTL 86400 +@ IN SOA {{ env_bastion_name }}.{{ env_baseDomain }}. admin.{{ env_baseDomain }}.( + 2020021821 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL +) + +;Name Server / Bastion Information +@ IN NS {{ env_bastion_name }}.{{ env_baseDomain }}. + +;IP Address for Name Server +{{ env_bastion_name }} IN A {{ env_ip_bastion }} + +;entry for bootstrap host. +{{ env_bootstrap_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. IN A {{ env_ip_bootstrap }} + +;entries for the master nodes +{{ env_control_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. IN A {{ env_ip_control_0 }} +{{ env_control_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. 
IN A {{ env_ip_control_1 }} +{{ env_control_2_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. IN A {{ env_ip_control_2 }} + +;entries for the worker nodes +{{ env_compute_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. IN A {{ env_ip_compute_0 }} +{{ env_compute_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. IN A {{ env_ip_compute_1 }} + +;entry of your load balancer +haproxy IN A {{ env_ip_bastion }} + +;The api identifies the IP of your load balancer. +api.{{ env_metadata_name }} IN CNAME haproxy.{{ env_baseDomain }}. +api-int.{{ env_metadata_name }} IN CNAME haproxy.{{ env_baseDomain }}. + +;The wildcard also identifies the load balancer. +*.apps.{{ env_metadata_name }} IN CNAME haproxy.{{ env_baseDomain }}. + +;EOF \ No newline at end of file diff --git a/roles/dns/templates/dns.rev.j2 b/roles/dns/templates/dns.rev.j2 new file mode 100644 index 00000000..8a70509e --- /dev/null +++ b/roles/dns/templates/dns.rev.j2 @@ -0,0 +1,24 @@ +$TTL 86400 +@ IN SOA {{ env_bastion_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. admin.{{ env_metadata_name }}.{{ env_baseDomain }} ( + 2020011800 ;Serial + 3600 ;Refresh + 1800 ;Retry + 604800 ;Expire + 86400 ;Minimum TTL +) +;Name Server Information +@ IN NS {{ env_bastion_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. +{{ env_bastion_name }} IN A {{ env_ip_bastion }} + +;Reverse lookup for Name Server +{{ bastion_split_ip.3 }} IN PTR {{ env_bastion_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. + +;PTR Record IP address to Hostname +{{ cont_0_split_ip.3 }} IN PTR {{ env_control_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. +{{ cont_1_split_ip.3 }} IN PTR {{ env_control_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. +{{ cont_2_split_ip.3 }} IN PTR {{ env_control_2_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. +{{ comp_0_split_ip.3 }} IN PTR {{ env_compute_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. 
+{{ comp_1_split_ip.3 }} IN PTR {{ env_compute_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. +{{ bootstrap_split_ip.3 }} IN PTR {{ env_bootstrap_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. +{{ bastion_split_ip.3 }} IN PTR api-int.{{ env_metadata_name }}.{{ env_baseDomain }}. +{{ bastion_split_ip.3 }} IN PTR api.{{ env_metadata_name }}.{{ env_baseDomain }}. \ No newline at end of file From eb37c31358e779b33ae117d08b992b33d3c445ce Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:08:49 -0600 Subject: [PATCH 484/885] Removed setup_vault role from setup.yaml because it was unnecessary and created too much complexity Signed-off-by: Jacob Emery --- setup.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/setup.yaml b/setup.yaml index 8bd72e0c..a540e74d 100644 --- a/setup.yaml +++ b/setup.yaml @@ -5,12 +5,6 @@ connection: local become: false gather_facts: yes - vars_prompt: - - name: vault_pass - prompt: Please provide a secure password to be used for encrypting your sensitive files in Ansible - private: yes - unsafe: yes # this just means you can use special characters. The password is safe. 
roles: - - setup_vault - install_dependencies - set_inventory \ No newline at end of file From 6bd07677deb36a15960d2adee99fa630c74e3721 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:10:15 -0600 Subject: [PATCH 485/885] Added tags to macvtap tasks Signed-off-by: Jacob Emery --- roles/macvtap/tasks/main.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/macvtap/tasks/main.yaml b/roles/macvtap/tasks/main.yaml index 1ae4d245..9c8383e7 100644 --- a/roles/macvtap/tasks/main.yaml +++ b/roles/macvtap/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Set up macvtap bridge - tags: kvmhost + tags: kvmhost, macvtap community.libvirt.virt_net: command: define name: macvtap-net @@ -9,14 +9,14 @@ xml: "{{ lookup ('template', 'macvtap.xml.j2') }}" - name: Start macvtap-net - tags: kvmhost + tags: kvmhost, macvtap community.libvirt.virt_net: autostart: yes command: start name: macvtap-net - name: Set autostart for macvtap-net - tags: kvmhost + tags: kvmhost, macvtap community.libvirt.virt_net: autostart: yes name: macvtap-net From 4040c043b3ec6dee7b6576460df7fbe0129f1231 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:11:17 -0600 Subject: [PATCH 486/885] Created a variable for network interface name in macvtap template that comes from env.yaml Signed-off-by: Jacob Emery --- roles/macvtap/templates/macvtap.xml.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/macvtap/templates/macvtap.xml.j2 b/roles/macvtap/templates/macvtap.xml.j2 index 388477ea..0a061025 100644 --- a/roles/macvtap/templates/macvtap.xml.j2 +++ b/roles/macvtap/templates/macvtap.xml.j2 @@ -1,6 +1,6 @@ macvtap-net - - + + From 03a0a2a95897cc44b608d65d602f8835f60992e6 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:12:04 -0600 Subject: [PATCH 487/885] Changed the name in install_packages task to help user know that this step may take a while Signed-off-by: Jacob Emery --- 
roles/install_packages/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index 762a3a15..aed54e5b 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -5,7 +5,7 @@ debug: var: packages -- name: installing required packages +- name: installing required packages. This may take a while, depending on the number of packages to be installed. tags: pkg ansible.builtin.package: name: "{{ item }}" From a63d1cc3749621b44f06efab4af93e6a1a27ee25 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:12:57 -0600 Subject: [PATCH 488/885] Added expect package to install_dependencies for use with ssh-copy-id expect script Signed-off-by: Jacob Emery --- roles/install_dependencies/tasks/main.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/install_dependencies/tasks/main.yaml b/roles/install_dependencies/tasks/main.yaml index fde850c2..7a3c844d 100644 --- a/roles/install_dependencies/tasks/main.yaml +++ b/roles/install_dependencies/tasks/main.yaml @@ -11,6 +11,7 @@ - ansible-galaxy collection install ansible.posix - ansible-galaxy collection install community.libvirt - brew install openssh + - brew install expect when: ansible_facts['os_family'] == "Darwin" - name: install Ansible dependencies and packages @@ -21,5 +22,6 @@ - ansible-galaxy collection install ansible.posix - ansible-galaxy collection install community.libvirt - sudo dnf install openssh -y + - sudo dnf install expect -y when: ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Debian" \ No newline at end of file From e8eff42f224657c4809e84034a9cc2f69fb6ca46 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:13:50 -0600 Subject: [PATCH 489/885] Added destination file names to kernel and initramfs pull down tasks in case the file is called something different Signed-off-by: Jacob Emery --- 
roles/prep_kvm_guests/tasks/main.yaml | 30 +++++++-------------------- 1 file changed, 7 insertions(+), 23 deletions(-) diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index 687b21e9..982b5302 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -1,37 +1,21 @@ --- -- name: get rhcos qcow2 file +- name: Load in variables from env.yaml tags: kvm_host - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-qemu.s390x.qcow2.gz - dest: /var/lib/libvirt/images/ - force: yes - mode: '0755' - -- name: Unzip rhcos qcow2 files - tags: kvm_host - command: gunzip -f /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2.gz - -- name: get rhcos initramfs image - tags: kvm_host - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img - dest: /var/lib/libvirt/images/ - mode: '0755' - force: yes + include_vars: env.yaml - name: get rhcos kernel tags: kvm_host get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x - dest: /var/lib/libvirt/images/ + url: "{{ env_rhcos_kernel }}" + dest: /var/lib/libvirt/images/rhcos-live-kernel-s390x mode: '0755' force: yes -- name: get rhcos rootfs image +- name: get rhcos initramfs image tags: kvm_host get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/lib/libvirt/images/ + url: "{{ env_rhcos_initramfs }}" + dest: /var/lib/libvirt/images/rhcos-live-initramfs.s390x.img mode: '0755' force: yes \ No newline at end of file From d195b55ae9aa7f985a3fae3162b58f6d78a9ed0e Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:14:57 -0600 Subject: [PATCH 490/885] Simplified and improved teardown task for teardown.yaml by using virsh destroy instead of shutdown. 
Overall decreased complexity and improved performance. Signed-off-by: Jacob Emery --- roles/teardown_vms/tasks/main.yaml | 35 ++++++------------------------ 1 file changed, 7 insertions(+), 28 deletions(-) diff --git a/roles/teardown_vms/tasks/main.yaml b/roles/teardown_vms/tasks/main.yaml index 02bad35a..28982cb2 100644 --- a/roles/teardown_vms/tasks/main.yaml +++ b/roles/teardown_vms/tasks/main.yaml @@ -1,36 +1,15 @@ --- -- name: print the list of VMs given from teardown.yaml - debug: - var: vms - -- name: print the number of VMs given from teardown.yaml - debug: - var: "{{ vms | length }}" - -- name: register only running VMs - community.libvirt.virt: - command: list_vms - state: running - register: running_vms - -- name: print only running vms - debug: - var: running_vms.list_vms - -- name: shutdown running VMs +- name: Destroy running VMs. Expect errors if some VMs are already destroyed. community.libvirt.virt: name: "{{ item }}" - command: shutdown - loop: "{{ running_vms.list_vms }}" - when: - -- name: wait up to 5 minute for VMs to shutdown gracefully - pause: - minutes: 5 + command: destroy + loop: "{{ vms }}" + ignore_errors: yes -- name: undefine VMs given from teardown.yaml +- name: Undefine remaining existing VMs. Expect errors if some VMs are already undefined. community.libvirt.virt: name: "{{ item }}" command: undefine - loop: "{{ vms }}" \ No newline at end of file + loop: "{{ vms }}" + ignore_errors: yes \ No newline at end of file From 147ab8f8b5675607c70d7a0c16479d8eae51add1 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:15:57 -0600 Subject: [PATCH 491/885] Added tasks in ssh_key_gen to create a variable that is usable in ssh_copy_id role for use in the templating of the expect script. 
Signed-off-by: Jacob Emery --- roles/ssh_key_gen/tasks/main.yaml | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index 92bc3493..c4f17be1 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -33,10 +33,22 @@ - "{{env_ssh_ans_name}}" - "{{env_ssh_ans_name}}.pub" +- name: create a vars file for key path + tags: keymastr + file: + state: touch + path: roles/ssh_copy_id/vars/path_to_key_pair.yaml + +- name: Save key path for use in ssh-copy-id role + tags: keymastr + lineinfile: + line: "path_to_key_pair: {{ssh_key_file_exists_check.results[1].invocation.module_args.path}}" + path: roles/ssh_copy_id/vars/path_to_key_pair.yaml + - name: Print results of ssh key pair files check tags: keymastr debug: - var: ssh_key_file_exists_check.results[1].stat.exists + var: ssh_key_file_exists_check.results[0].stat.exists - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key tags: keymastr From 7b27487684afc299732fb23df7d43b4b078c1825 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:19:31 -0600 Subject: [PATCH 492/885] Completely reworked create_bastion role to automate its configuration and installation completely using cloud-init Signed-off-by: Jacob Emery --- roles/create_bastion/tasks/main.yaml | 135 ++++++++++++++---- .../templates/cloud_init.cfg.j2 | 62 ++++++++ .../templates/network_config_static.cfg.j2 | 10 ++ 3 files changed, 178 insertions(+), 29 deletions(-) create mode 100644 roles/create_bastion/templates/cloud_init.cfg.j2 create mode 100644 roles/create_bastion/templates/network_config_static.cfg.j2 diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index 4ab20e45..f0b2d6a7 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -1,51 +1,128 @@ --- -#Uncomment once we have a RHEL license 
-#- name: download RHEL ISO image to KVM -# get_url: -# url: {{ RHEL ISO URL }} -# dest: /var/lib/libvirt/images/rhel83.iso -# mode: '0775' +- name: Load in variables from env.yaml + tags: kvm_host, bastionvm + include_vars: env.yaml -#- name: Unzip RHEL iso -# ansible.builtin.unarchive: -# src: https://mirror.redhat.com/rhel/latest/latest/RHEL-8.3.0-20201009.2-s390x-dvd1.iso -# dest: /var/lib/libvirt/images/rhel83.iso -# remote_src: yes +- name: enable cockpit console + tags: kvm_host, bastionvm + command: systemctl enable --now cockpit.socket - #- name: create install mount directory - # command: mkdir /rhcos-install +- name: remove working directory for idempotency + tags: kvm_host, bastionvm + file: + path: /var/lib/libvirt/images/tmp + state: absent - #- name: mount rhcos install directory - # command: mount -o loop /var/lib/libvirt/images/rhel83.iso /rhcos-install/ +- name: create working directory + tags: kvm_host, bastionvm + file: + path: /var/lib/libvirt/images/tmp + state: directory + mode: '0755' -# - name: virtualize bastion server -# command: qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhcos-qemu.s390x.qcow2 /var/lib/libvirt/images/bastion.qcow2 30G +- name: check if RHEL qcow2 is already downloaded + tags: kvm_host, bastionvm + stat: + path: /var/lib/libvirt/images/bastion.qcow2 + register: rhel_qcow2 -- name: Load in variables from env.yaml +- name: download RHEL qcow2 file tags: kvm_host, bastionvm - include_vars: env.yaml + get_url: + url: "{{ env_rhel_qcow2 }}" + dest: /var/lib/libvirt/images/bastion_base.qcow2 + mode: '0775' + when: rhel_qcow2.stat.exists == false + register: rhel_qcow2_download + +- name: remove snapshot for idempotency + tags: kvm_host, bastionvm + file: + path: /var/lib/libvirt/images/{{env_bastion_name}}-snapshot-cloudimg.qcow2 + state: absent + +- name: create base image + tags: kvm_host, bastionvm + command: "qemu-img create -b /var/lib/libvirt/images/bastion_base.qcow2 -f qcow2 
/var/lib/libvirt/images/{{env_bastion_name}}-snapshot-cloudimg.qcow2 {{env_bastion_disk_size}}G" + register: qemu_create + +- name: print result of creation of base image + tags: kvm_host, bastionvm + debug: + var: qemu_create + +- name: get info about qemu image creation + tags: kvm_host, bastionvm + command: "qemu-img info /var/lib/libvirt/images/{{env_bastion_name}}-snapshot-cloudimg.qcow2" + register: qemu_info -- name: check if bastion already exists +- name: print output from qemu image creation information + tags: kvm_host, bastionvm + debug: + var: qemu_info + +- name: create instance-id + tags: kvm_host, bastionvm + shell: "echo \"instance-id: $(uuidgen || echo i-abcdefg)\" > /var/lib/libvirt/images/tmp/meta-data" + register: uuidgen + +- name: print output from uuidgen command + tags: kvm_host, bastionvm + debug: + var: uuidgen + +- name: Use cloud_init.cfg.j2 template to make user-data file + tags: kvm_host, bastionvm + template: + src: cloud_init.cfg.j2 + dest: /var/lib/libvirt/images/tmp/user-data + +- name: Use network_config_static.cfg.j2 template to make network-config file + tags: kvm_host, bastionvm + template: + src: network_config_static.cfg.j2 + dest: /var/lib/libvirt/images/tmp/network-config + +- name: generate iso file + tags: kvm_host, bastionvm + command: genisoimage -output /var/lib/libvirt/images/{{env_bastion_name}}-seed.img -volid cidata -joliet -rock /var/lib/libvirt/images/tmp/meta-data /var/lib/libvirt/images/tmp/network-config /var/lib/libvirt/images/tmp/user-data + register: gen_iso + +- name: print output from generating iso + tags: kvm_host, bastionvm + debug: + var: gen_iso + +- name: check if bastion already exists. Expect an ignored error if it doesn't exist. 
tags: kvm_host, bastionvm community.libvirt.virt: - name: bastion + name: "{{ env_bastion_name }}" command: status register: bastion_check ignore_errors: true -- name: print status of bastion +- name: Boot bastion tags: kvm_host, bastionvm - debug: - var: bastion_check + command: virt-install + --name {{env_bastion_name}} \ + --virt-type kvm --memory {{env_bastion_ram}} --vcpus {{env_bastion_vcpu}} \ + --boot hd \ + --disk path=/var/lib/libvirt/images/{{env_bastion_name}}-seed.img,device=cdrom \ + --disk path=/var/lib/libvirt/images/{{env_bastion_name}}-snapshot-cloudimg.qcow2,device=disk \ + --graphics none \ + --os-type Linux --os-variant rhel{{env_bastion_os_variant}} \ + --network network=macvtap-net \ + --noautoconsole \ + --noreboot + when: bastion_check.failed == true -- name: start bastion install +- name: Start bastion VM tags: kvm_host, bastionvm - command: virt-install --connect qemu:///system --name bastion --memory 8192 --vcpus 4 --disk size=30 --cdrom /var/lib/libvirt/images/rhel83.iso --accelerate --import --network network=macvtap-net --extra-args "ip={{env_ip_bastion}}::{{env_default_gateway}}:{{env_netmask}}:bastion::none nameserver={{env_dns_nameserver}} inst.repo=http://{{env_ftp}}/linux/s390x/boot/rel/8.3/ ipv6.disable=1" --location /rhcos-install --qemu-commandline="-drive if=none,id=ignition,format=raw,file=/var/lib/libvirt/images/rhel83.iso,readonly=on -device virtio-blk,serial=ignition,drive=ignition" --noautoconsole - when: bastion_check.failed == true + command: virsh start {{env_bastion_name}} -- name: README - Pausing for 60 minutes for you to complete the bastion installation of rhel OS with your specific installation's requirements. Please go to your kvm host at https://your-kvm-host-ip-address-here:9090 to complete installation. Once you see the login prompt on the bastion's terminal, come back here and press "ctrl+c" and then "c" on your localhost terminal where you are seeing this message. 
+- name: wait 3 minutes for automated bastion installation and configuration to complete. To monitor, use a web browser to go to https://your-kvm-host-ip-address-here:9090, sign in as root and use the password you set for env_vm_root_passwd in env.yaml, then go to the Virtual Machines tab, click on bastion's hostname. tags: kvm_host, bastionvm pause: - minutes: 60 + minutes: 3 when: bastion_check.failed == true \ No newline at end of file diff --git a/roles/create_bastion/templates/cloud_init.cfg.j2 b/roles/create_bastion/templates/cloud_init.cfg.j2 new file mode 100644 index 00000000..91c2af3d --- /dev/null +++ b/roles/create_bastion/templates/cloud_init.cfg.j2 @@ -0,0 +1,62 @@ +#cloud-config +hostname: {{env_bastion_name}} +fqdn: {{env_bastion_name}}.{{ env_metadata_name }}.{{ env_baseDomain }} +manage_etc_hosts: true +users: + - name: {{ env_vm_uid }} + sudo: ALL=(ALL) NOPASSWD:ALL + groups: adm,sys + home: /home/{{ env_vm_uid }} + shell: /bin/bash + lock_passwd: false +# allow both password auth and cert auth via ssh (console access can still login) +ssh_pwauth: true +disable_root: false +chpasswd: + list: | + root:{{ env_vm_root_passwd }} + {{ env_vm_uid }}:{{ env_vm_passwd }} + expire: False + +# https://cloudinit.readthedocs.io/en/latest/topics/examples.html#register-redhat-subscription +# https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html/installation_and_configuration_guide/setting_up_cloud_init +# Attach Red Hat subscription +#rh_subscription: +# username: '{{ env_rh_username }}' +# password: '{{ env_rh_passwd }}' +# auto-attach: True + +#growpart: +# mode: auto +# devices: ['/'] +#disk_setup: +# /dev/vdb: +# table_type: gpt +# layout: True +# overwrite: False +#fs_setup: +# - label: DATA_XFS +# filesystem: xfs +# device: '/dev/vdb' +# partition: auto +# #cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s +#mounts: +# # [ /dev/vdx, /mountpoint, fstype ] +# - [ LABEL=DATA_XFS, /dataxfs, xfs ] + +# 3rd col=fs type, 
4th col=permissions, 5th=backup enabled, 6th=fsck order +mount_default_fields: [ None, None, "ext4", "defaults,noatime","0","2"] + +# every boot +bootcmd: + - [ sh, -c, 'echo ran cloud-init again at $(date) | sudo tee -a /root/bootcmd.log' ] + - [ sh, -c, 'echo $(date) instid=$INSTANCE_ID | sudo tee -a /root/bootcmd.log' ] + +# run once for network static IP fix +runcmd: + - [ sh, -c, 'sed -i s/BOOTPROTO=dhcp/BOOTPROTO=static/ /etc/sysconfig/network-scripts/ifcfg-eth0' ] + - [ sh, -c, 'ifdown eth0 && sleep 1 && ifup eth0 && sleep 1 && ip a' ] + - [ sh, -c, 'echo $(date) instid=$INSTANCE_ID | sudo tee -a /root/runcmd.log' ] + +# written to /var/log/cloud-init.log +final_message: "The system is finally up, after $UPTIME seconds" diff --git a/roles/create_bastion/templates/network_config_static.cfg.j2 b/roles/create_bastion/templates/network_config_static.cfg.j2 new file mode 100644 index 00000000..2b793692 --- /dev/null +++ b/roles/create_bastion/templates/network_config_static.cfg.j2 @@ -0,0 +1,10 @@ +version: 2 +ethernets: + eth0: + dhcp4: false + # default libvirt network + addresses: [ {{ env_ip_bastion }} ] + gateway4: {{ env_default_gateway }} + nameservers: + search: [ {{ env_baseDomain }} ] + addresses: [ {{ env_dns_nameserver }},{{ env_dns_forwarder }} ] \ No newline at end of file From e1e52a937be696e75928d3141bcbace3e5c873fe Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:20:42 -0600 Subject: [PATCH 493/885] Removed encrypt files and setup_vault roles because it was unnecessary and increased complexity. 
Signed-off-by: Jacob Emery --- roles/encrypt_files/tasks/main.yaml | 8 -------- roles/setup_vault/tasks/main.yaml | 25 ------------------------- 2 files changed, 33 deletions(-) delete mode 100644 roles/encrypt_files/tasks/main.yaml delete mode 100644 roles/setup_vault/tasks/main.yaml diff --git a/roles/encrypt_files/tasks/main.yaml b/roles/encrypt_files/tasks/main.yaml deleted file mode 100644 index 1fe8c80d..00000000 --- a/roles/encrypt_files/tasks/main.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- - -- name: encrypt env.yaml, skip if already encrypted - tags: setup - command: ansible-vault encrypt "{{ item }}" - loop: "{{ files_to_encrypt }}" - register: encrypt_files - ignore_errors: yes \ No newline at end of file diff --git a/roles/setup_vault/tasks/main.yaml b/roles/setup_vault/tasks/main.yaml deleted file mode 100644 index 5c5dc6f6..00000000 --- a/roles/setup_vault/tasks/main.yaml +++ /dev/null @@ -1,25 +0,0 @@ ---- - -- name: check to see if .vault_pass.txt exists already - stat: - path: .vault_pass.txt - register: vault_pass_check - -- name: delete .vault_pass.txt if it exists already to ensure idempotence - file: - path: .vault_pass.txt - state: absent - when: vault_pass_check.stat.exists - -# .vault_pass.txt is in the .gitignore file, and will therefore not be uploaded to Git should you do a push. -- name: fill .vault_pass.txt with user-provided password - lineinfile: - path: .vault_pass.txt - create: yes - line: "{{ vault_pass }}" - -- name: fill ansible.cfg with default location to find Ansible vault password file - ansible.builtin.lineinfile: - path: ansible.cfg - insertafter: '\[defaults\]' - line: vault_password_file=.vault_pass.txt \ No newline at end of file From cdb2b945600954728e12daa6f9449c048138e7bd Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:22:54 -0600 Subject: [PATCH 494/885] Created variables for bootsrap, control and compute node specifications. 
Removed pause after each node installed and added wait=-1 to automatically reboot and move to the next Signed-off-by: Jacob Emery --- roles/create_bootstrap/tasks/main.yaml | 29 +++---- roles/create_compute_nodes/tasks/main.yaml | 56 +++++++------- roles/create_control_nodes/tasks/main.yaml | 89 +++++++++++----------- 3 files changed, 89 insertions(+), 85 deletions(-) diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 8d2ea665..905227e9 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -7,7 +7,7 @@ - name: check if bootstrap already exists tags: bootstrap community.libvirt.virt: - name: bootstrap + name: "{{ env_bootstrap_name }}" command: status register: bootstrap_check ignore_errors: yes @@ -20,17 +20,18 @@ - name: boot bootstrap tags: bootstrap command: | - virt-install --name bootstrap - --disk size=100 --ram 16000 --cpu host --vcpus 4 - --os-type linux --os-variant rhel8.0 - --network network=macvtap-net - --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_bootstrap}}::{{env_default_gateway}}:{{env_netmask}}:bootstrap:enc1:none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_dns_nameserver}}:8080/ignition/bootstrap.ign" - --noautoconsole --wait=-1 - when: bootstrap_check.failed == true - -- name: Pause 15 minutes for installation. Once you see the login prompt on the bootstrap's terminal. Press "ctrl+C" and then "C" on your localhost terminal where you are seeing this message. 
- tags: bootstrap - pause: - minutes: 15 + virt-install \ + --name {{env_bootstrap_name}} \ + --disk /var/lib/libvirt/images/{{env_bootstrap_name}}-bootstrap.qcow2,size={{ env_boot_disk_size }} \ + --ram {{ env_boot_ram }} \ + --cpu {{ env_boot_cpu }} \ + --vcpus {{ env_boot_vcpu }} \ + --os-type linux \ + --os-variant rhel{{ env_boot_os_variant }} \ + --network network=macvtap-net \ + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_bootstrap}}::{{env_default_gateway}}:{{env_netmask}}:{{env_bootstrap_name}}::none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/bootstrap.ign" \ + --graphics none \ + --wait=-1 \ + --noautoconsole when: bootstrap_check.failed == true \ No newline at end of file diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index 8069a6d0..2a0fffd1 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -7,7 +7,7 @@ - name: check if compute-0 already exists tags: compute community.libvirt.virt: - name: compute-0 + name: "{{ env_compute_0_name }}" command: status register: compute_0_check ignore_errors: yes @@ -20,7 +20,7 @@ - name: check if compute-1 already exists tags: compute community.libvirt.virt: - name: compute-1 + name: "{{ env_compute_1_name }}" command: status register: compute_1_check ignore_errors: yes @@ -33,35 +33,35 @@ - name: install CoreOS on compute-0 node tags: compute command: | - virt-install --name compute-0 - --disk size=100 --ram 16000 --cpu host --vcpus 4 - --os-type linux --os-variant rhel8.0 - --network network=macvtap-net - --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args 
"rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_compute_0}}::{{env_default_gateway}}:{{env_netmask}}:compute-0:enc1:none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/worker.ign" - --noautoconsole --wait=-1 - when: compute_0_check.failed == true - -- name: pause 15 minutes - tags: compute - pause: - minutes: 15 + virt-install \ + --name {{env_compute_0_name}} \ + --disk size={{env_comp_disk_size}} \ + --ram {{env_comp_ram}} \ + --cpu {{env_comp_cpu}} \ + --vcpus {{env_comp_vcpu}} \ + --os-type linux \ + --os-variant rhel{{env_comp_os_variant}} \ + --network network=macvtap-net \ + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_compute_0}}::{{env_default_gateway}}:{{env_netmask}}:{{env_compute_0_name}}::none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/worker.ign" \ + --wait=-1 \ + --noautoconsole when: compute_0_check.failed == true - name: install CoreOS on compute-1 node tags: compute command: | - virt-install --name compute-1 - --disk size=100 --ram 16000 --cpu host --vcpus 4 - --os-type linux --os-variant rhel8.0 - --network network=macvtap-net - --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_compute_1}}::{{env_default_gateway}}:{{env_netmask}}:compute-1:enc1:none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/worker.ign" - 
--noautoconsole --wait=-1 - when: compute_1_check.failed == true - -- name: pause 15 minutes - tags: compute - pause: - minutes: 15 + virt-install \ + --name {{env_compute_1_name}} \ + --disk size={{env_comp_disk_size}} \ + --ram {{env_comp_ram}} \ + --cpu {{env_comp_cpu}} \ + --vcpus {{env_comp_vcpu}} \ + --os-type linux \ + --os-variant rhel{{env_comp_os_variant}} \ + --network network=macvtap-net \ + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_compute_1}}::{{env_default_gateway}}:{{env_netmask}}:{{env_compute_1_name}}::none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/worker.ign" \ + --wait=-1 \ + --noautoconsole when: compute_1_check.failed == true \ No newline at end of file diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index 4d182439..7a052a3c 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -7,7 +7,7 @@ - name: check if control-0 already exists tags: control community.libvirt.virt: - name: control-0 + name: "{{ env_control_0_name }}" command: status register: control_0_check ignore_errors: yes @@ -20,7 +20,7 @@ - name: check if control-1 already exists tags: control community.libvirt.virt: - name: control-1 + name: "{{ env_control_1_name }}" command: status register: control_1_check ignore_errors: yes @@ -33,7 +33,7 @@ - name: check if control-2 already exists tags: control community.libvirt.virt: - name: control-2 + name: "{{ env_control_2_name }}" command: status register: control_2_check ignore_errors: yes @@ -46,53 +46,56 @@ - name: install CoreOS on control-0 node tags: control command: | - virt-install --name control-0 - --disk size=100 --ram 16000 --cpu 
host --vcpus 4 - --os-type linux --os-variant rhel8.0 - --network network=macvtap-net - --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_control_0}}::{{env_default_gateway}}:{{env_netmask}}:control-0:enc1:none:1500 nameserver={{env_ip_bastion}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/master.ign" - --noautoconsole --wait=-1 - when: control_0_check.failed == true - -- name: pause 15 minutes - tags: control - pause: - minutes: 15 + virt-install \ + --name {{env_control_0_name}} \ + --disk size={{env_cont_disk_size}} \ + --ram {{env_cont_ram}} \ + --cpu {{env_cont_cpu}} \ + --vcpus {{env_cont_vcpu}} \ + --os-type linux \ + --os-variant rhel{{env_cont_os_variant}} \ + --network network=macvtap-net \ + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_control_0}}::{{env_default_gateway}}:{{env_netmask}}:{{env_control_0_name}}::none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/master.ign" \ + --graphics none \ + --wait=-1 \ + --noautoconsole when: control_0_check.failed == true - name: install CoreOS on control-1 node tags: control command: | - virt-install --name control-1 - --disk size=100 --ram 16000 --cpu host --vcpus 4 - --os-type linux --os-variant rhel8.0 - --network network=macvtap-net - --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda 
coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_control_1}}::{{env_default_gateway}}:{{env_netmask}}:control-1:enc1:none:1500 nameserver={{env_ip_bastion}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/master.ign" - --noautoconsole --wait=-1 - when: control_1_check.failed == true - -- name: pause 15 minutes - tags: control - pause: - minutes: 15 + virt-install \ + --name {{env_control_1_name}} \ + --disk size={{env_cont_disk_size}} \ + --ram {{env_cont_ram}} \ + --cpu {{env_cont_cpu}} \ + --vcpus {{env_cont_vcpu}} \ + --os-type linux \ + --os-variant rhel{{env_cont_os_variant}} \ + --network network=macvtap-net \ + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_control_1}}::{{env_default_gateway}}:{{env_netmask}}:{{env_control_1_name}}::none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/master.ign" \ + --graphics none \ + --wait=-1 \ + --noautoconsole when: control_1_check.failed == true - name: install CoreOS on control-2 node tags: control command: | - virt-install --name control-2 - --disk size=100 --ram 16000 --cpu host --vcpus 4 - --os-type linux --os-variant rhel8.0 - --network network=macvtap-net - --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_control_2}}::{{env_default_gateway}}:{{env_netmask}}:control-2:enc1:none:1500 nameserver={{env_ip_bastion}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/master.ign" - --noautoconsole --wait=-1 - when: control_2_check.failed 
== true - -- name: pause 45 minutes to wait for bootkube.service to complete connecting control nodes. See README for more information. - tags: control - pause: - minutes: 45 - when: control_2_check.failed == true + virt-install \ + --name {{env_control_2_name}} \ + --disk size={{env_cont_disk_size}} \ + --ram {{env_cont_ram}} \ + --cpu {{env_cont_cpu}} \ + --vcpus {{env_cont_vcpu}} \ + --os-type linux \ + --os-variant rhel{{env_cont_os_variant}} \ + --network network=macvtap-net \ + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_control_2}}::{{env_default_gateway}}:{{env_netmask}}:{{env_control_2_name}}::none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/master.ign" \ + --graphics none \ + --wait=-1 \ + --noautoconsole + when: control_2_check.failed == true \ No newline at end of file From 970d9fc44150048ad886caf108571a50994ec094 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:23:38 -0600 Subject: [PATCH 495/885] Templated haproxy config file Signed-off-by: Jacob Emery --- roles/haproxy/templates/haproxy.cfg.j2 | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/haproxy/templates/haproxy.cfg.j2 index 9199ff87..e0a94b52 100644 --- a/roles/haproxy/templates/haproxy.cfg.j2 +++ b/roles/haproxy/templates/haproxy.cfg.j2 @@ -33,26 +33,26 @@ frontend stats listen api-server-6443 bind *:6443 mode tcp - server bootstrap bootstrap.{{env_metadata_name}}.{{env_baseDomain}}:6443 check inter 1s backup - server control-0 control-0.{{env_metadata_name}}.{{env_baseDomain}}:6443 check inter 1s - server control-1 control-1.{{env_metadata_name}}.{{env_baseDomain}}:6443 check inter 1s - server 
control-2 control-2.{{env_metadata_name}}.{{env_baseDomain}}:6443 check inter 1s + server {{ env_bootstrap_name }} {{env_ip_bootstrap}}:6443 check inter 1s backup + server {{env_control_0_name}} {{env_ip_control_0}}:6443 check inter 1s + server {{env_control_1_name}} {{env_ip_control_1}}:6443 check inter 1s + server {{env_control_2_name}} {{env_ip_control_2}}:6443 check inter 1s listen machine-config-server-22623 bind *:22623 mode tcp - server bootstrap bootstrap.{{env_metadata_name}}.{{env_baseDomain}}:22623 check inter 1s backup - server control-0 control-0.{{env_metadata_name}}.{{env_baseDomain}}:22623 check inter 1s - server control-1 control-1.{{env_metadata_name}}.{{env_baseDomain}}:22623 check inter 1s - server control-2 control-2.{{env_metadata_name}}.{{env_baseDomain}}:22623 check inter 1s + server {{ env_bootstrap_name }} {{env_ip_bootstrap}}:22623 check inter 1s backup + server {{env_control_0_name}} {{env_ip_control_0}}:22623 check inter 1s + server {{env_control_1_name}} {{env_ip_control_1}}:22623 check inter 1s + server {{env_control_2_name}} {{env_ip_control_2}}:22623 check inter 1s listen ingress-router-443 bind *:443 mode tcp balance source - server compute-0 compute-0.{{env_metadata_name}}.{{env_baseDomain}}:443 check inter 1s - server compute-1 compute-1.{{env_metadata_name}}.{{env_baseDomain}}:443 check inter 1s + server {{ env_compute_0_name }} {{env_ip_compute_0}}:443 check inter 1s + server {{ env_compute_1_name }} {{env_ip_compute_1}}:443 check inter 1s listen ingress-router-80 bind *:80 mode tcp balance source - server compute-0 compute-0.{{env_metadata_name}}.{{env_baseDomain}}:80 check inter 1s - server compute-1 compute-1.{{env_metadata_name}}.{{env_baseDomain}}:80 check inter 1s + server {{ env_compute_0_name }} {{env_ip_compute_0}}:80 check inter 1s + server {{ env_compute_1_name }} {{env_ip_compute_1}}:80 check inter 1s From 4b83c698031cd9b2aa1a7d9ab32385a4093db1ed Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 
14:24:35 -0600 Subject: [PATCH 496/885] Changed tag for httpd configuration to match role name Signed-off-by: Jacob Emery --- roles/httpd/tasks/main.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index f74a8762..0ed10844 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -1,18 +1,19 @@ --- - name: Change permissive domain for httpd - tags: selinux,httpconf,bastion + tags: selinux,httpd,bastion selinux_permissive: name: httpd_t permissive: true - name: enable httpd - tags: httpconf,bastion + tags: httpd,bastion systemd: name: httpd enabled: yes - name: restart httpd + tags: httpd,bastion service: name: httpd state: restarted \ No newline at end of file From efa4cb945198578c811175379013ca992c3db4c0 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:26:07 -0600 Subject: [PATCH 497/885] Removed role for mounting rhel and moved it to create_bastion role Signed-off-by: Jacob Emery --- roles/mount_rhel/tasks/main.yaml | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100644 roles/mount_rhel/tasks/main.yaml diff --git a/roles/mount_rhel/tasks/main.yaml b/roles/mount_rhel/tasks/main.yaml deleted file mode 100644 index 62bb95c4..00000000 --- a/roles/mount_rhel/tasks/main.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- - -- name: Check to see if rhcos core install directory already exists - tags: keymastr - stat: - path: "/rhcos-install/" - register: rhcos_mount - -- name: Print results of rhcos core install directory check - tags: keymastr - debug: - var: rhcos_mount - -- name: Mount red hat core os install directory - tags: kvm_host - command: mount -o loop /var/lib/libvirt/images/rhel83.iso /rhcos-install/ - when: rhcos_mount is defined and rhcos_mount.stat.exists == false \ No newline at end of file From 92c5e247b4d713d09bb3d671b010a09e296368f0 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:28:46 -0600 Subject: 
[PATCH 498/885] Specified file name for rootfs image in case it's different. Removed kernel and initramfs downloads from get-ocp because they are needed on the KVM host, not the bastion. Changed the tag from getocp to get-ocp to match the role name. Signed-off-by: Jacob Emery --- roles/get-ocp/tasks/main.yaml | 30 ++++++++++-------------------- 1 file changed, 10 insertions(+), 20 deletions(-) diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index 17959d8d..3ae23afe 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -10,30 +10,20 @@ path: /var/www/html/bin state: directory mode: '0755' - -- name: get ocp kernel + +- name: get rhcos rootfs tags: getocp,bastion get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-kernel-s390x - dest: /var/www/html/bin + url: "{{ env_rhcos_rootfs }}" + dest: /var/www/html/bin/rhcos-live-rootfs.s390x.img mode: '0755' force: yes -- name: get ocp initramfs +- name: Since ignition files deprecate after 24 hours, delete OCP download landing directory for idempotency. 
tags: getocp,bastion - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-initramfs.s390x.img - dest: /var/www/html/bin - mode: '0755' - force: yes - -- name: get ocp rootfs - tags: getocp,bastion - get_url: - url: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/latest/latest/rhcos-live-rootfs.s390x.img - dest: /var/www/html/bin - mode: '0755' - force: yes + file: + path: /ocpinst + state: absent - name: create OCP download landing directory tags: getocp,bastion @@ -44,14 +34,14 @@ - name: Unzip OCP Client tags: getocp,bastion ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz + src: "{{ env_ocp_client }}" dest: /ocpinst/ remote_src: yes - name: Unzip OCP Installer tags: getocp,bastion ansible.builtin.unarchive: - src: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + src: "{{ env_ocp_installer }}" dest: /ocpinst/ remote_src: yes From 08bf693e6b29e2368bb6effca426b96a72f94197 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:30:33 -0600 Subject: [PATCH 499/885] Added tasks to fill ansible.cfg with root username and password. Added tasks to comment out attach_subscription and dns roles in main.yaml if corresponding booleans are False in env.yaml Signed-off-by: Jacob Emery --- roles/set_inventory/tasks/main.yaml | 107 ++++++++++------------------ 1 file changed, 38 insertions(+), 69 deletions(-) diff --git a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml index 7d5338ab..fab1ef32 100644 --- a/roles/set_inventory/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -4,57 +4,6 @@ tags: setup include_vars: env.yaml -- name: Check for any undefined user-input variables in env.yaml. Fail if true. - tags: setup - fail: - msg: Required variable "{{item}}" has not been provided in env.yaml file. 
- when: vars[item] is undefined - loop: - - env_rh_email - - env_rh_pass - - env_apiVersion - - env_baseDomain - - env_hyperthreading_compute - - env_compute_name - - env_compute_count - - env_compute_arch - - env_hyperthreading_control - - env_control_name - - env_control_count - - env_control_arch - - env_metadata_name - - env_cidr - - env_host_prefix - - env_network_type - - env_service_network - - env_fips - - env_pullSecret - - env_ip_kvm_host - - env_ip_bastion - - env_ip_bootstrap - - env_ip_control_0 - - env_ip_control_1 - - env_ip_control_2 - - env_ip_compute_0 - - env_ip_compute_1 - - env_ssh_ans_name - - env_ssh_ans_pass - - env_ssh_ocp_comm - - env_dns_nameserver - - env_default_gateway - - env_netmask - - env_ftp - - env_comp_disk_size - - env_comp_ram - - env_comp_cpu - - env_comp_vcpu - - env_comp_os_variant - - env_cont_disk_size - - env_cont_ram - - env_cont_cpu - - env_cont_vcpu - - env_cont_os_variant - - name: Populate inventory file with ip variables from env.yaml tags: setup blockinfile: @@ -70,26 +19,15 @@ {{ env_ip_bootstrap }} [control_nodes] + {{ env_ip_control_0 }} + {{ env_ip_control_1 }} + {{ env_ip_control_2 }} [compute_nodes] + {{env_ip_compute_0}} + {{env_ip_compute_1}} state: present -- name: Populate inventory file with control IPs from env.yaml, the number of which depends on env_control_count - tags: setup - lineinfile: - path: inventory - line: "{{env_ip_control_{{ item }}}}" - insertafter: '\[control_nodes]' - with_sequence: count={{env_control_count}} - -- name: Populate inventory file with compute IPs from env.yaml, the number of which depends on env_compute_count - tags: setup - lineinfile: - path: inventory - line: "{{ env_ip_compute_{{ item }} }}" - insertafter: '\[compute_nodes]' - with_sequence: count={{env_compute_count}} - - name: check inventory setup tags: setup command: ansible-inventory --list @@ -107,14 +45,45 @@ insertafter: '\[defaults\]' line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} +- name: fill 
ansible.cfg with default ansible password + tags: setup + ansible.builtin.lineinfile: + path: ansible.cfg + insertafter: '\[defaults\]' + line: ansible_password={{env_vm_root_passwd}} + +- name: fill ansible.cfg with default ansible user + tags: setup + ansible.builtin.lineinfile: + path: ansible.cfg + insertafter: '\[defaults\]' + line: remote_user=root + - name: delete ocp_ssh_pub file if it exists already to ensure idempotence - tags: getocp, bastion + tags: setup file: state: absent path: roles/get-ocp/files/ocp_ssh_pub - name: create ocp_ssh_pub if it needs to be + tags: setup file: path: roles/get-ocp/files/ocp_ssh_pub mode: '0755' - state: touch \ No newline at end of file + state: touch + +- name: comment out auto-attach rhel subscription line in main.yaml if requested with env.yaml auto_attach_rhel_sub boolean + tags: setup + replace: + path: main.yaml + regexp: "- attach_subscription" + replace: "#- attach_subscription" + when: auto_attach_rhel_sub | bool == False + +- name: Comment out DNS setup role in main.yaml if requested with env.yaml env_dns_on_bastion boolean + tags: setup + replace: + regexp: "- dns" + path: main.yaml + replace: "#- dns" + when: env_dns_on_bastion | bool == False \ No newline at end of file From bca162b92b6634763cb64f6291c1c3b444bea1be Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:32:37 -0600 Subject: [PATCH 500/885] Added expect script to ssh_copy_id role to automatically fill prompted password for ssh Signed-off-by: Jacob Emery --- roles/ssh_copy_id/tasks/main.yaml | 44 +++++++++++++++-- .../ssh_copy_id/templates/ssh-copy-id.exp.j2 | 49 +++++++++++++++++++ roles/ssh_copy_id/vars/path_to_key_pair.yaml | 2 + 3 files changed, 90 insertions(+), 5 deletions(-) create mode 100644 roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 create mode 100644 roles/ssh_copy_id/vars/path_to_key_pair.yaml diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index 2930e673..8bad7d80 100644 --- 
a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,15 +1,49 @@ --- -- name: Load in variables from env.yaml +- name: Load in variables tags: ssh-copy-id - include_vars: env.yaml + include_vars: "{{item}}" + with_items: + - env.yaml + - roles/ssh_copy_id/vars/path_to_key_pair.yaml -- name: ssh copy id to remote host +- name: get ansible.pub key for check in next task tags: ssh-copy-id - command: "ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/{{ env_ssh_ans_name }}.pub -p 22 '{{ env_ssh_username }}@{{ ssh_target_ip }}'" + set_fact: + ans_pub_key: "{{ lookup('file', '~/.ssh/ansible.pub') }}" + +- name: print key_check + tags: ssh-copy-id + debug: + msg: "{{ ans_pub_key }}" + +- name: delete ssh key from known hosts if it already exists for idempotency + tags: ssh-copy-id + lineinfile: + path: "~/.ssh/known_hosts" + line: "{{ ssh_target_ip }}" + state: absent + delegate_to: localhost + +- name: Use template file to create expect script + tags: ssh-copy-id + template: + src: ssh-copy-id.exp.j2 + dest: roles/ssh_copy_id/files/ssh-copy-id-expect-pass.exp + force: yes + +- name: Copy SSH ID to remote host with pre-provided password + tags: ssh-copy-id + command: "expect roles/ssh_copy_id/files/ssh-copy-id-expect-pass.exp" register: ssh_copy +- name: delete templated expect script + tags: ssh-copy-id + file: + path: roles/ssh_copy_id/files/ssh-copy-id-expect-pass.exp + state: absent + - name: Print results of copying ssh id to remote host. - tags: ssh,ssh-copy-id + tags: ssh-copy-id debug: var: ssh_copy \ No newline at end of file diff --git a/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 b/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 new file mode 100644 index 00000000..b5055434 --- /dev/null +++ b/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 @@ -0,0 +1,49 @@ +#!/usr/local/bin/expect -f +# +# This Expect script was generated by autoexpect on Wed Nov 10 22:21:39 2021 +# Expect and autoexpect were both written by Don Libes, NIST. 
+# +# Note that autoexpect does not guarantee a working script. It +# necessarily has to guess about certain things. Two reasons a script +# might fail are: +# +# 1) timing - A surprising number of programs (rn, ksh, zsh, telnet, +# etc.) and devices discard or ignore keystrokes that arrive "too +# quickly" after prompts. If you find your new script hanging up at +# one spot, try adding a short sleep just before the previous send. +# Setting "force_conservative" to 1 (see below) makes Expect do this +# automatically - pausing briefly before sending each character. This +# pacifies every program I know of. The -c flag makes the script do +# this in the first place. The -C flag allows you to define a +# character to toggle this mode off and on. + +set force_conservative 0 ;# set to 1 to force conservative mode even if + ;# script wasn't run conservatively originally +if {$force_conservative} { + set send_slow {1 .1} + proc send {ignore arg} { + sleep .1 + exp_send -s -- $arg + } +} + +# +# 2) differing output - Some programs produce different output each time +# they run. The "date" command is an obvious example. Another is +# ftp, if it produces throughput statistics at the end of a file +# transfer. If this causes a problem, delete these patterns or replace +# them with wildcards. An alternative is to use the -p flag (for +# "prompt") which makes Expect only look for the last line of output +# (i.e., the prompt). The -P flag allows you to define a character to +# toggle this mode off and on. +# +# Read the man page for more info. 
+# +# -Don + + +set timeout -1 +spawn ssh-copy-id -o StrictHostKeyChecking=no -i {{path_to_key_pair}} root@{{ ssh_target_ip }} +expect "*assword: " +send -- "{{env_vm_passwd}}\r" +expect eof \ No newline at end of file diff --git a/roles/ssh_copy_id/vars/path_to_key_pair.yaml b/roles/ssh_copy_id/vars/path_to_key_pair.yaml new file mode 100644 index 00000000..6b591c03 --- /dev/null +++ b/roles/ssh_copy_id/vars/path_to_key_pair.yaml @@ -0,0 +1,2 @@ +# Will be filled in by ssh-key-gen role + From 027b09c0c05e985ab71ae63e26fe68953e534270 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:33:30 -0600 Subject: [PATCH 501/885] Added files folder in get-ocp for OCP SSH public key. Signed-off-by: Jacob Emery --- roles/get-ocp/files/ocp_ssh_pub | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100755 roles/get-ocp/files/ocp_ssh_pub diff --git a/roles/get-ocp/files/ocp_ssh_pub b/roles/get-ocp/files/ocp_ssh_pub new file mode 100755 index 00000000..e69de29b From 9ca26091a2e75d90ed95d21539385d25f1966246 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 14:37:18 -0600 Subject: [PATCH 502/885] Removed inventories folder for now. May add back later. Signed-off-by: Jacob Emery --- inventories/inventory.yml | 1 - 1 file changed, 1 deletion(-) delete mode 100644 inventories/inventory.yml diff --git a/inventories/inventory.yml b/inventories/inventory.yml deleted file mode 100644 index e290e64f..00000000 --- a/inventories/inventory.yml +++ /dev/null @@ -1 +0,0 @@ -# Placeholder as we move to community directory structure. From 4e68cb440e95716eed3db9425a8d6274a668d818 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 15:25:29 -0600 Subject: [PATCH 503/885] Updated changelog to reflect major update. Added roadmap. 
Signed-off-by: Jacob Emery --- CHANGELOG.md | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 112424b5..60961c8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,3 +11,45 @@ All notable changes to this project will be documented in this file. [unreleased]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v0.0.1...HEAD [0.0.1]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v0.0.1 + +## [Automated Bastion Update] + +## [1.0.0] - 2021-11-24 + +### Summary +- Fully automated bastion installation and configuration using cloud-init + +### Added +- Added options in env.yaml for creating a DNS server on the bastion or not, and for automatically attaching Red Hat subscriptions +- Added variables for bootstrap, bastion, control and compute nodes' specifications in env.yaml +- Added node name variables in env.yaml +- Added variable for network interface name in env.yaml +- Added variable for DNS forwarder in env.yaml +- Added templating of DNS configuration files so they don't have to be pre-provided +- Added expect script to ssh_copy_id role so that the user doesn't have to type in ssh password when copying ssh key +- Added templating of haproxy config file +- Added a boot_teardown tag in teardown.yaml to automate the teardown of bootstrap node +### Modified +- Reworked create_bastion role to use cloud-init to fully automate configuration and installation of the bastion node +- Reworked teardown.yaml script to decrease complexity and work faster. 
+- Changed some tags to match their corresponding role names +- Lots of small improvements and tweaks +### Removed +- Removed encryption of env.yaml as it was unnecessary and increased complexity + + +[Automated Bastion Update]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.0.0...HEAD +[1.0.0]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.0.0 + +## [Roadmap] + +- Make ssh-copy-id role idempotent. +- Add role to check if DNS is working properly before continuing +- Add picture of finished infrastructure to README +- Add README’s for each role +- Create inventories and playbooks folders +- Integrate setup.yaml into main.yaml - no longer need to be separate because encryption role removed +- Automate verification steps. This is difficult because Ansible has to use bastion as a jumphost. +- Add option in env.yaml to create HAProxy on bastion or not (on different server) +- Further down the line, air-gapped install of OpenShift option +- Further down the line, add functionality to provision more than 3 control and 2 compute nodes From defaa59bcfd14f51ab978d75b54e20e2083d7885 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 15:27:34 -0600 Subject: [PATCH 504/885] Fixed some errors in changelog Signed-off-by: Jacob Emery --- CHANGELOG.md | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 60961c8b..ae2887e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,15 +6,12 @@ All notable changes to this project will be documented in this file. 
## [0.0.1] - 2021-08-24 -### Added -- Added a changelog - [unreleased]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v0.0.1...HEAD [0.0.1]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v0.0.1 ## [Automated Bastion Update] -## [1.0.0] - 2021-11-24 +## 1.0.0 - 2021-11-24 ### Summary - Fully automated bastion installation and configuration using cloud-init @@ -37,11 +34,7 @@ All notable changes to this project will be documented in this file. ### Removed - Removed encryption of env.yaml as it was unnecessary and increased complexity - -[Automated Bastion Update]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.0.0...HEAD -[1.0.0]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.0.0 - -## [Roadmap] +## Roadmap - Make ssh-copy-id role idempotent. - Add role to check if DNS is working properly before continuing From 24ef9d178ae882a1febf9c5852b757f92a8ba46a Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 24 Nov 2021 15:29:18 -0600 Subject: [PATCH 505/885] Tweaked changelog again Signed-off-by: Jacob Emery --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ae2887e3..87721d4e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,9 @@ All notable changes to this project will be documented in this file. ### Removed - Removed encryption of env.yaml as it was unnecessary and increased complexity +[Automated Bastion Update]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.0.0...HEAD +[1.0.0]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.0.0 + ## Roadmap - Make ssh-copy-id role idempotent. 
From 5292a0c3e8a7c51510d4e7058f128710e5dcf5ad Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 29 Nov 2021 09:00:45 -0600 Subject: [PATCH 506/885] Added bullet to roadmap Signed-off-by: Jacob Emery --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 87721d4e..550cd742 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,7 @@ All notable changes to this project will be documented in this file. - Add role to check if DNS is working properly before continuing - Add picture of finished infrastructure to README - Add README’s for each role +- Add update_vars role to create vars files from env.yaml / update if changed to increase effifiency and user understanding - Create inventories and playbooks folders - Integrate setup.yaml into main.yaml - no longer need to be separate because encryption role removed - Automate verification steps. This is difficult because Ansible has to use bastion as a jumphost. From c0c7c0094f50905ed8e07859708c320db81263ca Mon Sep 17 00:00:00 2001 From: ftmiranda Date: Tue, 30 Nov 2021 10:35:53 -0600 Subject: [PATCH 507/885] new --- env.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env.yaml b/env.yaml index a523e1f2..c062b8f1 100644 --- a/env.yaml +++ b/env.yaml @@ -9,7 +9,7 @@ auto_attach_rhel_sub: True # make sure to also comment out the above variables i # To populate OpenShift install config file. 
env_apiVersion: v1 -env_baseDomain: +env_baseDomain: X env_hyperthreading_compute: Enabled env_compute_name: compute env_compute_count: 2 From d1a0dfb52c46e863c6231649968d901f0ec90ff8 Mon Sep 17 00:00:00 2001 From: ftmiranda Date: Tue, 30 Nov 2021 10:36:56 -0600 Subject: [PATCH 508/885] back to normal --- env.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env.yaml b/env.yaml index c062b8f1..a523e1f2 100644 --- a/env.yaml +++ b/env.yaml @@ -9,7 +9,7 @@ auto_attach_rhel_sub: True # make sure to also comment out the above variables i # To populate OpenShift install config file. env_apiVersion: v1 -env_baseDomain: X +env_baseDomain: env_hyperthreading_compute: Enabled env_compute_name: compute env_compute_count: 2 From 76e2c1b713ead616c07a1c3515fdbe2a6bac3e9d Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 30 Nov 2021 11:02:27 -0600 Subject: [PATCH 509/885] Removed unnecessary comments from ssh-copy-id expect script. Signed-off-by: Jacob Emery --- .../ssh_copy_id/templates/ssh-copy-id.exp.j2 | 32 ------------------- 1 file changed, 32 deletions(-) diff --git a/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 b/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 index b5055434..d132625e 100644 --- a/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 +++ b/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 @@ -1,21 +1,4 @@ #!/usr/local/bin/expect -f -# -# This Expect script was generated by autoexpect on Wed Nov 10 22:21:39 2021 -# Expect and autoexpect were both written by Don Libes, NIST. -# -# Note that autoexpect does not guarantee a working script. It -# necessarily has to guess about certain things. Two reasons a script -# might fail are: -# -# 1) timing - A surprising number of programs (rn, ksh, zsh, telnet, -# etc.) and devices discard or ignore keystrokes that arrive "too -# quickly" after prompts. If you find your new script hanging up at -# one spot, try adding a short sleep just before the previous send. 
-# Setting "force_conservative" to 1 (see below) makes Expect do this -# automatically - pausing briefly before sending each character. This -# pacifies every program I know of. The -c flag makes the script do -# this in the first place. The -C flag allows you to define a -# character to toggle this mode off and on. set force_conservative 0 ;# set to 1 to force conservative mode even if ;# script wasn't run conservatively originally @@ -27,21 +10,6 @@ if {$force_conservative} { } } -# -# 2) differing output - Some programs produce different output each time -# they run. The "date" command is an obvious example. Another is -# ftp, if it produces throughput statistics at the end of a file -# transfer. If this causes a problem, delete these patterns or replace -# them with wildcards. An alternative is to use the -p flag (for -# "prompt") which makes Expect only look for the last line of output -# (i.e., the prompt). The -P flag allows you to define a character to -# toggle this mode off and on. -# -# Read the man page for more info. 
-# -# -Don - - set timeout -1 spawn ssh-copy-id -o StrictHostKeyChecking=no -i {{path_to_key_pair}} root@{{ ssh_target_ip }} expect "*assword: " From 099617e0c93bda0762365e2194e4d033692c062c Mon Sep 17 00:00:00 2001 From: ftmiranda Date: Tue, 30 Nov 2021 11:11:53 -0600 Subject: [PATCH 510/885] updates --- main.yaml | 1 + roles/check_dns/tasks/main.yaml | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 roles/check_dns/tasks/main.yaml diff --git a/main.yaml b/main.yaml index 762f72e3..8fc9e889 100644 --- a/main.yaml +++ b/main.yaml @@ -59,6 +59,7 @@ - set_selinux_permissive - set_firewall - dns + - check_dns - haproxy - httpd - get-ocp diff --git a/roles/check_dns/tasks/main.yaml b/roles/check_dns/tasks/main.yaml new file mode 100644 index 00000000..b1404b5b --- /dev/null +++ b/roles/check_dns/tasks/main.yaml @@ -0,0 +1,25 @@ +--- + +- name: check internal cluster DNS resolution + tags: check_dns,dns + command: "nslookup {{ item }}" + with_items: + - "{{ env_bastion_name }}.{{ env_baseDomain }}" + - "{{ env_bootstrap_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "{{ env_control_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "{{ env_control_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "{{ env_control_2_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "{{ env_compute_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "{{ env_compute_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "haproxy.{{ env_baseDomain }}" + - "api.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "api-int.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "test.apps.{{ env_metadata_name }}.{{ env_baseDomain }}" + +- name: check external DNS resolution from DNS forwarder + tags: check_dns,dns + command: "nslookup {{ item }}" + loop: + - www.google.com + - www.ibm.com + - www.redhat.com From 74ce572d55f24f7589938b0594dbdc3c8a6d5fcf Mon Sep 17 00:00:00 2001 From: ftmiranda Date: Tue, 30 Nov 2021 
11:11:53 -0600 Subject: [PATCH 511/885] add dns_check role to make sure DNS is tested before OCP installation Signed-off-by: Filipe Miranda --- main.yaml | 1 + roles/check_dns/tasks/main.yaml | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+) create mode 100644 roles/check_dns/tasks/main.yaml diff --git a/main.yaml b/main.yaml index 762f72e3..8fc9e889 100644 --- a/main.yaml +++ b/main.yaml @@ -59,6 +59,7 @@ - set_selinux_permissive - set_firewall - dns + - check_dns - haproxy - httpd - get-ocp diff --git a/roles/check_dns/tasks/main.yaml b/roles/check_dns/tasks/main.yaml new file mode 100644 index 00000000..b1404b5b --- /dev/null +++ b/roles/check_dns/tasks/main.yaml @@ -0,0 +1,25 @@ +--- + +- name: check internal cluster DNS resolution + tags: check_dns,dns + command: "nslookup {{ item }}" + with_items: + - "{{ env_bastion_name }}.{{ env_baseDomain }}" + - "{{ env_bootstrap_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "{{ env_control_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "{{ env_control_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "{{ env_control_2_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "{{ env_compute_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "{{ env_compute_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "haproxy.{{ env_baseDomain }}" + - "api.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "api-int.{{ env_metadata_name }}.{{ env_baseDomain }}" + - "test.apps.{{ env_metadata_name }}.{{ env_baseDomain }}" + +- name: check external DNS resolution from DNS forwarder + tags: check_dns,dns + command: "nslookup {{ item }}" + loop: + - www.google.com + - www.ibm.com + - www.redhat.com From 07924ba322707517eb838cbd7a19e6ea23806ee4 Mon Sep 17 00:00:00 2001 From: Subhi Al Hasan Date: Tue, 30 Nov 2021 20:14:24 +0100 Subject: [PATCH 512/885] fail dns_check depending on the nslookup out --- roles/check_dns/tasks/main.yaml | 6 ++++++ 1 file changed, 6 
insertions(+) diff --git a/roles/check_dns/tasks/main.yaml b/roles/check_dns/tasks/main.yaml index b1404b5b..43209989 100644 --- a/roles/check_dns/tasks/main.yaml +++ b/roles/check_dns/tasks/main.yaml @@ -3,6 +3,9 @@ - name: check internal cluster DNS resolution tags: check_dns,dns command: "nslookup {{ item }}" + register: command_result + # fail step if the output contains "server can't find" + failed_when: '"server can" in command_result.stdout' with_items: - "{{ env_bastion_name }}.{{ env_baseDomain }}" - "{{ env_bootstrap_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" @@ -18,6 +21,9 @@ - name: check external DNS resolution from DNS forwarder tags: check_dns,dns + register: command_result + # fail step if the output contains "server can't find" + failed_when: '"server can" in command_result.stdout' command: "nslookup {{ item }}" loop: - www.google.com From 022350c8791344c12b68f20593b682b26a9bc521 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 30 Nov 2021 14:34:30 -0600 Subject: [PATCH 513/885] Clarified that errors from teardown script that result from VMs not existing will be ignored. Signed-off-by: Jacob Emery --- roles/teardown_vms/tasks/main.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/teardown_vms/tasks/main.yaml b/roles/teardown_vms/tasks/main.yaml index 28982cb2..5aac979e 100644 --- a/roles/teardown_vms/tasks/main.yaml +++ b/roles/teardown_vms/tasks/main.yaml @@ -1,13 +1,13 @@ --- -- name: Destroy running VMs. Expect errors if some VMs are already destroyed. +- name: Destroy running VMs. Expect ignored errors if some VMs are already destroyed. community.libvirt.virt: name: "{{ item }}" command: destroy loop: "{{ vms }}" ignore_errors: yes -- name: Undefine remaining existing VMs. Expect errors if some VMs are already undefined. +- name: Undefine remaining existing VMs. Expect ignored errors if some VMs are already undefined. 
community.libvirt.virt: name: "{{ item }}" command: undefine From 17b7f8aec09cc539691f6c335719be540e982828 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 1 Dec 2021 13:13:55 -0600 Subject: [PATCH 514/885] Hyperlinked How-To's in README and added more information to users get OCP pull secret and qcow2 file Signed-off-by: Jacob Emery --- README.md | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 06c42984..6b64aafa 100644 --- a/README.md +++ b/README.md @@ -21,11 +21,12 @@ for local workstation running Ansible * MacOS X ## Pre-Requisites: -* Red Hat OpenShift Container Platform license or free trial (includes licenses for RHEL and CoreOS) -* Python3 intalled on your local computer (how-to: https://realpython.com/installing-python/) -* Ansible installed on your local computer (how-to: https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) +* An acvtive Red Hat account ([Sign Up](https://www.redhat.com/wapps/ugc/register.html?_flowId=register-flow&_flowExecutionKey=e1s1)) +* A [license](https://access.redhat.com/products/red-hat-openshift-container-platform/) or [free trial](https://www.redhat.com/en/technologies/cloud-computing/openshift/try-it) of Red Hat OpenShift Container Platform for IBM Z systems - s390x architecture (OCP license comes with licenses for RHEL and CoreOS) +* Python3 intalled on your local computer ([how-to](https://realpython.com/installing-python/)) +* Ansible installed on your local computer ([how-to](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html)) * If you are using Mac OS X for your localhost workstation to run Ansible, you also need to have: - * homebrew package manager installed (how-to: https://brew.sh/) + * homebrew package manager installed ([how-to](https://brew.sh/)) * Updated software for command line tools (run "softwareupdate --all --install" in your terminal) * Access to a logical partition (LPAR) on 
an IBM Z or LinuxONE mainframe, with at least: * 6 Integrated Facilities for Linux (IFLs) with SMT2 enabled @@ -41,12 +42,12 @@ for local workstation running Ansible * **Step 1: Get This Repository** * Navigate to a folder where you would like to store this project in your terminal * Run "git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git" -* **Step 2: Get OpenShift Information** - * In a web browser, navigate to https://console.redhat.com/openshift/install/ibmz/user-provisioned - * Copy the OpenShift pull secret (for use in the next step) +* **Step 2: Get Red Hat Info** + * In a web browser, navigate to Red Hat's [customer portal](https://access.redhat.com/products/red-hat-enterprise-linux/), click on the 'Download Latest' button, use the drop-down to select Red Hat Enterprise Linux for IBM z Systems, select your desired version, make sure 'Architcture' is 's390x', and then scroll down to 'Red Hat Enterprise Linux X.X Update KVM Guest Image' and right click on 'Download Now' and copy link. Paste it into [env.yaml](env.yaml) as the variable 'env_rhel_qcow2'. + * In a web browser, navigate to the Red Hat [console](https://console.redhat.com/openshift/install/ibmz/user-provisioned) and copy the OpenShift pull secret and paste it into [env.yaml](env.yaml) as the variable 'env_pullSecret'. * **Step 3: Set Variables** * In a text editor of your choice, open [env.yaml](env.yaml) - * Fill out the variables to match your specific installation. Many variables are pre-filled with defaults. For a default installation, you only need to fill in the empty variables. + * Fill out the remaining variables to match your specific installation. Many variables are pre-filled with defaults. For a default installation, you only need to fill in the empty variables. 
* **Step 4: Setup Script** * Navigate to the folder where you cloned the Git Repository * Run "ansible-playbook setup.yaml --ask-become-pass" From de6d4beb0035659d0f0d8d519f9fb593acd2c61e Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 1 Dec 2021 13:18:00 -0600 Subject: [PATCH 515/885] Made small edit to README Signed-off-by: Jacob Emery --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 6b64aafa..1716d671 100644 --- a/README.md +++ b/README.md @@ -33,8 +33,7 @@ for local workstation running Ansible * 85 GB of RAM * 1 TB of disk space * On that LPAR, Red Hat Enterprise Linux (RHEL) with networking configured and a root password set -* On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses -* Fully Qualified Domain Names (FQDN) names for all IPv4 addresses +* On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses with Fully Qualified Domain Names (FQDN) ## Installation Instructions: From 23ae031b82807b2c92211580e462f525be4a2d28 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 1 Dec 2021 14:42:20 -0600 Subject: [PATCH 516/885] Tweaked tags section to clarify teardown usage Signed-off-by: Jacob Emery --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 1716d671..f5429672 100644 --- a/README.md +++ b/README.md @@ -123,7 +123,7 @@ List of Tags (in alphabetical order): * create_nodes = tasks from the second set of kvm plays * dns = configuration of DNS server on bastion * firewall = for tasks related to firewall settings -* full_teardown = for use with teardown.yaml to bring down all VMs +* full_teardown = for use with teardown.yaml to bring down all KVM guests. 
To continue from that point, run the main playbook with "--tags 'bastionvm,bastion,create_nodes'" * getocp = download of OCP installer and http server configuration * haproxy = configuration of haproxy on bastion kvm guest * httpd = configuration of httpd server on bastion kvm guest @@ -131,7 +131,7 @@ List of Tags (in alphabetical order): * kvm_host = tasks to apply to KVM host for OCP cluster * kvm_prep = tasks from the first set of kvm plays * localhost = for tasks that apply to the local machine running Ansible -* partial_teardown = for use with teardown.yaml to bring down all VMs except the bastion +* partial_teardown = for use with teardown.yaml to bring down all KVM guests except the bastion. To continue from that point, run the main playbook with "--tags 'bastion,create_nodes'" * pkg = install and update all packages * prep = run all setup playbooks * selinux = for tasks related to SELinux settings From 7ffaf22b89b8dce577d2fa5803ac5538952a9d85 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 1 Dec 2021 14:49:22 -0600 Subject: [PATCH 517/885] Reworked DNS forward config template to specify that the load balancer is on the bastion Signed-off-by: Jacob Emery --- roles/check_dns/tasks/main.yaml | 1 - roles/dns/templates/dns.db.j2 | 9 +++------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/roles/check_dns/tasks/main.yaml b/roles/check_dns/tasks/main.yaml index 43209989..62d3a253 100644 --- a/roles/check_dns/tasks/main.yaml +++ b/roles/check_dns/tasks/main.yaml @@ -14,7 +14,6 @@ - "{{ env_control_2_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" - "{{ env_compute_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" - "{{ env_compute_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" - - "haproxy.{{ env_baseDomain }}" - "api.{{ env_metadata_name }}.{{ env_baseDomain }}" - "api-int.{{ env_metadata_name }}.{{ env_baseDomain }}" - "test.apps.{{ env_metadata_name }}.{{ env_baseDomain }}" diff --git a/roles/dns/templates/dns.db.j2 
b/roles/dns/templates/dns.db.j2 index e965d6bd..4976c66c 100644 --- a/roles/dns/templates/dns.db.j2 +++ b/roles/dns/templates/dns.db.j2 @@ -25,14 +25,11 @@ $TTL 86400 {{ env_compute_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. IN A {{ env_ip_compute_0 }} {{ env_compute_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. IN A {{ env_ip_compute_1 }} -;entry of your load balancer -haproxy IN A {{ env_ip_bastion }} - ;The api identifies the IP of your load balancer. -api.{{ env_metadata_name }} IN CNAME haproxy.{{ env_baseDomain }}. -api-int.{{ env_metadata_name }} IN CNAME haproxy.{{ env_baseDomain }}. +api.{{ env_metadata_name }} IN CNAME {{ env_bastion_name }}.{{ env_baseDomain }}. +api-int.{{ env_metadata_name }} IN CNAME {{ env_bastion_name }}.{{ env_baseDomain }}. ;The wildcard also identifies the load balancer. -*.apps.{{ env_metadata_name }} IN CNAME haproxy.{{ env_baseDomain }}. +*.apps.{{ env_metadata_name }} IN CNAME {{ env_bastion_name }}.{{ env_baseDomain }}. ;EOF \ No newline at end of file From 0b7ad8b1f9c4d3e1ad46bbf735ff0dd544c21a13 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 1 Dec 2021 14:52:39 -0600 Subject: [PATCH 518/885] Typo in README Signed-off-by: Jacob Emery --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f5429672..b8492a41 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ for local workstation running Ansible * MacOS X ## Pre-Requisites: -* An acvtive Red Hat account ([Sign Up](https://www.redhat.com/wapps/ugc/register.html?_flowId=register-flow&_flowExecutionKey=e1s1)) +* An Red Hat account ([Sign Up](https://www.redhat.com/wapps/ugc/register.html?_flowId=register-flow&_flowExecutionKey=e1s1)) * A [license](https://access.redhat.com/products/red-hat-openshift-container-platform/) or [free trial](https://www.redhat.com/en/technologies/cloud-computing/openshift/try-it) of Red Hat OpenShift Container Platform for IBM Z systems - s390x architecture (OCP 
license comes with licenses for RHEL and CoreOS) * Python3 intalled on your local computer ([how-to](https://realpython.com/installing-python/)) * Ansible installed on your local computer ([how-to](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html)) From 458b407d40dd0b6757268ddedf07678c5b30a3ec Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 1 Dec 2021 15:02:43 -0600 Subject: [PATCH 519/885] Fixed typo in README Signed-off-by: Jacob Emery --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b8492a41..67755f7b 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ for local workstation running Ansible * MacOS X ## Pre-Requisites: -* An Red Hat account ([Sign Up](https://www.redhat.com/wapps/ugc/register.html?_flowId=register-flow&_flowExecutionKey=e1s1)) +* A Red Hat account ([Sign Up](https://www.redhat.com/wapps/ugc/register.html?_flowId=register-flow&_flowExecutionKey=e1s1)) * A [license](https://access.redhat.com/products/red-hat-openshift-container-platform/) or [free trial](https://www.redhat.com/en/technologies/cloud-computing/openshift/try-it) of Red Hat OpenShift Container Platform for IBM Z systems - s390x architecture (OCP license comes with licenses for RHEL and CoreOS) * Python3 intalled on your local computer ([how-to](https://realpython.com/installing-python/)) * Ansible installed on your local computer ([how-to](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html)) From a9f4e37a342f03c010154b3057e9a5d429a46153 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 1 Dec 2021 17:09:44 -0600 Subject: [PATCH 520/885] Deleted unnecessary comment Signed-off-by: Jacob Emery --- main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.yaml b/main.yaml index 8fc9e889..1e2d0f5d 100644 --- a/main.yaml +++ b/main.yaml @@ -54,7 +54,7 @@ roles: - check_ssh - attach_subscription - - install_packages #RHEL 
subscription already attached + - install_packages - ssh-ocp-key-gen # SSH key for bastion to connect to nodes - set_selinux_permissive - set_firewall From 217d259d6d68bd4a750dcebaebd3be2bc8945f84 Mon Sep 17 00:00:00 2001 From: Filipe Miranda Date: Thu, 2 Dec 2021 08:26:53 -0600 Subject: [PATCH 521/885] improvment to the check_dns --- roles/check_dns/tasks/main.yaml | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/roles/check_dns/tasks/main.yaml b/roles/check_dns/tasks/main.yaml index 62d3a253..9f5361c3 100644 --- a/roles/check_dns/tasks/main.yaml +++ b/roles/check_dns/tasks/main.yaml @@ -1,12 +1,14 @@ --- +- name: create list of IP addresses from env.yaml + tags: check_dns + set_fact: + ip_from_env: ['{{ env_ip_bastion }}','{{ env_ip_bootstrap }}','{{ env_ip_control_0 }}','{{ env_ip_control_1 }}','{{ env_ip_control_2 }}','{{ env_ip_compute_0 }}','{{ env_ip_compute_1 }}','{{ env_ip_bastion }}','{{ env_ip_bastion }}','{{ env_ip_bastion }}'] + - name: check internal cluster DNS resolution tags: check_dns,dns - command: "nslookup {{ item }}" - register: command_result - # fail step if the output contains "server can't find" - failed_when: '"server can" in command_result.stdout' - with_items: + shell: "dig +short {{ item }} | tail -n1" + loop: - "{{ env_bastion_name }}.{{ env_baseDomain }}" - "{{ env_bootstrap_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" - "{{ env_control_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" @@ -17,7 +19,12 @@ - "api.{{ env_metadata_name }}.{{ env_baseDomain }}" - "api-int.{{ env_metadata_name }}.{{ env_baseDomain }}" - "test.apps.{{ env_metadata_name }}.{{ env_baseDomain }}" - + register: internal_check + failed_when: ip_from_env[i] != internal_check.stdout + loop_control: + extended: yes + index_var: i + - name: check external DNS resolution from DNS forwarder tags: check_dns,dns register: command_result @@ -27,4 +34,4 @@ loop: - www.google.com - www.ibm.com - - www.redhat.com + - 
www.redhat.com \ No newline at end of file From 1ee58b92587722e0c8818d2f5e1d52b411df9f1c Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 10:47:23 -0600 Subject: [PATCH 522/885] Removed all verification steps as they are now automated. Signed-off-by: Jacob Emery --- README.md | 108 +++++++++++++++++++++++------------------------------- 1 file changed, 46 insertions(+), 62 deletions(-) diff --git a/README.md b/README.md index 67755f7b..10dfa724 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ * [Instructions](#Installation-Instructions) * [Setup](#Setup) * [Provisioning](#Provisioning) -* [Verification](#Verification) +* [Install Complete](*Install-Complete) * [Teardown](#Teardown) * [Tags](#Tags) @@ -42,7 +42,7 @@ for local workstation running Ansible * Navigate to a folder where you would like to store this project in your terminal * Run "git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git" * **Step 2: Get Red Hat Info** - * In a web browser, navigate to Red Hat's [customer portal](https://access.redhat.com/products/red-hat-enterprise-linux/), click on the 'Download Latest' button, use the drop-down to select Red Hat Enterprise Linux for IBM z Systems, select your desired version, make sure 'Architcture' is 's390x', and then scroll down to 'Red Hat Enterprise Linux X.X Update KVM Guest Image' and right click on 'Download Now' and copy link. Paste it into [env.yaml](env.yaml) as the variable 'env_rhel_qcow2'. + * In a web browser, navigate to Red Hat's [customer portal](https://access.redhat.com/products/red-hat-enterprise-linux/), click on the 'Download Latest' button, use the drop-down to select Red Hat Enterprise Linux for IBM z Systems, select your desired version, make sure 'Architcture' is 's390x', and then scroll down to 'Red Hat Enterprise Linux X.X Update KVM Guest Image' and click on 'Download Now'. See where it downloads, copy the path and paste it into [env.yaml](env.yaml) as the variable 'env_rhel_qcow2'. 
* In a web browser, navigate to the Red Hat [console](https://console.redhat.com/openshift/install/ibmz/user-provisioned) and copy the OpenShift pull secret and paste it into [env.yaml](env.yaml) as the variable 'env_pullSecret'. * **Step 3: Set Variables** * In a text editor of your choice, open [env.yaml](env.yaml) @@ -56,51 +56,28 @@ for local workstation running Ansible * If you are not already there, navigate to the folder where you cloned the Git repository in your terminal. * Run the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Watch Ansible as it completes the installation, correcting errors if they arise. - * If all goes smoothly, this will take approximately 25 minutes. * To look at what is running in detail, open roles/'task-you-want-to-inspect'/tasks/main.yaml * If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of the main playbook run, use [tags](#Tags). See the [main playbook](main.yaml) to determine what part you would like to run and use those tags when running the [main playbook](main.yaml). Example: "ansible-playbook main.yaml --ask-become-pass --tags 'get-ocp,create_nodes'" * Note: we chose to not edit the user's .bash_profile/.bashrc with an automatic ssh-add command because that would change the user's local workstation set-up in a way that was potentially undesirable. Therefore, if you close out your terminal session in the middle of provisioning, you will need to run "ansible-playbook main.yaml --tags ssh-agent" before doing anything else. 
-### Verification -* **Step 6: Bootkube Verification** - * SSH into the bastion (run "ssh root@your-bastion-IP-address-here" in the terminal) - * Then SSH into the bootstrap as core ("ssh core@your-bootstrap-IP-address-here") - * Run "journalctl -u bootkube.service" to watch the bootstrap connect to the control nodes (hold spacebar to get to the bottom of the log). Press "q" to exit the log. - * Expect lots of errors in this log, as the control nodes may not be entirely up yet. - * This may take some time. Check in occassionally by running the above command again to update the log. - * Once all control nodes are connected, the end of the bootkube log will read "bootkube.service complete". -* **Step 7: Export Kube Config** - * Disconnect from bootstrap (Press "Ctrl+d") - * Make sure you are connected to the bastion as root (if not, run "ssh root@your-bastion-IP-address-here") - * Then run "export KUBECONFIG=/ocpinst/auth/kubeconfig" - * Check that worked by running "oc whoami", which should return "system:admin" - * If this doesn't work, just give it some time for the control nodes to connect and try again. - * If you are getting "oc: command not found", disconnect from the bastion (press "Ctrl+d") and repeat this step. -* **Step 8: Approve Certificates** - * From the bastion as root user (as above) run "oc get csr". This will bring up a list of certificates that need approval. - * To approve all certificates at the same time, run the following command: - "for i in \`oc get csr --no-headers | grep -i pending | awk '{ print $1 }\'`; do oc adm certificate approve $i; done" - * If you are viewing this file outside of GitHub, remove \ characters in the above command before running. The slashes are escape characters for formatting on GitHub. - * It may take some time for all the certificates that need approval to show up. Keep running "oc get csr" to check to make sure that no new certificates have appeared since you last approved them. 
- * Once all certificates read "Approved, Issued". You're ready for the next step. -* **Step 9: Wait for Cluster To Become Operational** - * From the bastion, as root user (as above) check node status by running: "oc get nodes". All nodes need to be "Ready" in the "Status" column. - * From the bastion, as root user (as above) run "oc get clusteroperators". All cluster operators need to be "True" in the "Available" column. If there are messages regarding revisions, give it some time and check back in a few minutes by running the same command again. - * This may take some time, especially the cluster operators. Run the above two bullets' commmands to check-in occasionally. - * Once all nodes are ready and cluster operators are available with no messages, you are ready to continue to the next step. -* **Step 10: Verify OpenShift Installation** - * Run "/ocpinst/openshift-install --dir=/ocpinst wait-for install-complete" - * If installation is ready, running the above command will give you some information about how to log into the OpenShift cluster's dashboard. - * Copy the provided URL into a web browser using the provided username (kubeadmin) and password for first time sign-on. - * Congratulations! Your OpenShift cluster provisioning and installation is now complete. +### Install Complete +* **Step 6: First-Time Login** + * The last step of the main playbook will print a URL, username and temporary password for first-time login. + * Use a web-browser to type in the URL, which should take you to a sign-in page. Use the provided credentials to sign in. + * Congratulations! Your OpenShift cluster installation is now complete. -* Optional: Leave the bootstrap running as is, shut it down and destroy it (Run "ansible-playbook teardown.yaml --ask-become-pass --tags boot_teardown"), or convert it into another compute node. 
+## Troubleshooting: +If you encounter errors while running the main playbook, there are a few things you can do: +*1)* Check your variables in env.yaml +*2)* Inspect the task that failed by inspecting the task in roles/role_name/tasks_main.yaml +*3)* Re-Run the role indivually with with [tags](#Tags) +*4)* Teardown problematic KVM guests with [teardown](#Teardown) scripts and start again with [tags](#Tags) ## Teardown: * If you would like to teardown your VMs, first determine whether you would like to do a full, partial, or bootstrap teardown, specified below. * Full: to teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags full_teardown" * Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags partial_teardown" -* Bootstrap: The bootstrap is not needed after OpenShift fully installs. To easily tear it down, run: "ansible-playbook teardown.yaml --ask-become-pass --tags boot_teardown" +* Bootstrap: The bootstrap is not needed after OpenShift fully installs and will be automatically brought down in the process of running the main playbook. To easily tear it down, run: "ansible-playbook teardown.yaml --ask-become-pass --tags boot_teardown" * If you have provisioned more than the minimum number of nodes for your installation, add them to the respective list found in roles/teardown_vms/tasks/main.yaml.
* Once you run the full teardown, to start the main.yaml playbook back from that point, run: @@ -114,28 +91,35 @@ If the process fails in error, you should be able to run the same shell command * ansible-playbook main.yaml --ask-become-pass --tags 'bastion,get-ocp' (for multiple tags) List of Tags (in alphabetical order): -* bastion = configuration of bastion for OCP -* bastionvm = creation of Bastion KVM guest -* bootstrap = creation of Boostrap KVM guest -* boot_teardown = for use with teardown.yaml to bring down the bootstrap -* compute = creation of the Compute nodes KVM guests -* control = creation of the Control nodes KVM guests -* create_nodes = tasks from the second set of kvm plays -* dns = configuration of DNS server on bastion -* firewall = for tasks related to firewall settings -* full_teardown = for use with teardown.yaml to bring down all KVM guests. To continue from that point, run the main playbook with "--tags 'bastionvm,bastion,create_nodes'" -* getocp = download of OCP installer and http server configuration -* haproxy = configuration of haproxy on bastion kvm guest -* httpd = configuration of httpd server on bastion kvm guest -* keymastr = ssh key configuration and testing -* kvm_host = tasks to apply to KVM host for OCP cluster -* kvm_prep = tasks from the first set of kvm plays -* localhost = for tasks that apply to the local machine running Ansible -* partial_teardown = for use with teardown.yaml to bring down all KVM guests except the bastion. 
To continue from that point, run the main playbook with "--tags 'bastion,create_nodes'" -* pkg = install and update all packages -* prep = run all setup playbooks -* selinux = for tasks related to SELinux settings -* setup = first-time setup of ansible -* ssh-agent = setting up ansible ssh-agent -* ssh-copy-id = for copying ssh id -* subscription = Attach Red Hat Subscription \ No newline at end of file +* approve_certs = Tasks for approve_certs role +* bastion = Configuration of bastion +* bastionvm = Creation of Bastion KVM guest +* bootstrap = Creation of Bootstrap KVM guest +* boot_teardown = Use with teardown.yaml to bring down the bootstrap +* check_nodes = Tasks for check_nodes role +* check_dns = Check DNS resolution +* compute = Creation of the compute nodes +* control = Creation of the control nodes +* create_nodes = Second set of KVM host's plays +* dns = Configuration of DNS server on bastion +* firewall = Configuration of firewall +* full_teardown = Use with teardown.yaml to bring down all KVM guests +* get-ocp = Prepare bastion for OCP +* haproxy = Configuration of load balancer on bastion +* httpd = Configuration of Apache server on bastion +* ssh-keygen = SSH key configuration and testing +* kvm_host = All KVM host tasks +* kvm_prep = First set of KVM host's tasks +* localhost = Tasks that apply to the local machine running Ansible +* partial_teardown = Use with teardown.yaml to bring down all VMs except bastion +* pkg = Install and update packages +* selinux = Tasks related to SELinux settings +* setup = First set of setup tasks on the localhost +* ssh = All SSH tasks +* ssh-agent = Setting up SSH agent +* ssh-copy-id = Copying SSH key to target +* subscription = Attach Red Hat Subscription +* verification = All OpenShift cluster verification tasks +* wait_for_bootstrap = Tasks for wait_for_bootstrap role +* wait_for_cluster_operators = Tasks for wait_for_cluster_operators +* wait_for_install_complete = Tasks for wait_for_install_complete role \ 
No newline at end of file From 9fa65bae404cdeae43f557168129666ea2b1e49e Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 10:49:23 -0600 Subject: [PATCH 523/885] Reset after test run Signed-off-by: Jacob Emery --- ansible.cfg | 1 - roles/ssh_copy_id/vars/path_to_key_pair.yaml | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/ansible.cfg b/ansible.cfg index d432c9f8..80a4489f 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,5 +1,4 @@ [defaults] -private_key_file=~/.ssh/ansible inventory=inventory [inventory] diff --git a/roles/ssh_copy_id/vars/path_to_key_pair.yaml b/roles/ssh_copy_id/vars/path_to_key_pair.yaml index 6b591c03..5eae78d0 100644 --- a/roles/ssh_copy_id/vars/path_to_key_pair.yaml +++ b/roles/ssh_copy_id/vars/path_to_key_pair.yaml @@ -1,2 +1 @@ -# Will be filled in by ssh-key-gen role - +# Will be filled in by ssh-key-gen role \ No newline at end of file From 6406fc2ee5d66e518258f3d256d9f0635b40068b Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 10:52:18 -0600 Subject: [PATCH 524/885] Updated to CoreOS version 4.9, updated variables to match changes to playbooks. Most importnatly, the qcow2 file download has changed from an ephemeral link (which created issues every time it expired) to a path to the file on your local machine. Signed-off-by: Jacob Emery --- env.yaml | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/env.yaml b/env.yaml index a523e1f2..8181640f 100644 --- a/env.yaml +++ b/env.yaml @@ -5,7 +5,7 @@ # Note: If no subscription is attached manually, an error will occur when the install_packages role runs. env_rh_username: env_rh_passwd: -auto_attach_rhel_sub: True # make sure to also comment out the above variables if False +auto_attach_rhel_sub: true # make sure to also comment out the above variables if false # To populate OpenShift install config file. 
env_apiVersion: v1 @@ -27,13 +27,11 @@ env_fips: "false" # "true" or "false" (include quotes) env_pullSecret: '' #paste it into these single quotes # OpenShift SSH key is generated via Ansible in ssh_ocp_key_gen role -# RHEL KVM Guest Image link. Please ensure it is for System Z s390x architecture. -# The address will start with https://access.cdn.redhat.com/content/origin/files/... -# Link will expire after a few hours. Copy the link again if you have waited a while since pasting here. -# If you don't want to download via ephemeral URL, download the qcow2 file from Red Hat and place it in the KVM host at /var/lib/libvirt/images/bastion_base.qcow2 +# Path on local workstation to RHEL KVM Guest Image. +# Please ensure it is for System Z s390x architecture. +# See README Step 2. env_rhel_qcow2: -# IP addresses for the nodes that Ansible will be run against. This will automatically fill out the inventory file when setup.yaml is run. env_ip_kvm_host: env_ip_bastion: env_ip_bootstrap: @@ -54,12 +52,12 @@ env_compute_0_name: compute-0 env_compute_1_name: compute-1 # Networking -env_dns_nameserver: #If you are using the bastion as a DNS server (and have set the below variable env_dns_on_bastion to True) this variable is the same as env_ip_bastion. +env_dns_nameserver: #If you are using the bastion as a DNS server (and have set the below variable env_dns_on_bastion to true) then this variable should be the same as env_ip_bastion. env_default_gateway: env_netmask: env_net_int_name: #KVM network interface name: i.e. enc1 -env_dns_forwarder: #Upstream DNS server, can use 8.8.8.8 as a default -env_dns_on_bastion: True #Set to False if you do not want to setup a DNS server on the bastion because you already have a DNS server. +env_dns_forwarder: 8.8.8.8 #Upstream DNS server, can use 8.8.8.8 as a default +env_dns_on_bastion: true #Set to false if you do not want to setup a DNS server on the bastion because you already have a DNS server. 
# Username and password for user on VMs env_vm_uid: @@ -84,24 +82,24 @@ env_bastion_vcpu: 4 env_bastion_os_variant: 8.4 #RHEL version. Make sure this matches the version you copied for RHEL iso. # Bootstrap node configuration. Pre-filled values are minimum requirements. -env_boot_disk_size: 100 #120 preferred +env_boot_disk_size: 100 #120 recommended env_boot_ram: 16384 env_boot_cpu: host env_boot_vcpu: 4 env_boot_os_variant: 8.4 #RH CoreOS version. Make sure this matches the version you copied for RHEL iso. # Control node configuration. Pre-filled values are minimum requirements. -env_cont_disk_size: 100 #120 preferred +env_cont_disk_size: 100 #120 recommended env_cont_ram: 16384 env_cont_cpu: host -env_cont_vcpu: 4 #8 preferred +env_cont_vcpu: 4 #8 recommended env_cont_os_variant: 8.4 #RH CoreOS version. Make sure this matches the version you copied for RHEL iso. # Compute node configuration. Pre-filled values are minimum requirements. -env_comp_disk_size: 100 #120 preferred +env_comp_disk_size: 100 #120 recommended env_comp_ram: 8192 env_comp_cpu: host -env_comp_vcpu: 2 #6 preferred +env_comp_vcpu: 2 #6 recommended env_comp_os_variant: 8.4 #RH CoreOS version. Make sure this matches the version you copied for RHEL iso # If you would like to download the latest stable version of OpenShift, leave as is. @@ -111,6 +109,6 @@ env_ocp_installer: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/o # This version of Red Hat CoreOS works. Feel free to replace these links with preferred versions. # Used in prep_kvm_guests and get-ocp roles. 
-env_rhcos_kernel: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.7/latest/rhcos-4.7.33-s390x-live-kernel-s390x -env_rhcos_initramfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.7/latest/rhcos-4.7.33-s390x-live-initramfs.s390x.img -env_rhcos_rootfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.7/latest/rhcos-4.7.33-s390x-live-rootfs.s390x.img \ No newline at end of file +env_rhcos_kernel: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-kernel-s390x +env_rhcos_initramfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-initramfs.s390x.img +env_rhcos_rootfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-rootfs.s390x.img \ No newline at end of file From 2559714e0c584077ba4246787889bf1858d2ee57 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 10:52:40 -0600 Subject: [PATCH 525/885] Added verification steps Signed-off-by: Jacob Emery --- main.yaml | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/main.yaml b/main.yaml index 1e2d0f5d..0bcfdda8 100644 --- a/main.yaml +++ b/main.yaml @@ -15,7 +15,7 @@ - ssh_agent - hosts: kvm_host - tags: kvm_host,kvm_prep + tags: kvm_host, kvm_prep become: true vars_files: - env.yaml @@ -58,14 +58,14 @@ - ssh-ocp-key-gen # SSH key for bastion to connect to nodes - set_selinux_permissive - set_firewall - - dns + #- dns - check_dns - haproxy - httpd - get-ocp - hosts: kvm_host - tags: kvm_host,create_nodes + tags: kvm_host, create_nodes become: true gather_facts: no vars_files: @@ -74,4 +74,17 @@ - prep_kvm_guests - create_bootstrap - create_control_nodes - - create_compute_nodes \ No newline at end of file + - create_compute_nodes + +- hosts: bastion + tags: verification, bastion + become: true + gather_facts: yes + vars_files: + - 
env.yaml + roles: + - wait_for_bootstrap + - approve_certs + - check_nodes + - wait_for_cluster_operators + - wait_for_install_complete \ No newline at end of file From 14dab1613208e2b2e144bb0e0903dc586b146098 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 10:53:54 -0600 Subject: [PATCH 526/885] Tweaked check_dns task failed_when condition Signed-off-by: Jacob Emery --- roles/check_dns/tasks/main.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/check_dns/tasks/main.yaml b/roles/check_dns/tasks/main.yaml index 9f5361c3..f1fd58e6 100644 --- a/roles/check_dns/tasks/main.yaml +++ b/roles/check_dns/tasks/main.yaml @@ -19,12 +19,12 @@ - "api.{{ env_metadata_name }}.{{ env_baseDomain }}" - "api-int.{{ env_metadata_name }}.{{ env_baseDomain }}" - "test.apps.{{ env_metadata_name }}.{{ env_baseDomain }}" - register: internal_check - failed_when: ip_from_env[i] != internal_check.stdout + register: command_result + failed_when: ip_from_env[i] != command_result.stdout loop_control: extended: yes index_var: i - + - name: check external DNS resolution from DNS forwarder tags: check_dns,dns register: command_result From 0e4bf80142c87adaea7b710e9f96011d98bd923a Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 10:55:14 -0600 Subject: [PATCH 527/885] Changed qcow2 file acquisition from ephemeral URL to instead have the user download the file to their local machine at it is copied to the KVM host. 
This avoids the problem of having to re-copy the URL every time the link expires Signed-off-by: Jacob Emery --- roles/create_bastion/tasks/main.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index f0b2d6a7..dd47b2c1 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -21,19 +21,19 @@ state: directory mode: '0755' -- name: check if RHEL qcow2 is already downloaded - tags: kvm_host, bastionvm +- name: check to see if qcow2 file already exists on KVM host + tags: bastionvm stat: - path: /var/lib/libvirt/images/bastion.qcow2 - register: rhel_qcow2 + path: /var/lib/libvirt/images/bastion_base.qcow2 + register: qcow2_check -- name: download RHEL qcow2 file +- name: copy RHEL qcow2 file to KVM host. This may take a while. tags: kvm_host, bastionvm - get_url: - url: "{{ env_rhel_qcow2 }}" + copy: + src: "{{ env_rhel_qcow2 }}" dest: /var/lib/libvirt/images/bastion_base.qcow2 mode: '0775' - when: rhel_qcow2.stat.exists == false + when: qcow2_check.stat.exists == false register: rhel_qcow2_download - name: remove snapshot for idempotency From cad28b26fd9eb6e4243e33407bbf18637f783789 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 10:56:19 -0600 Subject: [PATCH 528/885] Fixed tag in get-ocp task Signed-off-by: Jacob Emery --- roles/get-ocp/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml index 3ae23afe..4a4b9cbe 100644 --- a/roles/get-ocp/tasks/main.yaml +++ b/roles/get-ocp/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Load in variables from env.yaml - tags: setup,getocp,bastion + tags: getocp,bastion include_vars: env.yaml - name: create directory bin for mirrors From 74c82da1d1ede0fa70c683fb861f0ae44138d82e Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 10:57:18 -0600 Subject: [PATCH 
529/885] Reworked reset_files role to be simpler Signed-off-by: Jacob Emery --- roles/reset_files/tasks/main.yaml | 26 +++++--------------------- 1 file changed, 5 insertions(+), 21 deletions(-) diff --git a/roles/reset_files/tasks/main.yaml b/roles/reset_files/tasks/main.yaml index 67fbd152..a0cf35bf 100644 --- a/roles/reset_files/tasks/main.yaml +++ b/roles/reset_files/tasks/main.yaml @@ -1,25 +1,9 @@ ---- -- name: delete /ocpinst directory - file: - path: /ocpinst - state: absent - -- name: delete OCP SSH keys from bastion's ssh folder - file: - path: ~/.ssh/{{ item }} - state: absent - loop: - - id_rsa - - id_rsa.pub +- name: Load in variables from env.yaml + include_vars: env.yaml -- name: delete bastion's ssh known hosts file to remove fingerprints +- name: delete files_to_reset from teardown.yaml file: - path: ~/.ssh/known_hosts + path: "{{ item }}" state: absent - -- name: create empty bastion ssh known hosts file - file: - path: ~/.ssh/known_hosts - state: touch - mode: '644' \ No newline at end of file + loop: "{{ files_to_reset }}" \ No newline at end of file From 3632be667e97eb447133bc8e621ec9efe2222160 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 10:58:04 -0600 Subject: [PATCH 530/885] Reworked teardown.yaml to be more specific with what files are reset for idempotency Signed-off-by: Jacob Emery --- teardown.yaml | 49 +++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 45 insertions(+), 4 deletions(-) diff --git a/teardown.yaml b/teardown.yaml index a02b1919..bd65ffc0 100644 --- a/teardown.yaml +++ b/teardown.yaml @@ -4,16 +4,41 @@ # If you have more nodes than what is present in the "vms" list below, feel free to add more to the list.
# After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastionvm,bastion,create_nodes'" +- hosts: localhost + tags: full_teardown + connection: local + become: false + gather_facts: no + vars_files: + - env.yaml + tasks: + - name: remove bastion from localhost's known_hosts file + lineinfile: + path: "~/.ssh/known_hosts" + regexp: "{{ env_ip_bastion}}" + state: absent + - hosts: kvm_host tags: full_teardown become: true gather_facts: no + vars: + - vms: ['{{env_bastion_name}}', '{{env_bootstrap_name}}', '{{env_control_0_name}}', '{{env_control_1_name}}', '{{env_control_2_name}}', '{{env_compute_0_name}}', '{{env_compute_1_name}}'] vars_files: - env.yaml - pre_tasks: - - name: Create list of VMs to teardown. - set_fact: - vms: ['{{env_bastion_name}}', '{{env_bootstrap_name}}', '{{env_control_0_name}}', '{{env_control_1_name}}', '{{env_control_2_name}}', '{{env_compute_0_name}}', '{{env_compute_1_name}}'] + post_tasks: + - name: Capture files to delete + find: + paths: /var/lib/libvirt/images + file_type: file + excludes: + - lost+found + register: found_files + - name: delete files in /var/lib/libvirt/images except for lost+found + file: + path: "{{ item.path }}" + state: absent + with_items: "{{ found_files['files'] }}" roles: - teardown_vms @@ -21,10 +46,26 @@ # If you have more nodes than what is present in the "vms" list below, feel free to add more to the list. 
# After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastion,create_nodes'" +- hosts: localhost + tags: partial_teardown + connection: local + become: false + gather_facts: no + vars_files: + - env.yaml + tasks: + - name: remove bastion from localhost's known_hosts file + lineinfile: + path: "~/.ssh/known_hosts" + regexp: "{{ env_ip_bastion}}" + state: absent + - hosts: bastion tags: partial_teardown become: true gather_facts: no + vars: + - files_to_reset: ['~/.ssh/known_hosts', '~/.ssh/id_rsa','~/.ssh/id_rsa.pub','/ocpinst'] # feel free to add as needed vars_files: - env.yaml roles: From 3b1f87ac99127a8e6a59551600e6294ec7a0d74b Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 10:58:43 -0600 Subject: [PATCH 531/885] Added task to refresh inventory Signed-off-by: Jacob Emery --- roles/set_inventory/tasks/main.yaml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml index fab1ef32..372e6392 100644 --- a/roles/set_inventory/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -38,6 +38,10 @@ tags: setup ansible.builtin.gather_facts: +- name: Refresh inventory + tags: setup + meta: refresh_inventory + - name: fill ansible.cfg with provided variable ansible ssh key file name tags: setup ansible.builtin.lineinfile: @@ -78,7 +82,7 @@ path: main.yaml regexp: "- attach_subscription" replace: "#- attach_subscription" - when: auto_attach_rhel_sub | bool == False + when: not auto_attach_rhel_sub - name: Comment out DNS setup role in main.yaml if requested with env.yaml env_dns_on_bastion boolean tags: setup @@ -86,4 +90,4 @@ regexp: "- dns" path: main.yaml replace: "#- dns" - when: env_dns_on_bastion | bool == False \ No newline at end of file + when: not env_dns_on_bastion \ No newline at end of file From f3fb6a5de1e67c3ab8af0c1d0fa57c89396d0134 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 
2021 10:59:31 -0600 Subject: [PATCH 532/885] Added task to recreate ssh_copy_id files folder for idempotency Signed-off-by: Jacob Emery --- roles/ssh_copy_id/tasks/main.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index 8bad7d80..23f0c2d7 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -43,6 +43,12 @@ path: roles/ssh_copy_id/files/ssh-copy-id-expect-pass.exp state: absent +- name: re-create ssh-copy-id files folder + tags: ssh-copy-id,ssh + file: + path: roles/ssh_copy_id/files/ + state: directory + - name: Print results of copying ssh id to remote host. tags: ssh-copy-id debug: From 22ea76393cebb498437db25511debf615d6e3025 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 11:00:06 -0600 Subject: [PATCH 533/885] Added task to print results of ssh key pair check Signed-off-by: Jacob Emery --- roles/ssh_key_gen/tasks/main.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index c4f17be1..61b4f309 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -33,6 +33,11 @@ - "{{env_ssh_ans_name}}" - "{{env_ssh_ans_name}}.pub" +- name: Print results of ssh key pair files check + tags: keymastr + debug: + var: ssh_key_file_exists_check.results[0].stat.exists + - name: create a vars file for key path tags: keymastr file: From af1a8964919bc4eb257c4e7a9ca3849d141353d6 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 11:01:09 -0600 Subject: [PATCH 534/885] Added automated verification steps Signed-off-by: Jacob Emery --- roles/approve_certs/tasks/main.yaml | 91 +++++++++++++++++++ roles/check_nodes/tasks/main.yaml | 23 +++++ roles/wait_for_bootstrap/tasks/main.yaml | 48 ++++++++++ .../tasks/main.yaml | 23 +++++ .../wait_for_install_complete/tasks/main.yaml | 29 ++++++ 5 files changed, 214 insertions(+) create 
mode 100644 roles/approve_certs/tasks/main.yaml create mode 100644 roles/check_nodes/tasks/main.yaml create mode 100644 roles/wait_for_bootstrap/tasks/main.yaml create mode 100644 roles/wait_for_cluster_operators/tasks/main.yaml create mode 100644 roles/wait_for_install_complete/tasks/main.yaml diff --git a/roles/approve_certs/tasks/main.yaml b/roles/approve_certs/tasks/main.yaml new file mode 100644 index 00000000..0cc149e6 --- /dev/null +++ b/roles/approve_certs/tasks/main.yaml @@ -0,0 +1,91 @@ +--- + +- name: Approving all pending CSR + tags: approve_certs + environment: + KUBECONFIG: "/ocpinst/auth/kubeconfig" + shell: | + /ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /ocpinst/oc adm certificate approve + register: csr_approved_1 + ignore_errors: yes + +- name: viewing first csr approved + tags: approve_certs + debug: + msg: "{{csr_approved_1.stdout_lines}}" + +- name: pause to let new certificates needing approval to generate + tags: approve_certs + pause: + seconds: 30 + +- name: Second round approving all pending CSR + tags: approve_certs + environment: + KUBECONFIG: "/ocpinst/auth/kubeconfig" + shell: | + /ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /ocpinst/oc adm certificate approve + register: csr_approved_2 + ignore_errors: yes + +- name: viewing second csr approved + tags: approve_certs + debug: + msg: "{{csr_approved_2.stdout_lines}}" + +- name: pause to let new certificates needing approval to generate + tags: approve_certs + pause: + seconds: 30 + +- name: Third round approving all pending CSR + tags: approve_certs + environment: + KUBECONFIG: "/ocpinst/auth/kubeconfig" + shell: | + /ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /ocpinst/oc adm certificate approve + register: csr_approved_3 + ignore_errors: yes + +- name: viewing third csr approved + tags: approve_certs + debug: + msg: "{{csr_approved_3.stdout_lines}}" + +- name: pause to let new certificates needing approval to generate + tags: 
approve_certs + pause: + seconds: 30 + +- name: Fourth round approving all pending CSR + tags: approve_certs + environment: + KUBECONFIG: "/ocpinst/auth/kubeconfig" + shell: | + /ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /ocpinst/oc adm certificate approve + register: csr_approved_4 + ignore_errors: yes + +- name: viewing fourth csr approved + tags: approve_certs + debug: + msg: "{{csr_approved_4.stdout_lines}}" + +- name: pause to let new certificates needing approval to generate + tags: approve_certs + pause: + seconds: 30 + +- name: Fifth round approving all pending CSR + tags: approve_certs + environment: + KUBECONFIG: "/ocpinst/auth/kubeconfig" + shell: | + /ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /ocpinst/oc adm certificate approve + register: csr_approved_5 + ignore_errors: yes + +- name: viewing fifth csr approved + tags: approve_certs + debug: + msg: "{{csr_approved_5.stdout_lines}}" \ No newline at end of file diff --git a/roles/check_nodes/tasks/main.yaml b/roles/check_nodes/tasks/main.yaml new file mode 100644 index 00000000..070e8415 --- /dev/null +++ b/roles/check_nodes/tasks/main.yaml @@ -0,0 +1,23 @@ +--- + + - name: Check nodes status + tags: check_nodes + environment: + KUBECONFIG: "/ocpinst/auth/kubeconfig" + shell: oc get nodes | awk '{print $1, $2}' + register: oc_get_nodes + + - name: print nodes status + tags: check_nodes + debug: + var: oc_get_nodes.stdout_lines + + - name: Make sure nodes are 'Ready' before continuing + tags: check_nodes + environment: + KUBECONFIG: "/ocpinst/auth/kubeconfig" + shell: oc get nodes | awk '{print $2}' + register: nodes_check + until: ("NotReady" not in nodes_check.stdout) + retries: 120 + delay: 30 \ No newline at end of file diff --git a/roles/wait_for_bootstrap/tasks/main.yaml b/roles/wait_for_bootstrap/tasks/main.yaml new file mode 100644 index 00000000..0ff5311d --- /dev/null +++ b/roles/wait_for_bootstrap/tasks/main.yaml @@ -0,0 +1,48 @@ +--- + +- name: Make sure
kubeconfig works + tags: wait_for_bootstrap + environment: + KUBECONFIG: "/ocpinst/auth/kubeconfig" + shell: oc whoami + register: oc_whoami + until: oc_whoami.stdout == "system:admin" + retries: 120 + delay: 30 + +- name: print output of oc whoami, should be system:admin + tags: wait_for_bootstrap + debug: + var: oc_whoami.stdout + +- name: Watch bootstrap + tags: wait_for_bootstrap + command: chdir=/ocpinst ./openshift-install wait-for bootstrap-complete + async: 3600 + poll: 0 + register: bootstrap_complete_sleeper + +- name: Retry bootstrap job id check until it's finished. This may take some time. + tags: wait_for_bootstrap + async_status: + jid: "{{ bootstrap_complete_sleeper.ansible_job_id }}" + register: job_result + until: job_result.finished + retries: 120 + delay: 30 + +- name: Destroy bootstrap. Expect ignored errors if bootstrap is already destroyed. + tags: wait_for_bootstrap + community.libvirt.virt: + name: "{{ env_bootstrap_name }}" + command: destroy + ignore_errors: yes + delegate_to: "{{ env_ip_kvm_host}}" + +- name: Undefine bootstrap. Expect ignored errors if bootstrap is already undefined. + tags: wait_for_bootstrap + community.libvirt.virt: + name: "{{ env_bootstrap_name }}" + command: undefine + ignore_errors: yes + delegate_to: "{{ env_ip_kvm_host}}" diff --git a/roles/wait_for_cluster_operators/tasks/main.yaml b/roles/wait_for_cluster_operators/tasks/main.yaml new file mode 100644 index 00000000..d995411b --- /dev/null +++ b/roles/wait_for_cluster_operators/tasks/main.yaml @@ -0,0 +1,23 @@ +--- + +- name: Check cluster operators + tags: wait_for_cluster_operators + environment: + KUBECONFIG: "/ocpinst/auth/kubeconfig" + shell: oc get co + register: oc_get_co + +- name: print clusteroperator status + tags: wait_for_cluster_operators + debug: + var: oc_get_co.stdout_lines + +- name: Make sure all cluster operators are Available before continuing. This may take a while. 
+ tags: wait_for_cluster_operators + environment: + KUBECONFIG: "/ocpinst/auth/kubeconfig" + shell: oc get co | awk '{print $3}' + register: co_check + until: ("False" not in co_check.stdout) + retries: 120 + delay: 30 \ No newline at end of file diff --git a/roles/wait_for_install_complete/tasks/main.yaml b/roles/wait_for_install_complete/tasks/main.yaml new file mode 100644 index 00000000..6ba1292b --- /dev/null +++ b/roles/wait_for_install_complete/tasks/main.yaml @@ -0,0 +1,29 @@ +--- + +- name: Wait for OpenShift install to complete + tags: wait_for_install_complete + environment: + KUBECONFIG: "/ocpinst/auth/kubeconfig" + shell: /ocpinst/openshift-install --dir=/ocpinst wait-for install-complete + register: wait_install_complete + until: ("Install complete!" in wait_install_complete.stderr) + retries: 120 + delay: 30 + +- name: Set OCP URL + tags: wait_for_install_complete + set_fact: + ocp_url: https://console-openshift-console.apps.{{env_metadata_name}}.{{env_baseDomain}} + +- name: Set OCP password + tags: wait_for_install_complete + command: "cat /ocpinst/auth/kubeadmin-password" + register: ocp_passwd + +- name: Congratulations! OpenShift installation complete. Use the information below for first-time login. 
+ tags: wait_for_install_complete + command: "echo {{ item }}" + loop: + - " URL: {{ocp_url}} " + - " Username: kubeadmin " + - " Password: {{ocp_passwd.stdout}} " \ No newline at end of file From a6e784dfd8eccb64fc1dae75f748739cc54aca5d Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 11:04:41 -0600 Subject: [PATCH 535/885] Fixed formatting in Troubleshooting section Signed-off-by: Jacob Emery --- README.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 10dfa724..3fdf80b8 100644 --- a/README.md +++ b/README.md @@ -68,10 +68,12 @@ for local workstation running Ansible ## Troubleshooting: If you encounter errors while running the main playbook, there are a few things you can do: -*1)* Check your variables in env.yaml -*2)* Inspect the task that failed by inspecting the task in roles/role_name/tasks_main.yaml -*3)* Re-Run the role indivually with with [tags](#Tags) -*4)* Teardown problematic KVM guests with [teardown](#Teardown) scripts and start again with [tags](#Tags) +* 1) Check your variables in env.yaml +* 2) Inspect the task that failed by inspecting the task in roles/role_name/tasks_main.yaml +* 3) Google the specific error message +* 4) Re-run the role individually with [tags](#Tags) +* 5) Teardown troublesome KVM guests with [teardown](#Teardown) scripts and start again with [tags](#Tags) +* 6) E-mail Jacob Emery at jacob.emery@ibm.com ## Teardown: * If you would like to teardown your VMs, first determine whether you would like to do a full, partial, or bootstrap teardown, specified below.
From 88e19f24132068a87ff524b0aef042692e6d3d84 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 11:07:31 -0600 Subject: [PATCH 536/885] Fixed internal links at the top Signed-off-by: Jacob Emery --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3fdf80b8..4be18aa2 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,8 @@ * [Instructions](#Installation-Instructions) * [Setup](#Setup) * [Provisioning](#Provisioning) -* [Install Complete](*Install-Complete) +* [Post-Install Complete](#Post-Install-Complete) +* [Troubleshooting](#Troubleshooting) * [Teardown](#Teardown) * [Tags](#Tags) @@ -60,7 +61,7 @@ for local workstation running Ansible * If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of the main playbook run, use [tags](#Tags). See the [main playbook](main.yaml) to determine what part you would like to run and use those tags when running the [main playbook](main.yaml). Example: "ansible-playbook main.yaml --ask-become-pass --tags 'get-ocp,create_nodes'" * Note: we chose to not edit the user's .bash_profile/.bashrc with an automatic ssh-add command because that would change the user's local workstation set-up in a way that was potentially undesirable. Therefore, if you close out your terminal session in the middle of provisioning, you will need to run "ansible-playbook main.yaml --tags ssh-agent" before doing anything else. -### Install Complete +### Post-Install Complete * **Step 6: First-Time Login** * The last step of the main playbook will print a URL, username and temporary password for first-time login. * Use a web-browser to type in the URL, which should take you to a sign-in page. Use the provided credentials to sign in. 
From 7790ce985f31fb99689a2b5146324878b01a27b1 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 11:29:21 -0600 Subject: [PATCH 537/885] Updated CHANGELOG with update information Signed-off-by: Jacob Emery --- CHANGELOG.md | 64 ++++++++++++++++++++++++++++++++++------------------ 1 file changed, 42 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 550cd742..2072ab6c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,42 +11,62 @@ All notable changes to this project will be documented in this file. ## [Automated Bastion Update] -## 1.0.0 - 2021-11-24 +## [1.0.0] - 2021-11-24 +[1.0.0]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v0.0.1...HEAD ### Summary - Fully automated bastion installation and configuration using cloud-init ### Added -- Added options in env.yaml for creating a DNS server on the bastion or not, and for automatically attaching Red Hat subscriptions -- Added variables for bootstrap, bastion, control and compute nodes' specifications in env.yaml -- Added node name variables in env.yaml -- Added variable for network interface name in env.yaml -- Added variable for DNS forwarder in env.yaml -- Added templating of DNS configuration files so they don't have to be pre-provided -- Added expect script to ssh_copy_id role so that the user doesn't have to type in ssh password when copying ssh key -- Added templating of haproxy config file -- Added a boot_teardown tag in teardown.yaml to automate the teardown of bootstrap node +- Options in env.yaml for creating a DNS server on the bastion or not, and for automatically attaching Red Hat subscriptions +- Variables for bootstrap, bastion, control and compute nodes' specifications in env.yaml +- Node name variables in env.yaml +- Variable for network interface name in env.yaml +- Variable for DNS forwarder in env.yaml +- Templating of DNS configuration files so they don't have to be pre-provided +- Expect script to ssh_copy_id role so that the user 
doesn't have to type in ssh password when copying ssh key +- Templating of haproxy config file +- A boot_teardown tag in teardown.yaml to automate the teardown of bootstrap node ### Modified -- Reworked create_bastion role to use cloud-init to fully automate configuration and installation of the bastion node -- Reworked teardown.yaml script to decrease complexity and work faster. -- Changed some tags to match their corresponding role names +- create_bastion role to use cloud-init to fully automate configuration and installation of the bastion node +- teardown.yaml script to decrease complexity and work faster. +- Some tags to match their corresponding role names - Lots of small improvements and tweaks ### Removed -- Removed encryption of env.yaml as it was unnecessary and increased complexity +- Encryption of env.yaml as it was unnecessary and increased complexity [Automated Bastion Update]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.0.0...HEAD [1.0.0]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.0.0 +## [Automated OCP Verification Update] + +## [1.1.0] - 2021-12-03 +[1.1.0]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v0.1.0...HEAD + +### Summary +- Fully automated all OCP verification steps. Cutting the number of steps nearly in half. The main playbook can now run completely hands-off from kicking it off all the way to an operational cluster. The last step provides the first-time login credentials. + +### Added +- 5 roles related to automating OCP verification steps: wait_for_bootstrap, approve_certs, check_nodes, wait_for_cluster_operators, and wait_for_install_complete. +- role to check internal and external DNS configuration before continuing. Including checking to make sure the name resolves to the correct IP address. +- +### Modified +- The mirrors for CoreOS versions to update to 4.9 and tested them. 
+- The acquisition method of RHEL qcow2 from downloading via ephemeral link to having the user download the file to their local machine as a pre-req. This was changed to avoid having to re-copy the link every time it expires. +- teardown.yaml and reset_files role to be fully idempotent when running the main playbook from the point where each type of teardown sets the user back to. +- Lots of small tweaks. +### Removed +- Instructions in README for doing OCP verification steps manually + +[Automated OCP Verification Update]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.1.0...HEAD +[1.1.0]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.0.0 + ## Roadmap +- Add option in env.yaml to create HAProxy on bastion or not +- Add option for using a proxy server for OpenShift in install-config via env.yaml +- Add functionality to provision more than 3 control and 2 compute nodes - Make ssh-copy-id role idempotent. -- Add role to check if DNS is working properly before continuing - Add picture of finished infrastructure to README - Add README’s for each role -- Add update_vars role to create vars files from env.yaml / update if changed to increase effifiency and user understanding -- Create inventories and playbooks folders -- Integrate setup.yaml into main.yaml - no longer need to be separate because encryption role removed -- Automate verification steps. This is difficult because Ansible has to use bastion as a jumphost. 
-- Add option in env.yaml to create HAProxy on bastion or not (on different server) -- Further down the line, air-gapped install of OpenShift option -- Further down the line, add functionality to provision more than 3 control and 2 compute nodes +- Air-gapped (disconnected) install of OpenShift option From 925a0bb85e1ebb0ee093e058dc4873a3741fe221 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 11:30:51 -0600 Subject: [PATCH 538/885] Fixed formatting error in CHANGELOG Signed-off-by: Jacob Emery --- CHANGELOG.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2072ab6c..67ecbefd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,8 +58,8 @@ All notable changes to this project will be documented in this file. ### Removed - Instructions in README for doing OCP verification steps manually -[Automated OCP Verification Update]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.1.0...HEAD -[1.1.0]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.0.0 +[Automated OCP Verification Update]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.1.0...v1.0.0 +[1.1.0]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.1.0...v1.0.0 ## Roadmap From f0462d281a7868b89bbb10b1bd143f76ad4359f7 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 11:39:41 -0600 Subject: [PATCH 539/885] Flipped CHANGELOG so that the most recent updates are at the top Signed-off-by: Jacob Emery --- CHANGELOG.md | 68 ++++++++++++++++++++++------------------------------ 1 file changed, 28 insertions(+), 40 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 67ecbefd..1b79a0bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,17 +2,30 @@ All notable changes to this project will be documented in this file. 
-## [Unreleased] +Jump-To: +* [Latest](#1.1.0) +* [1.0.0](#1.0.0) +* [0.0.1](#0.0.1) -## [0.0.1] - 2021-08-24 -[unreleased]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v0.0.1...HEAD -[0.0.1]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v0.0.1 +## 1.1.0 - Automated OCP Verification Update - 2021-12-03 -## [Automated Bastion Update] +### Summary +- Fully automated all OCP verification steps. Cutting the number of steps nearly in half. The main playbook can now run completely hands-off from kicking it off all the way to an operational cluster. The last step provides the first-time login credentials. -## [1.0.0] - 2021-11-24 -[1.0.0]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v0.0.1...HEAD +### Added +- 5 roles related to automating OCP verification steps: wait_for_bootstrap, approve_certs, check_nodes, wait_for_cluster_operators, and wait_for_install_complete. +- role to check internal and external DNS configuration before continuing. Including checking to make sure the name resolves to the correct IP address. +- +### Modified +- The mirrors for CoreOS versions to update to 4.9 and tested them. +- The acquisition method of RHEL qcow2 from downloading via ephemeral link to having the user download the file to their local machine as a pre-req. This was changed to avoid having to re-copy the link every time it expires. +- teardown.yaml and reset_files role to be fully idempotent when running the main playbook from the point where each type of teardown sets the user back to. +- Lots of small tweaks. +### Removed +- Instructions in README for doing OCP verification steps manually + +## 1.0.0 - Automated Bastion Update - 2021-11-24 ### Summary - Fully automated bastion installation and configuration using cloud-init @@ -35,38 +48,13 @@ All notable changes to this project will be documented in this file. 
### Removed - Encryption of env.yaml as it was unnecessary and increased complexity -[Automated Bastion Update]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.0.0...HEAD -[1.0.0]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.0.0 - -## [Automated OCP Verification Update] - -## [1.1.0] - 2021-12-03 -[1.1.0]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v0.1.0...HEAD - -### Summary -- Fully automated all OCP verification steps. Cutting the number of steps nearly in half. The main playbook can now run completely hands-off from kicking it off all the way to an operational cluster. The last step provides the first-time login credentials. - -### Added -- 5 roles related to automating OCP verification steps: wait_for_bootstrap, approve_certs, check_nodes, wait_for_cluster_operators, and wait_for_install_complete. -- role to check internal and external DNS configuration before continuing. Including checking to make sure the name resolves to the correct IP address. -- -### Modified -- The mirrors for CoreOS versions to update to 4.9 and tested them. -- The acquisition method of RHEL qcow2 from downloading via ephemeral link to having the user download the file to their local machine as a pre-req. This was changed to avoid having to re-copy the link every time it expires. -- teardown.yaml and reset_files role to be fully idempotent when running the main playbook from the point where each type of teardown sets the user back to. -- Lots of small tweaks. 
-### Removed -- Instructions in README for doing OCP verification steps manually - -[Automated OCP Verification Update]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.1.0...v1.0.0 -[1.1.0]: https://github.com/IBM/Ansible-OpenShift-Provisioning/compare/v1.1.0...v1.0.0 +## 0.0.1 - Unreleased - 2021-08-24 ## Roadmap - -- Add option in env.yaml to create HAProxy on bastion or not -- Add option for using a proxy server for OpenShift in install-config via env.yaml -- Add functionality to provision more than 3 control and 2 compute nodes -- Make ssh-copy-id role idempotent. -- Add picture of finished infrastructure to README -- Add README’s for each role -- Air-gapped (disconnected) install of OpenShift option +* Add option in env.yaml to create HAProxy on bastion or not +* Add option for using a proxy server for OpenShift in install-config via env.yaml +* Add functionality to provision more than 3 control and 2 compute nodes +* Make ssh-copy-id role idempotent. +* Add picture of finished infrastructure to README +* Add README’s for each role +* Air-gapped (disconnected) install of OpenShift option \ No newline at end of file From 67afbfb686ad4cbb26a6eb1235188d11b4a1523d Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 11:43:02 -0600 Subject: [PATCH 540/885] Fixed CHANGELOG formatting Signed-off-by: Jacob Emery --- CHANGELOG.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b79a0bc..e1687752 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,10 +2,11 @@ All notable changes to this project will be documented in this file. 
-Jump-To: -* [Latest](#1.1.0) -* [1.0.0](#1.0.0) -* [0.0.1](#0.0.1) +###Jump-To: +* [Latest](#1.1.0 - Automated OCP Verification Update - 2021-12-03) +* [1.0.0](#1.0.0 - Automated Bastion Update - 2021-11-24) +* [0.0.1](#0.0.1 - Unreleased - 2021-08-24) +* [Roadmap](#Roadmap) ## 1.1.0 - Automated OCP Verification Update - 2021-12-03 @@ -16,7 +17,6 @@ Jump-To: ### Added - 5 roles related to automating OCP verification steps: wait_for_bootstrap, approve_certs, check_nodes, wait_for_cluster_operators, and wait_for_install_complete. - role to check internal and external DNS configuration before continuing. Including checking to make sure the name resolves to the correct IP address. -- ### Modified - The mirrors for CoreOS versions to update to 4.9 and tested them. - The acquisition method of RHEL qcow2 from downloading via ephemeral link to having the user download the file to their local machine as a pre-req. This was changed to avoid having to re-copy the link every time it expires. From 5e3152e4d30ca859c4ab4488c0a59f6c958dee89 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 11:45:58 -0600 Subject: [PATCH 541/885] Fixed CHANGELOG formatting Signed-off-by: Jacob Emery --- CHANGELOG.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e1687752..4b7dc3be 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,10 +2,10 @@ All notable changes to this project will be documented in this file. 
-###Jump-To: -* [Latest](#1.1.0 - Automated OCP Verification Update - 2021-12-03) -* [1.0.0](#1.0.0 - Automated Bastion Update - 2021-11-24) -* [0.0.1](#0.0.1 - Unreleased - 2021-08-24) +### Jump-To: +* [Latest](#1.1.0---Automated-OCP-Verification-Update---2021-12-03) +* [1.0.0](#1.0.0---Automated-Bastion-Update---2021-11-24) +* [0.0.1](#0.0.1---Unreleased---2021-08-24) * [Roadmap](#Roadmap) From 5f6962cac52825147febf8d7aab665ac8f2b56f0 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Dec 2021 11:55:51 -0600 Subject: [PATCH 542/885] Reformatted CHANGELOG Signed-off-by: Jacob Emery --- CHANGELOG.md | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b7dc3be..76f8fd1c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,13 +3,13 @@ All notable changes to this project will be documented in this file. ### Jump-To: -* [Latest](#1.1.0---Automated-OCP-Verification-Update---2021-12-03) -* [1.0.0](#1.0.0---Automated-Bastion-Update---2021-11-24) -* [0.0.1](#0.0.1---Unreleased---2021-08-24) +* [Latest](#1.1.0) +* [1.0.0](#1.0.0) +* [0.0.1](#0.0.1) * [Roadmap](#Roadmap) - -## 1.1.0 - Automated OCP Verification Update - 2021-12-03 +## 1.1.0 +### Automated OCP Verification Update - 2021-12-03 ### Summary - Fully automated all OCP verification steps. Cutting the number of steps nearly in half. The main playbook can now run completely hands-off from kicking it off all the way to an operational cluster. The last step provides the first-time login credentials. @@ -25,7 +25,8 @@ All notable changes to this project will be documented in this file. ### Removed - Instructions in README for doing OCP verification steps manually -## 1.0.0 - Automated Bastion Update - 2021-11-24 +## 1.0.0 +### Automated Bastion Update 2021-11-24 ### Summary - Fully automated bastion installation and configuration using cloud-init @@ -48,7 +49,8 @@ All notable changes to this project will be documented in this file. 
### Removed - Encryption of env.yaml as it was unnecessary and increased complexity -## 0.0.1 - Unreleased - 2021-08-24 +## 0.0.1 +### Unreleased 2021-08-24 ## Roadmap * Add option in env.yaml to create HAProxy on bastion or not From 7cdb1c374f9ebcb342641f46c344d4e055bb7077 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 6 Dec 2021 16:12:45 -0600 Subject: [PATCH 543/885] Removed deprecated roles Signed-off-by: Jacob Emery --- roles/enable_packages/tasks/main.yaml | 7 ------- roles/install_ansible/tasks/main.yaml | 12 ------------ roles/remove_bootstrap/tasks/main.yaml | 15 --------------- 3 files changed, 34 deletions(-) delete mode 100644 roles/enable_packages/tasks/main.yaml delete mode 100644 roles/install_ansible/tasks/main.yaml delete mode 100644 roles/remove_bootstrap/tasks/main.yaml diff --git a/roles/enable_packages/tasks/main.yaml b/roles/enable_packages/tasks/main.yaml deleted file mode 100644 index 0cf851a7..00000000 --- a/roles/enable_packages/tasks/main.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- - -- name: enable packages - tags: kvm_host - service: - name: libvirtd - state: started \ No newline at end of file diff --git a/roles/install_ansible/tasks/main.yaml b/roles/install_ansible/tasks/main.yaml deleted file mode 100644 index dfcb1502..00000000 --- a/roles/install_ansible/tasks/main.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - -- name: install ansible dependencies on bastion - tags: bastion, ansible - command: "{{ item }}" - loop: - - subscription-manager repos --enable "codeready-builder-for-rhel-8-s390x-rpms" - - sudo yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm - - sudo rpm -i epel-release-latest-8.noarch.rpm - - sudo yum -y install ansible - - ansible-galaxy collection install community.crypto - - ansible-galaxy collection install community.general \ No newline at end of file diff --git a/roles/remove_bootstrap/tasks/main.yaml b/roles/remove_bootstrap/tasks/main.yaml deleted file mode 100644 index 
cb17edef..00000000 --- a/roles/remove_bootstrap/tasks/main.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- - -- name: shutdown bootstrap - community.libvirt.virt: - name: bootstrap_server - state: shutdown - -- name: wait for shutdown - pause: - minutes: 1 - -- name: destroy bootstrap - community.libvirt.virt: - name: bootstrap_server - state: destroyed \ No newline at end of file From 14b5074500350a2cae41dde4de0bcc3fa0eafd2a Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 7 Dec 2021 11:47:57 -0600 Subject: [PATCH 544/885] Switch license file to GPL Signed-off-by: Jacob Emery --- LICENSE | 876 +++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 674 insertions(+), 202 deletions(-) diff --git a/LICENSE b/LICENSE index 8f71f43f..8253ee4c 100644 --- a/LICENSE +++ b/LICENSE @@ -1,202 +1,674 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
\ No newline at end of file From 81f600ed3641c4844b4d1d4c6f45f00c71dbdcbf Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 7 Dec 2021 15:56:43 -0600 Subject: [PATCH 545/885] Changed to MIT license Signed-off-by: Jacob Emery --- LICENSE | 695 ++------------------------------------------------------ 1 file changed, 21 insertions(+), 674 deletions(-) diff --git a/LICENSE b/LICENSE index 8253ee4c..526ec4c8 100644 --- a/LICENSE +++ b/LICENSE @@ -1,674 +1,21 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. 
Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. 
To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. 
If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. 
For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. 
This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. 
- - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. 
- - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. 
- - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. 
If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. 
If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. 
- - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. 
For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. 
- - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. \ No newline at end of file +MIT License + +Copyright (c) 2021 IBM Corporation. All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE \ No newline at end of file From 0c9042f6fcc59f07bb579ad7f1cda2b6b1a37e16 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 9 Dec 2021 03:00:25 -0600 Subject: [PATCH 546/885] Updated tags and references to new variables Signed-off-by: Jacob Emery --- README.md | 78 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 40 insertions(+), 38 deletions(-) diff --git a/README.md b/README.md index 4be18aa2..30314661 100644 --- a/README.md +++ b/README.md @@ -32,9 +32,10 @@ for local workstation running Ansible * Access to a logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: * 6 Integrated Facilities for Linux (IFLs) with SMT2 enabled * 85 GB of RAM - * 1 TB of disk space -* On that LPAR, Red Hat Enterprise Linux (RHEL) with networking configured and a root password set -* On that LPAR, access to 8 (for a minimum installation) pre-allocated IPv4 addresses with Fully Qualified Domain Names (FQDN) + * 1 TB 
of disk space mounted to /var/lib/libvirt/images + * Red Hat Enterprise Linux (RHEL) 8.4 with networking configured and a root password set + * Access to 8 (for a minimum installation) pre-allocated IPv4 addresses +* Note on DNS: The [main playbook](main.yaml) will create a DNS server on the bastion by default. If you plan to use a pre-existing DNS server instead, please make sure to mark the variable env.networking.dns.setup_on_bastion to 'false' in [env.yaml](env.yaml) to skip that step. Either way, the playbook will double-check the DNS configuration before continuing. ## Installation Instructions: @@ -47,18 +48,20 @@ for local workstation running Ansible * In a web browser, navigate to the Red Hat [console](https://console.redhat.com/openshift/install/ibmz/user-provisioned) and copy the OpenShift pull secret and paste it into [env.yaml](env.yaml) as the variable 'env_pullSecret'. * **Step 3: Set Variables** * In a text editor of your choice, open [env.yaml](env.yaml) - * Fill out the remaining variables to match your specific installation. Many variables are pre-filled with defaults. For a default installation, you only need to fill in the empty variables. + * Fill out variables marked with '#X' to match your specific installation. + * Many variables are pre-filled with defaults, change pre-filled variables at your own discretion. + * This is the most important step in the process. Take the time to make sure everything here is correct. * **Step 4: Setup Script** - * Navigate to the folder where you cloned the Git Repository - * Run "ansible-playbook setup.yaml --ask-become-pass" + * Navigate to the folder where you cloned the Git Repository in your terminal. + * Run this shell command: "ansible-playbook setup.yaml --ask-become-pass" ### Provisioning * **Step 5: Running the Main Playbook** - * If you are not already there, navigate to the folder where you cloned the Git repository in your terminal. 
- * Run the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" + * Navigate to the folder where you cloned the Git repository in your terminal. + * Start the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" * Watch Ansible as it completes the installation, correcting errors if they arise. * To look at what is running in detail, open roles/'task-you-want-to-inspect'/tasks/main.yaml - * If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of the main playbook run, use [tags](#Tags). See the [main playbook](main.yaml) to determine what part you would like to run and use those tags when running the [main playbook](main.yaml). Example: "ansible-playbook main.yaml --ask-become-pass --tags 'get-ocp,create_nodes'" + * If the process fails in error, go through the steps in the [troubleshooting](#Troubleshooting) section. use [tags](#Tags) to selectively start from a certain point. See the [main playbook](main.yaml) to determine what part you would like to run and use those tags when running the [main playbook](main.yaml). Example: "ansible-playbook main.yaml --ask-become-pass --tags 'get-ocp,create_nodes'" * Note: we chose to not edit the user's .bash_profile/.bashrc with an automatic ssh-add command because that would change the user's local workstation set-up in a way that was potentially undesirable. Therefore, if you close out your terminal session in the middle of provisioning, you will need to run "ansible-playbook main.yaml --tags ssh-agent" before doing anything else. 
### Post-Install Complete @@ -69,59 +72,58 @@ for local workstation running Ansible ## Troubleshooting: If you encounter errors while running the main playbook, there are a few things you can do: -* 1) Check your variables in env.yaml -* 2) Inspect the task that failed by inspecting the task in roles/role_name/tasks_main.yaml -* 3) Google the specific error message -* 3) Re-Run the role indivually with with [tags](#Tags) -* 4) Teardown troublesome KVM guests with [teardown](#Teardown) scripts and start again with [tags](#Tags) -* 6) E-mail Jacob Emery at jacob.emery@ibm.com +1) Double check your variables in env.yaml +2) Inspect the part that failed by opening roles/role_name/tasks/main.yaml +3) Google the specific error message +3) Re-Run the role indivually with [tags](#Tags) +4) Teardown troublesome KVM guests with [teardown](#Teardown) scripts and start again with [tags](#Tags). To start from the beginning, run "ansible-playbook teardown.yaml --ask-become-pass --tags full_teardown +6) E-mail Jacob Emery at jacob.emery@ibm.com +7) If it's a problem with an OpenShift verification step, first re-reun the role with [tags](#Tags). If that doesn't work, SSH into the bastion as root ("ssh root@bastion-ip-address-here") and then run,"export KUBECONFIG=/ocpinst/auth/kubeconfig" and then "oc whoami" and make sure it ouputs "system:admin". Then run the shell command from the role you would like to check on manually: i.e. 'oc get nodes', 'oc get co', etc. ## Teardown: -* If you would like to teardown your VMs, first determine whether you would like to do a full, partial, or bootstrap teardown, specified below. 
-* Full: to teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags full_teardown" -* Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags partial_teardown" -* Bootstrap: The bootstrap is not needed after OpenShift fully installs and will be automatically brough down in the process of running the main playbook. To easily tear it down, run: "ansible-playbook teardown.yaml --ask-become-pass --tags boot_teardown" -* If you have provisioned more than the minimum number of nodes for your installation, add them to the - respective list found in roles/teardown_vms/tasks/main.yaml. -* Once you run the full teardown, to start the main.yaml playbook back from that point, run: - "run ansible-playbook main.yaml --ask-become-pass --tags "bastionvm,bastion,create_nodes" -* Once you run the partial teardown, to start the main.yaml playbook back from that point, run main.yaml with "--tags 'getocp,create_nodes'" +* If you would like to teardown your VMs, first determine whether you would like to do a full or partial teardown, specified below. +* Full: To teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags full_teardown". Start back again from the beginning by running "ansible-playbook main.yaml --ask-become-pass" +* Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags partial_teardown". To start the main.yaml playbook back from that point, run main.yaml with "--tags 'get_ocp,create_nodes,verification'" ## Tags -If the process fails in error, you should be able to run the same shell command to start the process from the top. To be more selective with what parts of playbooks run, use tags. 
To determine what you part of a playbook or role you would like to run, open the file (either main.yaml or a role/tasks/main.yaml file) and look at the "tags: " section for a task and then use those tags when running the main playbook (examples below). +* To be more selective with what parts of playbooks run, use tags. +* This is especially helpful for troubleshooting. +* To determine what you part of a playbook you would like to run, check the list below. Tags match their corresponding roles. There are also some tags like "bastion" that cover multiple roles. To see these tags, see the [main playbook](main.yaml). * Examples: -* ansible-playbook main.yaml --ask-become-pass --tags getocp (for one tag), or -* ansible-playbook main.yaml --ask-become-pass --tags 'bastion,get-ocp' (for multiple tags) +* "ansible-playbook main.yaml --ask-become-pass --tags get_ocp" (for one tag), or +* "ansible-playbook main.yaml --ask-become-pass --tags 'bastion,get_ocp'" (for multiple tags) List of Tags (in alphabetical order): * approve_certs = Tasks for approve_certs role +* attach_subscription = Auto-attach Red Hat subscription role * bastion = Configuration of bastion -* bastionvm = Creation of Bastion KVM guest -* bootstrap = Creation of Boostrap KVM guest -* boot_teardown = Use with teardown.yaml to bring down the bootstrap * check_nodes = Tasks for check_nodes role * check_dns = Check DNS resolution +* check_ssh = Check SSH role * compute = Creation of the compute nodes * control = Creation of the control nodes +* create_bastion = Creation of bastion KVM guest +* create_bootstrap = Creation of boostrap KVM guest * create_nodes = Second set of KVM host's plays * dns = Configuration of DNS server on bastion -* firewall = Configuration of firewall * full_teardown = Use with teardown.yaml to bring down all KVM guests -* get-ocp = Prepare bastion for OCP +* get_ocp = Prepare bastion for installing OpenShift * haproxy = Configuration of load balancer on bastion * httpd = Configuration 
of Apache server on bastion -* ssh-keygen = SSH key configuration and testing +* install_packages = Install and update packages * kvm_host = All KVM host tasks * kvm_prep = First set of KVM host's tasks * localhost = Tasks that apply to the local machine running Ansible +* prep_kvm_guest = Get Red Hat CoreOS kernel and initramfs on host * partial_teardown = Use with teardown.yaml to bring down all VMs except bastion -* pkg = Install and update packages -* selinux = Tasks related to SELinux settings +* set_selinux_permissive = Tasks related to SELinux settings +* set_firewall = Configuration of firewall * setup = First set of setup tasks on the localhost * ssh = All SSH tasks -* ssh-agent = Setting up SSH agent -* ssh-copy-id = Copying SSH key to target -* subscription = Attach Red Hat Subscription +* ssh_agent = Setting up SSH agent +* ssh_copy_id = Copying SSH key to target +* ssh_key_gen = Ansible SSH keypair creation +* ssh_ocp_key_gen = Generate SSH key pair for OpenShift on bastion * verification = All OpenShift cluster verification tasks * wait_for_bootstrap = Tasks for to wait_for_bootstrap role * wait_for_cluster_operators = Tasks for wait_for_cluster_operators From 7d65fc4cfb905c8a3dde73d8c54b4cbf1aebdf58 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 9 Dec 2021 03:01:25 -0600 Subject: [PATCH 547/885] Removed the option to name the Ansible SSH key, so removed its variable placement in ansible.cfg for static filename. 
Signed-off-by: Jacob Emery --- ansible.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/ansible.cfg b/ansible.cfg index 80a4489f..d432c9f8 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,4 +1,5 @@ [defaults] +private_key_file=~/.ssh/ansible inventory=inventory [inventory] From 9448bdbbe97defbc9ba7b5f1df65ff471ac5663d Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 9 Dec 2021 03:02:02 -0600 Subject: [PATCH 548/885] Completely reworked variables structure to support scaling of nodes Signed-off-by: Jacob Emery --- env.yaml | 219 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 114 insertions(+), 105 deletions(-) diff --git a/env.yaml b/env.yaml index 8181640f..36ee5b0b 100644 --- a/env.yaml +++ b/env.yaml @@ -1,114 +1,123 @@ -# Red Hat account with license to regsiter VMs. -# If you do not want to automatically attach RHEL subscription: -# comment out env_rh_username and env_rh_passwd variables below, and -# change auto_attach_rhel_sub variable to False. -# Note: If no subscription is attached manually, an error will occur when the install_packages role runs. -env_rh_username: -env_rh_passwd: -auto_attach_rhel_sub: true # make sure to also comment out the above variables if false +# The home for all your variables. The single source of truth for your specific installation. +# Variables with a #X need to be filled in. +# This is the most important step in the process, pelase take your time to make sure these are set correctly. +# A note on YAML: only the lowest level variables in a hierarchy need to be filled out. (i.e. below, don't put anything after "redhat", but do fill in "username") -# To populate OpenShift install config file. 
-env_apiVersion: v1 -env_baseDomain: -env_hyperthreading_compute: Enabled -env_compute_name: compute -env_compute_count: 2 -env_compute_arch: s390x -env_hyperthreading_control: Enabled -env_control_name: control -env_control_count: 3 -env_control_arch: s390x -env_metadata_name: -env_cidr: 10.128.0.0/14 -env_host_prefix: 23 -env_network_type: OpenShiftSDN -env_service_network: 172.30.0.0/16 -env_fips: "false" # "true" or "false" (include quotes) -env_pullSecret: '' #paste it into these single quotes -# OpenShift SSH key is generated via Ansible in ssh_ocp_key_gen role +env: + redhat: + attach_subscription: true + username: #X + password: #X + path_to_qcow2: #Absolute path to RHEL qcow2 file on workstation running Ansible, i.e. /Users/username/Downloads (If unclear, see README step 2) + install_config: + api_version: v1 + metadata_name: #X #Will be combined with base_domain to create FQDNs + base_domain: sanfran.pbm.ihost.com #X + compute: + replicas: #X + architecture: s390x + hyperthreading: Enabled + control: + replicas: #X + architecture: s390x + hyperthreading: Enabled + cluster_network: + cidr: 10.128.0.0/14 + host_prefix: 23 + type: OpenShiftSDN + service_network: 172.30.0.0/16 + fips: "false" # "true" or "false" (include quotes) + pull_secret: '#X' #paste it into these single quotes + #OpenShift SSH key is generated via Ansible in ssh_ocp_key_gen role +# IP addresses for the nodes that Ansible will be run against. +# This will automatically fill out the inventory file when setup.yaml is run. + ip: + kvm: #X + bastion: #X + bootstrap: #X + control: + - #X + - #X + - #X + compute: + - #X + - #X -# Path on local workstation to RHEL KVM Guest Image. -# Please ensure it is for System Z s390x architecture. -# See README Step 2. 
-env_rhel_qcow2: + hostname: + kvm: #X + bastion: #X + bootstrap: #X + control: + - #X + - #X + - #X + compute: + - #X + - #X -env_ip_kvm_host: -env_ip_bastion: -env_ip_bootstrap: -env_ip_control_0: -env_ip_control_1: -env_ip_control_2: -env_ip_compute_0: -env_ip_compute_1: +#Packages to be installed on the KVM host and bastion. Feel free to add more as needed. + pkgs: + kvm: ['@server-product-environment','@hardware-monitoring','@network-file-system-client','@remote-system-management', + '@headless-management','@system-tools','libvirt-devel','libvirt-daemon-kvm','qemu-kvm','virt-manager','genisoimage', + 'libvirt-daemon-config-network','libvirt-client','qemu-img','virt-install','virt-viewer','libvirt-daemon-kvm','libvirt'] + bastion: ['haproxy','httpd','bind','bind-utils','expect','firewalld','mod_ssl'] -# Node names, leave as is or change to custom names. -env_kvm_host_name: kvm_host -env_bastion_name: bastion -env_bootstrap_name: bootstrap -env_control_0_name: control-0 -env_control_1_name: control-1 -env_control_2_name: control-2 -env_compute_0_name: compute-0 -env_compute_1_name: compute-1 + networking: + interface_name: #X #KVM network interface name: i.e. enc1 + gateway: #X + netmask: #X + dns: + setup_on_bastion: true #Set to false if you do not want to setup a DNS server on the bastion because you already have a DNS server elsewhere. + nameserver: #X #If above variable is true, then this variable should be the same as env.ip.bastion above. + forwarder: 8.8.8.8 -# Networking -env_dns_nameserver: #If you are using the bastion as a DNS server (and have set the below variable env_dns_on_bastion to true) then this variable should be the same as env_ip_bastion. -env_default_gateway: -env_netmask: -env_net_int_name: #KVM network interface name: i.e. 
enc1 -env_dns_forwarder: 8.8.8.8 #Upstream DNS server, can use 8.8.8.8 as a default -env_dns_on_bastion: true #Set to false if you do not want to setup a DNS server on the bastion because you already have a DNS server. +#To create user on bastion, create and copy ssh keys + access: + login: + bastion: + username: #X + password: #X + root_password: #X + kvm: + root_password: #X + ssh: + ansible: + comment: "" + ocp: + comment: "" -# Username and password for user on VMs -env_vm_uid: -env_vm_passwd: - -# Root password for VMs -env_vm_root_passwd: - -# Ansible passwordless SSH setup. Pre-filled with recommended values. -# Ansible ssh key pair filename (don't include file extension) -env_ssh_ans_name: ansible -# Ansible SSH password. Keep quotes. Strongly recommended to leave as is (no password). -env_ssh_ans_pass: "" - -# OpenShift cluster's SSH key comment. Keep quotes. Can leave as is (no comment). -env_ssh_ocp_comm: "" - -# Bastion configuration. Pre-filled values are minimum requirements. -env_bastion_disk_size: 30 -env_bastion_ram: 4096 -env_bastion_vcpu: 4 -env_bastion_os_variant: 8.4 #RHEL version. Make sure this matches the version you copied for RHEL iso. - -# Bootstrap node configuration. Pre-filled values are minimum requirements. -env_boot_disk_size: 100 #120 recommended -env_boot_ram: 16384 -env_boot_cpu: host -env_boot_vcpu: 4 -env_boot_os_variant: 8.4 #RH CoreOS version. Make sure this matches the version you copied for RHEL iso. - -# Control node configuration. Pre-filled values are minimum requirements. -env_cont_disk_size: 100 #120 recommended -env_cont_ram: 16384 -env_cont_cpu: host -env_cont_vcpu: 4 #8 recommended -env_cont_os_variant: 8.4 #RH CoreOS version. Make sure this matches the version you copied for RHEL iso. - -# Compute node configuration. Pre-filled values are minimum requirements. 
-env_comp_disk_size: 100 #120 recommended -env_comp_ram: 8192 -env_comp_cpu: host -env_comp_vcpu: 2 #6 recommended -env_comp_os_variant: 8.4 #RH CoreOS version. Make sure this matches the version you copied for RHEL iso +#Pre-filled values are minimum requirements for nodes. + node_resources: + bastion: + disk_size: 30 + ram: 4096 + vcpu: 4 + os_variant: 8.4 + bootstrap: + disk_size: 120 + ram: 16384 + vcpu: 4 + os_variant: 8.4 + control: + disk_size: 120 + ram: 16384 + vcpu: 4 #8 recommended + os_variant: 8.4 + compute: + disk_size: 120 + ram: 8192 + vcpu: 2 #6 recommended + os_variant: 8.4 # If you would like to download the latest stable version of OpenShift, leave as is. -# Otherwise, replace these links with preferred versions. Used in get-ocp role. -env_ocp_client: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz -env_ocp_installer: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz +# Otherwise, replace these links with preferred versions. + openshift: + client: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz + installer: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz -# This version of Red Hat CoreOS works. Feel free to replace these links with preferred versions. -# Used in prep_kvm_guests and get-ocp roles. 
-env_rhcos_kernel: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-kernel-s390x -env_rhcos_initramfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-initramfs.s390x.img -env_rhcos_rootfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-rootfs.s390x.img \ No newline at end of file +# If you would like to use a version of CoreOS that has been tested with these playbooks, leave as is. +# Otherwise, replace these links with preferred versions. + coreos: + kernel: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-kernel-s390x + initramfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-initramfs.s390x.img + rootfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-rootfs.s390x.img \ No newline at end of file From e31671d41d7598486d754dae368afb4bdb129c1d Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 9 Dec 2021 03:05:23 -0600 Subject: [PATCH 549/885] Renamed to match underscores used in other role names Signed-off-by: Jacob Emery --- roles/get-ocp/tasks/main.yaml | 138 ------------------ .../get-ocp/templates/install-config.yaml.j2 | 26 ---- .../files/ocp_ssh_pub.yaml} | 0 roles/get_ocp/tasks/main.yaml | 122 ++++++++++++++++ .../get_ocp/templates/install-config.yaml.j2 | 26 ++++ .../tasks/main.yaml | 34 ++--- 6 files changed, 165 insertions(+), 181 deletions(-) delete mode 100644 roles/get-ocp/tasks/main.yaml delete mode 100644 roles/get-ocp/templates/install-config.yaml.j2 rename roles/{get-ocp/files/ocp_ssh_pub => get_ocp/files/ocp_ssh_pub.yaml} (100%) create mode 100644 roles/get_ocp/tasks/main.yaml create mode 100644 roles/get_ocp/templates/install-config.yaml.j2 rename roles/{ssh-ocp-key-gen => ssh_ocp_key_gen}/tasks/main.yaml 
(62%) diff --git a/roles/get-ocp/tasks/main.yaml b/roles/get-ocp/tasks/main.yaml deleted file mode 100644 index 4a4b9cbe..00000000 --- a/roles/get-ocp/tasks/main.yaml +++ /dev/null @@ -1,138 +0,0 @@ ---- - -- name: Load in variables from env.yaml - tags: getocp,bastion - include_vars: env.yaml - -- name: create directory bin for mirrors - tags: getocp,bastion - file: - path: /var/www/html/bin - state: directory - mode: '0755' - -- name: get rhcos rootfs - tags: getocp,bastion - get_url: - url: "{{ env_rhcos_rootfs }}" - dest: /var/www/html/bin/rhcos-live-rootfs.s390x.img - mode: '0755' - force: yes - -- name: Since ignition files deprecate after 24 hours, delete OCP download landing directory for idempotency. - tags: getocp,bastion - file: - path: /ocpinst - state: absent - -- name: create OCP download landing directory - tags: getocp,bastion - file: - path: /ocpinst/ - state: directory - -- name: Unzip OCP Client - tags: getocp,bastion - ansible.builtin.unarchive: - src: "{{ env_ocp_client }}" - dest: /ocpinst/ - remote_src: yes - -- name: Unzip OCP Installer - tags: getocp,bastion - ansible.builtin.unarchive: - src: "{{ env_ocp_installer }}" - dest: /ocpinst/ - remote_src: yes - -- name: Copy kubectl file - tags: getocp,bastion - ansible.builtin.copy: - src: /ocpinst/kubectl - dest: /usr/local/bin/kubectl - remote_src: yes - owner: root - group: root - mode: '0755' - -- name: Copy oc file - tags: getocp,bastion - ansible.builtin.copy: - src: /ocpinst/oc - dest: /usr/local/bin/oc - remote_src: yes - owner: root - group: root - mode: '0755' - -- name: Copy openshift-install file - tags: getocp,bastion - ansible.builtin.copy: - src: /ocpinst/openshift-install - dest: /usr/local/bin/openshift-install - remote_src: yes - owner: root - group: root - mode: '0755' - -- name: Fetch ssh key from bastion for use in install-config - tags: getocp,bastion - ansible.builtin.fetch: - src: ~/.ssh/id_rsa.pub - dest: roles/get-ocp/files/ocp_ssh_pub - flat: yes - -- name: Use 
template file to create install-config - tags: setup,getocp - template: - src: install-config.yaml.j2 - dest: /ocpinst/install-config.yaml - force: yes - backup: yes - -- name: Create Manifests - tags: getocp,bastion - command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ - become: yes - -- name: Set mastersSchedulable parameter to False - tags: getocp,bastion - replace: - path: /ocpinst/manifests/cluster-scheduler-02-config.yml - regexp: ': true' - replace: ': false' - -- name: Create Ignition files - tags: getocp,bastion - command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ - become: yes - -- name: create Ignition directory on webserver - tags: getocp,bastion - file: - path: /var/www/html/ignition - state: directory - -- name: Copy bootstrap Ignition file to web server - tags: getocp,bastion - copy: - src: /ocpinst/bootstrap.ign - dest: /var/www/html/ignition - remote_src: yes - mode: '775' - -- name: Copy control plane Ignition file to web server - tags: getocp,bastion - copy: - src: /ocpinst/master.ign - dest: /var/www/html/ignition - remote_src: yes - mode: '775' - -- name: Copy worker Ignition file to web server - tags: getocp,bastion - copy: - src: /ocpinst/worker.ign - dest: /var/www/html/ignition - remote_src: yes - mode: '775' diff --git a/roles/get-ocp/templates/install-config.yaml.j2 b/roles/get-ocp/templates/install-config.yaml.j2 deleted file mode 100644 index 29b0b930..00000000 --- a/roles/get-ocp/templates/install-config.yaml.j2 +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -baseDomain: {{ env_baseDomain }} -compute: -- hyperthreading: Enabled - name: worker - replicas: 2 - architecture: {{ env_compute_arch | default(s390x) }} -controlPlane: - hyperthreading: Enabled - name: master - replicas: {{ env_control_count | default(3) }} - architecture: {{ env_control_arch | default(s390x) }} -metadata: - name: {{ env_metadata_name }} -networking: - clusterNetwork: - - cidr: {{ env_cidr | default("10.128.0.0/14") }} - 
hostPrefix: {{ env_host_prefix | default(23) }} - networkType: {{ env_network_type | default(OpenShiftSDN) }} - serviceNetwork: - - {{ env_service_network | default("172.30.0.0/16") }} -platform: - none: {} -fips: {{ env_fips | default(false) }} -pullSecret: '{{ env_pullSecret }}' -sshKey: '{{ lookup('file', 'roles/get-ocp/files/ocp_ssh_pub') }}' \ No newline at end of file diff --git a/roles/get-ocp/files/ocp_ssh_pub b/roles/get_ocp/files/ocp_ssh_pub.yaml similarity index 100% rename from roles/get-ocp/files/ocp_ssh_pub rename to roles/get_ocp/files/ocp_ssh_pub.yaml diff --git a/roles/get_ocp/tasks/main.yaml b/roles/get_ocp/tasks/main.yaml new file mode 100644 index 00000000..fbcaac97 --- /dev/null +++ b/roles/get_ocp/tasks/main.yaml @@ -0,0 +1,122 @@ +--- + +- name: Load in variables from env.yaml + tags: get_ocp + include_vars: env.yaml + +- name: Create directory bin for mirrors + tags: get_ocp + file: + path: /var/www/html/bin + state: directory + mode: '0755' + +- name: Check to see if rootfs already exists on bastion + tags: get_ocp + stat: + path: /var/www/html/bin/rhcos-live-rootfs.s390x.img + register: rootfs_check + +- name: Get Red Hat CoreOS rootfs file + tags: get_ocp + get_url: + url: "{{ env.coreos.rootfs }}" + dest: /var/www/html/bin/rhcos-live-rootfs.s390x.img + mode: '0755' + force: yes + when: rootfs_check.stat.exists == false + +- name: Delete OCP download directory for idempotency, because ignition files deprecate after 24 hours. 
+ tags: get_ocp + file: + path: /ocpinst + state: absent + +- name: Create OCP download directory + tags: get_ocp + file: + path: /ocpinst/ + state: directory + +- name: Unzip OCP client and installer + tags: get_ocp + ansible.builtin.unarchive: + src: "{{ item }}" + dest: /ocpinst/ + remote_src: yes + loop: + - "{{ env.openshift.client }}" + - "{{ env.openshift.installer }}" + +- name: Copy kubectl and oc files to bastion + tags: get_ocp + ansible.builtin.copy: + src: /ocpinst/{{item}} + dest: /usr/local/bin/{{item}} + remote_src: yes + owner: root + group: root + mode: '0755' + loop: + - kubectl + - oc + +- name: Copy openshift-install file to bastion + tags: get_ocp + ansible.builtin.copy: + src: /ocpinst/openshift-install + dest: /usr/local/bin/openshift-install + remote_src: yes + owner: root + group: root + mode: '0755' + +- name: Fetch SSH key from bastion for use in template + tags: getocp,bastion + ansible.builtin.fetch: + src: ~/.ssh/id_rsa.pub + dest: roles/get_ocp/files/ocp_ssh_pub.yaml + flat: yes + +- name: Use template file to create install-config + tags: get_ocp + template: + src: install-config.yaml.j2 + dest: /ocpinst/install-config.yaml + force: yes + backup: yes + +- name: Create manifests + tags: get_ocp + command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ + become: yes + +- name: Set masters schedulable parameter to false + tags: get_ocp + replace: + path: /ocpinst/manifests/cluster-scheduler-02-config.yml + regexp: ': true' + replace: ': false' + +- name: Create ignition files + tags: get_ocp + command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ + become: yes + +- name: create ignition directory on webserver + tags: get_ocp + file: + path: /var/www/html/ignition + state: directory + +- name: Copy ignition files to web server + tags: get_ocp + copy: + src: /ocpinst/{{ item }}.ign + dest: /var/www/html/ignition + remote_src: yes + mode: '775' + loop: + - bootstrap + - master + - worker \ No newline at 
end of file diff --git a/roles/get_ocp/templates/install-config.yaml.j2 b/roles/get_ocp/templates/install-config.yaml.j2 new file mode 100644 index 00000000..0dfb74bb --- /dev/null +++ b/roles/get_ocp/templates/install-config.yaml.j2 @@ -0,0 +1,26 @@ +apiVersion: {{ env.install_config.api_version }} +baseDomain: {{ env.install_config.base_domain }} +compute: +- hyperthreading: {{ env.install_config.compute.hyperthreading }} + name: worker + replicas: {{ env.install_config.compute.replicas }} + architecture: {{ env.install_config.compute.architecture }} +controlPlane: + hyperthreading: {{ env.install_config.control.hyperthreading }} + name: master + replicas: {{ env.install_config.control.replicas }} + architecture: {{ env.install_config.control.architecture }} +metadata: + name: {{ env.install_config.metadata_name }} +networking: + clusterNetwork: + - cidr: {{ env.install_config.cluster_network.cidr }} + hostPrefix: {{ env.install_config.cluster_network.host_prefix }} + networkType: {{ env.install_config.cluster_network.type }} + serviceNetwork: + - {{ env.install_config.service_network }} +platform: + none: {} +fips: {{ env.install_config.fips }} +pullSecret: '{{ env.install_config.pull_secret }}' +sshKey: '{{ lookup('file', 'roles/get_ocp/files/ocp_ssh_pub.yaml') }}' \ No newline at end of file diff --git a/roles/ssh-ocp-key-gen/tasks/main.yaml b/roles/ssh_ocp_key_gen/tasks/main.yaml similarity index 62% rename from roles/ssh-ocp-key-gen/tasks/main.yaml rename to roles/ssh_ocp_key_gen/tasks/main.yaml index 916b0c63..33485989 100644 --- a/roles/ssh-ocp-key-gen/tasks/main.yaml +++ b/roles/ssh_ocp_key_gen/tasks/main.yaml @@ -1,22 +1,22 @@ --- - name: Load in variables from env.yaml - tags: keymastr,getocp + tags: ssh_ocp_key_gen, ssh include_vars: env.yaml -- name: Check to see if local .ssh directory exists - tags: keymastr, getocp +- name: Check to see if local SSH directory exists + tags: ssh_ocp_key_gen, ssh stat: path: ~/.ssh register: 
ssh_directory_exists_check -- name: Print results of .ssh directory check - tags: keymastr,bastion,getocp +- name: Print results of SSH directory check + tags: ssh_ocp_key_gen, ssh debug: var: ssh_directory_exists_check -- name: Create .ssh local directory if it doesn't already exist - tags: keymastr,getocp +- name: Create SSH local directory if it doesn't already exist + tags: ssh_ocp_key_gen, ssh file: path: ~/.ssh state: directory @@ -24,13 +24,13 @@ register: ssh_directory_creation when: ssh_directory_exists_check is defined and ssh_directory_exists_check.stat.exists == false -- name: Print results of ssh directory creation - tags: keymastr,getocp +- name: Print results of SSH directory creation + tags: ssh_ocp_key_gen, ssh debug: var: ssh_directory_creation -- name: Check .ssh key pair files exist - tags: keymastr,getocp +- name: Check SSH key pair files exist + tags: ssh_ocp_key_gen, ssh stat: path: ~/.ssh/{{item}} register: ssh_key_file_exists_check @@ -38,25 +38,25 @@ - "id_rsa" - "id_rsa.pub" -- name: Print results of ssh key pair files check - tags: keymastr,getocp +- name: Print results of SSH key pair files check + tags: ssh_ocp_key_gen, ssh debug: var: ssh_key_file_exists_check.results[1].stat.exists - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key if it doesn't exist already - tags: keymastr,getocp + tags: ssh_ocp_key_gen, ssh community.crypto.openssh_keypair: path: ~/.ssh/id_rsa backend: opensshbin owner: root passphrase: "" - comment: "{{ env_ssh_ocp_comm }}" + comment: "{{ env.access.ssh.ocp.comment }}" regenerate: full_idempotence register: ssh_ocp when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false -- name: Print results of ssh key generation - tags: keymastr,getocp +- name: Print results of SSH key generation + tags: ssh_ocp_key_gen, ssh debug: var: ssh_ocp.public_key when: 
ssh_ocp.changed == true \ No newline at end of file From a240b462b94c6f4610d6d16b08ba41b6ff45b239 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 9 Dec 2021 03:07:18 -0600 Subject: [PATCH 550/885] Changed variable references to match env.yaml and added functionality to support scaling of nodes Signed-off-by: Jacob Emery --- main.yaml | 21 ++-- roles/approve_certs/tasks/main.yaml | 18 ++-- roles/attach_subscription/tasks/main.yaml | 8 +- roles/check_dns/tasks/main.yaml | 64 +++++++---- roles/check_nodes/tasks/main.yaml | 2 +- roles/check_ssh/tasks/main.yaml | 8 +- roles/create_bastion/tasks/main.yaml | 102 +++++++++--------- .../templates/cloud_init.cfg.j2 | 20 ++-- .../templates/network_config_static.cfg.j2 | 8 +- roles/create_bootstrap/tasks/main.yaml | 30 +++--- roles/create_compute_nodes/tasks/main.yaml | 71 +++--------- roles/create_control_nodes/tasks/main.yaml | 102 +++--------------- roles/dns/tasks/main.yaml | 85 +++++++++++---- roles/dns/templates/dns-named.conf.j2 | 8 +- roles/dns/templates/dns.db.j2 | 23 ++-- roles/dns/templates/dns.rev.j2 | 19 ++-- roles/haproxy/tasks/main.yaml | 61 +++++++++-- roles/haproxy/templates/haproxy.cfg.j2 | 22 ++-- roles/httpd/tasks/main.yaml | 6 +- roles/install_packages/tasks/main.yaml | 6 +- roles/macvtap/tasks/main.yaml | 6 +- roles/macvtap/templates/macvtap.xml.j2 | 4 +- roles/prep_kvm_guests/tasks/main.yaml | 28 +++-- roles/reset_files/tasks/main.yaml | 4 +- roles/set_firewall/tasks/main.yaml | 12 +-- roles/set_inventory/tasks/main.yaml | 50 +++++---- roles/ssh_agent/tasks/main.yaml | 12 +-- roles/ssh_copy_id/tasks/main.yaml | 38 +++---- .../ssh_copy_id/templates/ssh-copy-id.exp.j2 | 5 +- roles/ssh_key_gen/tasks/main.yaml | 51 +++++---- roles/teardown_vms/tasks/main.yaml | 64 +++++++++-- roles/wait_for_bootstrap/tasks/main.yaml | 10 +- .../tasks/main.yaml | 54 +++++++++- .../wait_for_install_complete/tasks/main.yaml | 2 +- teardown.yaml | 80 +++++++------- 35 files changed, 585 insertions(+), 519 deletions(-) diff 
--git a/main.yaml b/main.yaml index 0bcfdda8..6eb63bdc 100644 --- a/main.yaml +++ b/main.yaml @@ -8,7 +8,7 @@ vars_files: - env.yaml vars: - - ssh_target_ip: "{{ env_ip_kvm_host }}" + - ssh_target_ip: "{{ env.ip.kvm }}" roles: - ssh_key_gen - ssh_copy_id @@ -19,16 +19,13 @@ become: true vars_files: - env.yaml - vars: # feel free to add more packages as needed - - packages: ['@server-product-environment','@hardware-monitoring','@network-file-system-client','@remote-system-management', - '@headless-management','@system-tools','libvirt-devel','libvirt-daemon-kvm','qemu-kvm','virt-manager','genisoimage', - 'libvirt-daemon-config-network','libvirt-client','qemu-img','virt-install','virt-viewer','libvirt-daemon-kvm','libvirt'] + vars: + - packages: "{{ env.pkgs.kvm}}" roles: - check_ssh - attach_subscription - install_packages - set_selinux_permissive - - enable_packages - macvtap - create_bastion @@ -40,7 +37,7 @@ vars_files: - env.yaml vars: - - ssh_target_ip: "{{ env_ip_bastion }}" + - ssh_target_ip: "{{ env.ip.bastion }}" roles: - ssh_copy_id @@ -49,20 +46,20 @@ become: true vars_files: - env.yaml - vars: # feel free to add more packages as needed - - packages: ['haproxy','httpd','bind','bind-utils','expect','firewalld','mod_ssl'] + vars: + - packages: "{{env.pkgs.bastion}}" roles: - check_ssh - attach_subscription - install_packages - - ssh-ocp-key-gen # SSH key for bastion to connect to nodes + - ssh_ocp_key_gen - set_selinux_permissive - set_firewall - #- dns + - dns - check_dns - haproxy - httpd - - get-ocp + - get_ocp - hosts: kvm_host tags: kvm_host, create_nodes diff --git a/roles/approve_certs/tasks/main.yaml b/roles/approve_certs/tasks/main.yaml index 0cc149e6..32024767 100644 --- a/roles/approve_certs/tasks/main.yaml +++ b/roles/approve_certs/tasks/main.yaml @@ -9,12 +9,12 @@ register: csr_approved_1 ignore_errors: yes -- name: viewing first csr approved +- name: Viewing first csr approved tags: approve_certs debug: msg: "{{csr_approved_1.stdout_lines}}" -- 
name: pause to let new certificates needing approval to generate +- name: Pause to let new certificates needing approval to generate tags: approve_certs pause: seconds: 30 @@ -28,12 +28,12 @@ register: csr_approved_2 ignore_errors: yes -- name: viewing second csr approved +- name: Viewing second csr approved tags: approve_certs debug: msg: "{{csr_approved_2.stdout_lines}}" -- name: pause to let new certificates needing approval to generate +- name: Pause to let new certificates needing approval to generate tags: approve_certs pause: seconds: 30 @@ -47,12 +47,12 @@ register: csr_approved_3 ignore_errors: yes -- name: viewing third csr approved +- name: Viewing third csr approved tags: approve_certs debug: msg: "{{csr_approved_3.stdout_lines}}" -- name: pause to let new certificates needing approval to generate +- name: Pause to let new certificates needing approval to generate tags: approve_certs pause: seconds: 30 @@ -66,12 +66,12 @@ register: csr_approved_4 ignore_errors: yes -- name: viewing fourth csr approved +- name: Viewing fourth csr approved tags: approve_certs debug: msg: "{{csr_approved_4.stdout_lines}}" -- name: pause to let new certificates needing approval to generate +- name: Pause to let new certificates needing approval to generate tags: approve_certs pause: seconds: 30 @@ -85,7 +85,7 @@ register: csr_approved_5 ignore_errors: yes -- name: viewing fifth csr approved +- name: Viewing fifth csr approved tags: approve_certs debug: msg: "{{csr_approved_4.stdout_lines}}" \ No newline at end of file diff --git a/roles/attach_subscription/tasks/main.yaml b/roles/attach_subscription/tasks/main.yaml index a0421602..05946d12 100644 --- a/roles/attach_subscription/tasks/main.yaml +++ b/roles/attach_subscription/tasks/main.yaml @@ -1,10 +1,10 @@ --- -- name: Attach RHEL subscription - tags: subscription +- name: Auto attach RHEL subscription + tags: attach_subscription community.general.redhat_subscription: state: present - username: "{{env_rh_username}}" - 
password: "{{env_rh_passwd}}" + username: "{{env.redhat.username}}" + password: "{{env.redhat.password}}" auto_attach: yes force_register: yes \ No newline at end of file diff --git a/roles/check_dns/tasks/main.yaml b/roles/check_dns/tasks/main.yaml index f1fd58e6..7e6a9245 100644 --- a/roles/check_dns/tasks/main.yaml +++ b/roles/check_dns/tasks/main.yaml @@ -1,35 +1,55 @@ --- -- name: create list of IP addresses from env.yaml - tags: check_dns - set_fact: - ip_from_env: ['{{ env_ip_bastion }}','{{ env_ip_bootstrap }}','{{ env_ip_control_0 }}','{{ env_ip_control_1 }}','{{ env_ip_control_2 }}','{{ env_ip_compute_0 }}','{{ env_ip_compute_1 }}','{{ env_ip_bastion }}','{{ env_ip_bastion }}','{{ env_ip_bastion }}'] +- name: Load in variables from env.yaml + tags: check_dns,dns + include_vars: env.yaml -- name: check internal cluster DNS resolution +- name: Check internal cluster DNS resolution for bastion and its services tags: check_dns,dns shell: "dig +short {{ item }} | tail -n1" loop: - - "{{ env_bastion_name }}.{{ env_baseDomain }}" - - "{{ env_bootstrap_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" - - "{{ env_control_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" - - "{{ env_control_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" - - "{{ env_control_2_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" - - "{{ env_compute_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" - - "{{ env_compute_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}" - - "api.{{ env_metadata_name }}.{{ env_baseDomain }}" - - "api-int.{{ env_metadata_name }}.{{ env_baseDomain }}" - - "test.apps.{{ env_metadata_name }}.{{ env_baseDomain }}" - register: command_result - failed_when: ip_from_env[i] != command_result.stdout + - "{{ env.hostname.bastion }}.{{ env.install_config.base_domain }}" + - "api.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}" + - "api-int.{{ env.install_config.metadata_name }}.{{ 
env.install_config.base_domain }}" + - "test.apps.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}" + register: bastion_lookup + failed_when: env.ip.bastion != bastion_lookup.stdout + +- name: Check internal cluster DNS resolution for bootstrap + tags: check_dns,dns + shell: "dig +short {{ env.hostname.bootstrap }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }} | tail -n1" + register: bootstrap_lookup + failed_when: env.ip.bootstrap != bootstrap_lookup.stdout + +- name: Print results from bootstrap lookup + tags: check_dns, dns + debug: + var: bootstrap_lookup.stdout + +- name: Check control nodes DNS resolution + tags: check_dns,dns + shell: "dig +short {{ env.hostname.control[i] }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }} | tail -n1" + register: control_lookup + failed_when: env.ip.control[i] != control_lookup.stdout + with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 loop_control: extended: yes index_var: i - -- name: check external DNS resolution from DNS forwarder + +- name: Check compute nodes DNS resolution + tags: check_dns,dns + shell: "dig +short {{ env.hostname.compute[i] }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }} | tail -n1" + register: compute_lookup + failed_when: env.ip.compute[i] != compute_lookup.stdout + with_sequence: start=0 end={{env.install_config.compute.replicas - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Check external DNS resolution from DNS forwarder tags: check_dns,dns - register: command_result - # fail step if the output contains "server can't find" - failed_when: '"server can" in command_result.stdout' + register: external_dns_check + failed_when: '"server can" in external_dns_check.stdout' command: "nslookup {{ item }}" loop: - www.google.com diff --git a/roles/check_nodes/tasks/main.yaml b/roles/check_nodes/tasks/main.yaml index 070e8415..b8cfbdaf 
100644 --- a/roles/check_nodes/tasks/main.yaml +++ b/roles/check_nodes/tasks/main.yaml @@ -7,7 +7,7 @@ shell: oc get nodes | awk '{print $1, $2}' register: oc_get_nodes - - name: print nodes status + - name: Print nodes status tags: check_nodes debug: var: oc_get_nodes.stdout_lines diff --git a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml index ce7d56bf..ebb2d22f 100644 --- a/roles/check_ssh/tasks/main.yaml +++ b/roles/check_ssh/tasks/main.yaml @@ -1,12 +1,12 @@ --- -- name: check ssh to remote hosts works - tags: keymastr +- name: Check SSH to remote hosts works + tags: check_ssh, ssh shell: "hostname; id" register: ssh_connection_test failed_when: ssh_connection_test.rc != 0 -- name: print the connectivity test results - tags: keymastr +- name: Print the connectivity test results + tags: check_ssh, ssh debug: var: ssh_connection_test.stdout_lines \ No newline at end of file diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index dd47b2c1..b23046e6 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -1,128 +1,130 @@ --- - name: Load in variables from env.yaml - tags: kvm_host, bastionvm + tags: create_bastion include_vars: env.yaml -- name: enable cockpit console - tags: kvm_host, bastionvm +- name: Enable cockpit console + tags: create_bastion command: systemctl enable --now cockpit.socket -- name: remove working directory for idempotency - tags: kvm_host, bastionvm +- name: Remove working directory for idempotency + tags: create_bastion file: path: /var/lib/libvirt/images/tmp state: absent -- name: create working directory - tags: kvm_host, bastionvm +- name: Create working directory + tags: create_bastion file: path: /var/lib/libvirt/images/tmp state: directory mode: '0755' -- name: check to see if qcow2 file already exists on KVM host - tags: bastionvm +- name: Check to see if qcow2 file already exists on KVM host + tags: create_bastion stat: path: 
/var/lib/libvirt/images/bastion_base.qcow2 register: qcow2_check -- name: copy RHEL qcow2 file to KVM host. This may take a while. - tags: kvm_host, bastionvm +- name: Copy RHEL qcow2 file to KVM host. This may take a while. + tags: create_bastion copy: - src: "{{ env_rhel_qcow2 }}" + src: "{{ env.redhat.path_to_qcow2 }}" dest: /var/lib/libvirt/images/bastion_base.qcow2 mode: '0775' when: qcow2_check.stat.exists == false register: rhel_qcow2_download -- name: remove snapshot for idempotency - tags: kvm_host, bastionvm +- name: Remove snapshot for idempotency + tags: create_bastion file: - path: /var/lib/libvirt/images/{{env_bastion_name}}-snapshot-cloudimg.qcow2 + path: /var/lib/libvirt/images/{{env.hostname.bastion}}-snapshot-cloudimg.qcow2 state: absent -- name: create base image - tags: kvm_host, bastionvm - command: "qemu-img create -b /var/lib/libvirt/images/bastion_base.qcow2 -f qcow2 /var/lib/libvirt/images/{{env_bastion_name}}-snapshot-cloudimg.qcow2 {{env_bastion_disk_size}}G" +- name: Create base image + tags: create_bastion + command: "qemu-img create -b /var/lib/libvirt/images/bastion_base.qcow2 -f qcow2 /var/lib/libvirt/images/{{env.hostname.bastion}}-snapshot-cloudimg.qcow2 {{env.node_resources.bastion.disk_size}}G" register: qemu_create -- name: print result of creation of base image - tags: kvm_host, bastionvm +- name: Print result of creation of base image + tags: create_bastion debug: var: qemu_create -- name: get info about qemu image creation - tags: kvm_host, bastionvm - command: "qemu-img info /var/lib/libvirt/images/{{env_bastion_name}}-snapshot-cloudimg.qcow2" +- name: Get info about qemu image creation + tags: create_bastion + command: "qemu-img info /var/lib/libvirt/images/{{env.hostname.bastion}}-snapshot-cloudimg.qcow2" register: qemu_info -- name: print output from qemu image creation information - tags: kvm_host, bastionvm +- name: Print output from qemu image creation information + tags: create_bastion debug: var: qemu_info -- name: 
create instance-id - tags: kvm_host, bastionvm +- name: Create instance-id + tags: create_bastion shell: "echo \"instance-id: $(uuidgen || echo i-abcdefg)\" > /var/lib/libvirt/images/tmp/meta-data" register: uuidgen -- name: print output from uuidgen command - tags: kvm_host, bastionvm +- name: Print output from uuidgen command + tags: create_bastion debug: var: uuidgen - name: Use cloud_init.cfg.j2 template to make user-data file - tags: kvm_host, bastionvm + tags: create_bastion template: src: cloud_init.cfg.j2 dest: /var/lib/libvirt/images/tmp/user-data - name: Use network_config_static.cfg.j2 template to make network-config file - tags: kvm_host, bastionvm + tags: create_bastion template: src: network_config_static.cfg.j2 dest: /var/lib/libvirt/images/tmp/network-config -- name: generate iso file - tags: kvm_host, bastionvm - command: genisoimage -output /var/lib/libvirt/images/{{env_bastion_name}}-seed.img -volid cidata -joliet -rock /var/lib/libvirt/images/tmp/meta-data /var/lib/libvirt/images/tmp/network-config /var/lib/libvirt/images/tmp/user-data +- name: Generate iso file + tags: create_bastion + command: genisoimage -output /var/lib/libvirt/images/{{env.hostname.bastion}}-seed.img -volid cidata -joliet -rock /var/lib/libvirt/images/tmp/meta-data /var/lib/libvirt/images/tmp/network-config /var/lib/libvirt/images/tmp/user-data register: gen_iso -- name: print output from generating iso - tags: kvm_host, bastionvm +- name: Print output from generating iso + tags: create_bastion debug: var: gen_iso -- name: check if bastion already exists. Expect an ignored error if it doesn't exist. - tags: kvm_host, bastionvm +- name: Check if bastion already exists. Expect an ignored error if it doesn't exist. 
+ tags: create_bastion community.libvirt.virt: - name: "{{ env_bastion_name }}" + name: "{{ env.hostname.bastion }}" command: status register: bastion_check ignore_errors: true - name: Boot bastion - tags: kvm_host, bastionvm + tags: create_bastion command: virt-install - --name {{env_bastion_name}} \ - --virt-type kvm --memory {{env_bastion_ram}} --vcpus {{env_bastion_vcpu}} \ + --name {{ env.hostname.bastion }} \ + --virt-type kvm \ + --memory {{ env.node_resources.bastion.ram }} \ + --vcpus {{ env.node_resources.bastion.vcpu }} \ --boot hd \ - --disk path=/var/lib/libvirt/images/{{env_bastion_name}}-seed.img,device=cdrom \ - --disk path=/var/lib/libvirt/images/{{env_bastion_name}}-snapshot-cloudimg.qcow2,device=disk \ + --disk path=/var/lib/libvirt/images/{{ env.hostname.bastion }}-seed.img,device=cdrom \ + --disk path=/var/lib/libvirt/images/{{ env.hostname.bastion }}-snapshot-cloudimg.qcow2,device=disk \ --graphics none \ - --os-type Linux --os-variant rhel{{env_bastion_os_variant}} \ + --os-type Linux --os-variant rhel{{env.node_resources.bastion.os_variant}} \ --network network=macvtap-net \ --noautoconsole \ --noreboot when: bastion_check.failed == true -- name: Start bastion VM - tags: kvm_host, bastionvm - command: virsh start {{env_bastion_name}} +- name: Restart bastion + tags: create_bastion + command: virsh start {{ env.hostname.bastion }} -- name: wait 3 minutes for automated bastion installation and configuration to complete. To monitor, use a web browser to go to https://your-kvm-host-ip-address-here:9090, sign in as root and use the password you set for env_vm_root_passwd in env.yaml, then go to the Virtual Machines tab, click on bastion's hostname. - tags: kvm_host, bastionvm +- name: Waiting 3 minutes for automated bastion installation and configuration to complete. 
To monitor, use a web browser to go to https://your-kvm-host-ip-address-here:9090, sign in as 'root' and use the password you set for env.access.login.kvm.root_password in env.yaml, then go to the 'Virtual Machines' tab and click on the bastion's hostname. + tags: create_bastion pause: minutes: 3 when: bastion_check.failed == true \ No newline at end of file diff --git a/roles/create_bastion/templates/cloud_init.cfg.j2 b/roles/create_bastion/templates/cloud_init.cfg.j2 index 91c2af3d..281902f2 100644 --- a/roles/create_bastion/templates/cloud_init.cfg.j2 +++ b/roles/create_bastion/templates/cloud_init.cfg.j2 @@ -1,12 +1,12 @@ #cloud-config -hostname: {{env_bastion_name}} -fqdn: {{env_bastion_name}}.{{ env_metadata_name }}.{{ env_baseDomain }} +hostname: {{env.hostname.bastion}} +fqdn: {{env.hostname.bastion}}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }} manage_etc_hosts: true users: - - name: {{ env_vm_uid }} + - name: {{ env.access.login.bastion.username }} sudo: ALL=(ALL) NOPASSWD:ALL groups: adm,sys - home: /home/{{ env_vm_uid }} + home: /home/{{ env.access.login.bastion.username }} shell: /bin/bash lock_passwd: false # allow both password auth and cert auth via ssh (console access can still login) @@ -14,18 +14,10 @@ ssh_pwauth: true disable_root: false chpasswd: list: | - root:{{ env_vm_root_passwd }} - {{ env_vm_uid }}:{{ env_vm_passwd }} + root:{{ env.access.login.bastion.root_password }} + {{ env.access.login.bastion.username }}:{{ env.access.login.bastion.username }} expire: False -# https://cloudinit.readthedocs.io/en/latest/topics/examples.html#register-redhat-subscription -# https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html/installation_and_configuration_guide/setting_up_cloud_init -# Attach Red Hat subscription -#rh_subscription: -# username: '{{ env_rh_username }}' -# password: '{{ env_rh_passwd }}' -# auto-attach: True - #growpart: # mode: auto # devices: ['/'] diff --git 
a/roles/create_bastion/templates/network_config_static.cfg.j2 b/roles/create_bastion/templates/network_config_static.cfg.j2 index 2b793692..5a8ff0a4 100644 --- a/roles/create_bastion/templates/network_config_static.cfg.j2 +++ b/roles/create_bastion/templates/network_config_static.cfg.j2 @@ -3,8 +3,8 @@ ethernets: eth0: dhcp4: false # default libvirt network - addresses: [ {{ env_ip_bastion }} ] - gateway4: {{ env_default_gateway }} + addresses: [ {{ env.ip.bastion }} ] + gateway4: {{ env.networking.gateway }} nameservers: - search: [ {{ env_baseDomain }} ] - addresses: [ {{ env_dns_nameserver }},{{ env_dns_forwarder }} ] \ No newline at end of file + search: [ {{ env.install_config.base_domain }} ] + addresses: [ {{ env.networking.dns.nameserver }},{{ env.networking.dns.forwarder }} ] \ No newline at end of file diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 905227e9..b6959b5f 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -1,36 +1,36 @@ --- - name: Load in variables from env.yaml - tags: bootstrap + tags: create_bootstrap include_vars: env.yaml -- name: check if bootstrap already exists - tags: bootstrap +- name: Check if bootstrap already exists + tags: create_bootstrap community.libvirt.virt: - name: "{{ env_bootstrap_name }}" + name: "{{ env.hostname.bootstrap }}" command: status register: bootstrap_check ignore_errors: yes -- name: print status of bootstrap - tags: bootstrap +- name: Print status of bootstrap + tags: create_bootstrap debug: var: bootstrap_check -- name: boot bootstrap - tags: bootstrap +- name: Start bootstrap installation + tags: create_bootstrap command: | virt-install \ - --name {{env_bootstrap_name}} \ - --disk /var/lib/libvirt/images/{{env_bootstrap_name}}-bootstrap.qcow2,size={{ env_boot_disk_size }} \ - --ram {{ env_boot_ram }} \ - --cpu {{ env_boot_cpu }} \ - --vcpus {{ env_boot_vcpu }} \ + --name {{env.hostname.bootstrap}} \ + 
--disk /var/lib/libvirt/images/{{env.hostname.bootstrap}}-bootstrap.qcow2,size={{ env.node_resources.bootstrap.disk_size }} \ + --ram {{ env.node_resources.bootstrap.ram }} \ + --cpu host \ + --vcpus {{ env.node_resources.bootstrap.vcpu }} \ --os-type linux \ - --os-variant rhel{{ env_boot_os_variant }} \ + --os-variant rhel{{ env.node_resources.bootstrap.os_variant }} \ --network network=macvtap-net \ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_bootstrap}}::{{env_default_gateway}}:{{env_netmask}}:{{env_bootstrap_name}}::none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/bootstrap.ign" \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.ip.bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env.ip.bootstrap}}::{{env.networking.gateway}}:{{env.networking.netmask}}:{{env.hostname.bootstrap}}::none:1500 nameserver={{env.networking.dns.nameserver}} coreos.inst.ignition_url=http://{{env.ip.bastion}}:8080/ignition/bootstrap.ign" \ --graphics none \ --wait=-1 \ --noautoconsole diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index 2a0fffd1..f0e53004 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -1,67 +1,26 @@ --- - name: Load in variables from env.yaml - tags: compute + tags: create_compute_nodes include_vars: env.yaml -- name: check if compute-0 already exists - tags: compute - community.libvirt.virt: - name: "{{ env_compute_0_name }}" - command: status - register: compute_0_check - ignore_errors: yes - -- name: print status of compute-0 - tags: compute - debug: - var: compute_0_check - -- name: check if compute-1 
already exists - tags: compute - community.libvirt.virt: - name: "{{ env_compute_1_name }}" - command: status - register: compute_1_check - ignore_errors: yes - -- name: print status of compute-1 - tags: compute - debug: - var: compute_1_check - -- name: install CoreOS on compute-0 node - tags: compute - command: | - virt-install \ - --name {{env_compute_0_name}} \ - --disk size={{env_comp_disk_size}} \ - --ram {{env_comp_ram}} \ - --cpu {{env_comp_cpu}} \ - --vcpus {{env_comp_vcpu}} \ - --os-type linux \ - --os-variant rhel{{env_comp_os_variant}} \ - --network network=macvtap-net \ - --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_compute_0}}::{{env_default_gateway}}:{{env_netmask}}:{{env_compute_0_name}}::none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/worker.ign" \ - --wait=-1 \ - --noautoconsole - when: compute_0_check.failed == true - -- name: install CoreOS on compute-1 node - tags: compute +- name: Install CoreOS on compute nodes + tags: create_compute_nodes command: | virt-install \ - --name {{env_compute_1_name}} \ - --disk size={{env_comp_disk_size}} \ - --ram {{env_comp_ram}} \ - --cpu {{env_comp_cpu}} \ - --vcpus {{env_comp_vcpu}} \ + --name {{env.hostname.compute[i]}} \ + --disk size={{env.node_resources.compute.disk_size}} \ + --ram {{env.node_resources.compute.ram}} \ + --cpu host \ + --vcpus {{env.node_resources.compute.vcpu}} \ --os-type linux \ - --os-variant rhel{{env_comp_os_variant}} \ + --os-variant rhel{{env.node_resources.compute.os_variant}} \ --network network=macvtap-net \ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda 
coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_compute_1}}::{{env_default_gateway}}:{{env_netmask}}:{{env_compute_1_name}}::none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/worker.ign" \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.ip.bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env.ip.compute[i]}}::{{env.networking.gateway}}:{{env.networking.netmask}}:{{env.hostname.compute[i]}}::none:1500 nameserver={{env.networking.dns.nameserver}} coreos.inst.ignition_url=http://{{env.ip.bastion}}:8080/ignition/worker.ign" \ --wait=-1 \ - --noautoconsole - when: compute_1_check.failed == true \ No newline at end of file + --noautoconsole + with_sequence: start=0 end={{env.install_config.compute.replicas - 1}} stride=1 + loop_control: + extended: yes + index_var: i \ No newline at end of file diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index 7a052a3c..e96e5d7d 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -1,101 +1,27 @@ --- - name: Load in variables from env.yaml - tags: control + tags: create_control_nodes include_vars: env.yaml -- name: check if control-0 already exists - tags: control - community.libvirt.virt: - name: "{{ env_control_0_name }}" - command: status - register: control_0_check - ignore_errors: yes - -- name: print status of control-0 - tags: control - debug: - var: control_0_check - -- name: check if control-1 already exists - tags: control - community.libvirt.virt: - name: "{{ env_control_1_name }}" - command: status - register: control_1_check - ignore_errors: yes - -- name: print status of control-1 - tags: control - debug: - var: control_1_check - -- name: check if control-2 already exists - tags: control - community.libvirt.virt: - name: "{{ env_control_2_name 
}}" - command: status - register: control_2_check - ignore_errors: yes - -- name: print status of control-2 - tags: control - debug: - var: control_2_check - -- name: install CoreOS on control-0 node - tags: control - command: | - virt-install \ - --name {{env_control_0_name}} \ - --disk size={{env_cont_disk_size}} \ - --ram {{env_cont_ram}} \ - --cpu {{env_cont_cpu}} \ - --vcpus {{env_cont_vcpu}} \ - --os-type linux \ - --os-variant rhel{{env_cont_os_variant}} \ - --network network=macvtap-net \ - --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_control_0}}::{{env_default_gateway}}:{{env_netmask}}:{{env_control_0_name}}::none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/master.ign" \ - --graphics none \ - --wait=-1 \ - --noautoconsole - when: control_0_check.failed == true - -- name: install CoreOS on control-1 node - tags: control - command: | - virt-install \ - --name {{env_control_1_name}} \ - --disk size={{env_cont_disk_size}} \ - --ram {{env_cont_ram}} \ - --cpu {{env_cont_cpu}} \ - --vcpus {{env_cont_vcpu}} \ - --os-type linux \ - --os-variant rhel{{env_cont_os_variant}} \ - --network network=macvtap-net \ - --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_control_1}}::{{env_default_gateway}}:{{env_netmask}}:{{env_control_1_name}}::none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/master.ign" \ - --graphics none \ - --wait=-1 \ - --noautoconsole - when: control_1_check.failed == true - -- name: 
install CoreOS on control-2 node - tags: control +- name: Install CoreOS on control nodes + tags: create_control_nodes command: | virt-install \ - --name {{env_control_2_name}} \ - --disk size={{env_cont_disk_size}} \ - --ram {{env_cont_ram}} \ - --cpu {{env_cont_cpu}} \ - --vcpus {{env_cont_vcpu}} \ + --name {{env.hostname.control[i]}} \ + --disk size={{env.node_resources.control.disk_size}} \ + --ram {{env.node_resources.control.ram}} \ + --cpu host \ + --vcpus {{env.node_resources.control.vcpu}} \ --os-type linux \ - --os-variant rhel{{env_cont_os_variant}} \ + --os-variant rhel{{env.node_resources.control.os_variant}} \ --network network=macvtap-net \ --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env_ip_bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env_ip_control_2}}::{{env_default_gateway}}:{{env_netmask}}:{{env_control_2_name}}::none:1500 nameserver={{env_dns_nameserver}} coreos.inst.ignition_url=http://{{env_ip_bastion}}:8080/ignition/master.ign" \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.ip.bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env.ip.control[i]}}::{{env.networking.gateway}}:{{env.networking.netmask}}:{{env.hostname.control[i]}}::none:1500 nameserver={{env.networking.dns.nameserver}} coreos.inst.ignition_url=http://{{env.ip.bastion}}:8080/ignition/master.ign" \ --graphics none \ --wait=-1 \ --noautoconsole - when: control_2_check.failed == true \ No newline at end of file + with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 + loop_control: + extended: yes + index_var: i \ No newline at end of file diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml index 2925eea0..99a56fc5 100644 --- a/roles/dns/tasks/main.yaml +++ b/roles/dns/tasks/main.yaml @@ -1,34 +1,29 @@ --- - 
name: Load in variables from env.yaml - tags: dns,setup + tags: dns include_vars: env.yaml -- name: enable named - tags: dns,bastion +- name: Enable named + tags: dns ansible.builtin.systemd: name: named enabled: yes -- name: start named - tags: dns,bastion +- name: Start named + tags: dns ansible.builtin.systemd: name: named state: started -- name: split IP addresses for use in templates - tags: dns,bastion +- name: Split IP addresses for use in templates + tags: dns set_fact: - bastion_split_ip: "{{ env_ip_bastion.split('.') }}" - bootstrap_split_ip: "{{ env_ip_bootstrap.split('.') }}" - cont_0_split_ip: "{{ env_ip_control_0.split('.') }}" - cont_1_split_ip: "{{ env_ip_control_1.split('.') }}" - cont_2_split_ip: "{{ env_ip_control_2.split('.') }}" - comp_0_split_ip: "{{ env_ip_compute_0.split('.') }}" - comp_1_split_ip: "{{ env_ip_compute_1.split('.') }}" + bastion_split_ip: "{{ env.ip.bastion.split('.') }}" + bootstrap_split_ip: "{{ env.ip.bootstrap.split('.') }}" - name: Template named.conf file to bastion - tags: dns,bastion + tags: dns template: src: dns-named.conf.j2 dest: /etc/named.conf @@ -37,28 +32,72 @@ mode: '0755' backup: yes -- name: Template DNS .db file to bastion - tags: dns,bastion +- name: Template DNS forwarding file to bastion + tags: dns template: src: dns.db.j2 - dest: /var/named/{{env_metadata_name}}.db + dest: /var/named/{{env.install_config.metadata_name}}.db owner: named group: named mode: '0755' backup: yes -- name: Template DNS .rev file to bastion - tags: dns,bastion +- name: Add control nodes to DNS forwarding file on bastion + tags: dns + lineinfile: + path: /var/named/{{env.install_config.metadata_name}}.db + insertafter: "entries for the control nodes" + line: "{{ env.hostname.control[i] }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. 
IN A {{ env.ip.control[i] }}" + with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Add compute nodes to DNS forwarding file on bastion + tags: dns + lineinfile: + path: /var/named/{{env.install_config.metadata_name}}.db + insertafter: "entries for the compute nodes" + line: "{{ env.hostname.compute[i] }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. IN A {{ env.ip.compute[i] }}" + with_sequence: start=0 end={{env.install_config.compute.replicas - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Template DNS reverse lookup file to bastion + tags: dns template: src: dns.rev.j2 - dest: /var/named/{{env_metadata_name}}.rev + dest: /var/named/{{env.install_config.metadata_name}}.rev owner: named group: named mode: '0755' backup: yes -- name: restart named to update changes made to DNS - tags: dns,bastion +- name: Add control nodes to DNS reverse lookup file on bastion + tags: dns + lineinfile: + path: /var/named/{{env.install_config.metadata_name}}.rev + insertafter: "PTR Record IP address to Hostname" + line: "{{ env.ip.control[i].split('.').3 }} IN PTR {{ env.hostname.control[i] }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}." + with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Add compute nodes to DNS reverse lookup file on bastion + tags: dns + lineinfile: + path: /var/named/{{env.install_config.metadata_name}}.rev + insertafter: "PTR Record IP address to Hostname" + line: "{{ env.ip.compute[i].split('.').3 }} IN PTR {{ env.hostname.compute[i] }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}." 
+ with_sequence: start=0 end={{env.install_config.compute.replicas - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Restart named to update changes made to DNS + tags: dns ansible.builtin.systemd: name: named state: restarted \ No newline at end of file diff --git a/roles/dns/templates/dns-named.conf.j2 b/roles/dns/templates/dns-named.conf.j2 index 4baf30e5..89918fb3 100644 --- a/roles/dns/templates/dns-named.conf.j2 +++ b/roles/dns/templates/dns-named.conf.j2 @@ -18,7 +18,7 @@ options { secroots-file "/var/named/data/named.secroots"; recursing-file "/var/named/data/named.recursing"; allow-query { any; }; - forwarders { {{ env_dns_forwarder }}; }; + forwarders { {{ env.networking.dns.forwarder }}; }; /* - If you are building an AUTHORITATIVE DNS server, do NOT enable recursion. @@ -60,9 +60,9 @@ include "/etc/named.rfc1912.zones"; include "/etc/named.root.key"; //forward zone -zone "{{ env_baseDomain }}" IN { +zone "{{ env.install_config.base_domain }}" IN { type master; - file "/var/named/{{ env_metadata_name }}.db"; + file "/var/named/{{ env.install_config.metadata_name }}.db"; allow-update { any; }; allow-query { any; }; }; @@ -70,7 +70,7 @@ zone "{{ env_baseDomain }}" IN { //backward zone zone "{{ bastion_split_ip.2 }}.{{ bastion_split_ip.1 }}.{{ bastion_split_ip.0 }}.in-addr.arpa" IN { type master; - file "/var/named/{{ env_metadata_name }}.rev"; + file "/var/named/{{ env.install_config.metadata_name }}.rev"; allow-update { any; }; allow-query { any; }; }; diff --git a/roles/dns/templates/dns.db.j2 b/roles/dns/templates/dns.db.j2 index 4976c66c..0412bc49 100644 --- a/roles/dns/templates/dns.db.j2 +++ b/roles/dns/templates/dns.db.j2 @@ -1,5 +1,5 @@ $TTL 86400 -@ IN SOA {{ env_bastion_name }}.{{ env_baseDomain }}. admin.{{ env_baseDomain }}.( +@ IN SOA {{ env.hostname.bastion }}.{{ env.install_config.base_domain }}. 
admin.{{ env.install_config.base_domain }}.( 2020021821 ;Serial 3600 ;Refresh 1800 ;Retry @@ -8,28 +8,23 @@ $TTL 86400 ) ;Name Server / Bastion Information -@ IN NS {{ env_bastion_name }}.{{ env_baseDomain }}. +@ IN NS {{ env.hostname.bastion }}.{{ env.install_config.base_domain }}. ;IP Address for Name Server -{{ env_bastion_name }} IN A {{ env_ip_bastion }} +{{ env.hostname.bastion }} IN A {{ env.ip.bastion }} ;entry for bootstrap host. -{{ env_bootstrap_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. IN A {{ env_ip_bootstrap }} +{{ env.hostname.bootstrap }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. IN A {{ env.ip.bootstrap }} -;entries for the master nodes -{{ env_control_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. IN A {{ env_ip_control_0 }} -{{ env_control_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. IN A {{ env_ip_control_1 }} -{{ env_control_2_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. IN A {{ env_ip_control_2 }} +;entries for the control nodes -;entries for the worker nodes -{{ env_compute_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. IN A {{ env_ip_compute_0 }} -{{ env_compute_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. IN A {{ env_ip_compute_1 }} +;entries for the compute nodes ;The api identifies the IP of your load balancer. -api.{{ env_metadata_name }} IN CNAME {{ env_bastion_name }}.{{ env_baseDomain }}. -api-int.{{ env_metadata_name }} IN CNAME {{ env_bastion_name }}.{{ env_baseDomain }}. +api.{{ env.install_config.metadata_name }} IN CNAME {{ env.hostname.bastion }}.{{ env.install_config.base_domain }}. +api-int.{{ env.install_config.metadata_name }} IN CNAME {{ env.hostname.bastion }}.{{ env.install_config.base_domain }}. ;The wildcard also identifies the load balancer. -*.apps.{{ env_metadata_name }} IN CNAME {{ env_bastion_name }}.{{ env_baseDomain }}. 
+*.apps.{{ env.install_config.metadata_name }} IN CNAME {{ env.hostname.bastion }}.{{ env.install_config.base_domain }}. ;EOF \ No newline at end of file diff --git a/roles/dns/templates/dns.rev.j2 b/roles/dns/templates/dns.rev.j2 index 8a70509e..212ab21e 100644 --- a/roles/dns/templates/dns.rev.j2 +++ b/roles/dns/templates/dns.rev.j2 @@ -1,5 +1,5 @@ $TTL 86400 -@ IN SOA {{ env_bastion_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. admin.{{ env_metadata_name }}.{{ env_baseDomain }} ( +@ IN SOA {{ env.hostname.bastion }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. admin.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }} ( 2020011800 ;Serial 3600 ;Refresh 1800 ;Retry @@ -7,18 +7,13 @@ $TTL 86400 86400 ;Minimum TTL ) ;Name Server Information -@ IN NS {{ env_bastion_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. -{{ env_bastion_name }} IN A {{ env_ip_bastion }} +@ IN NS {{ env.hostname.bastion }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. +{{ env.hostname.bastion }} IN A {{ env.ip.bastion }} ;Reverse lookup for Name Server -{{ bastion_split_ip.3 }} IN PTR {{ env_bastion_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. +{{ bastion_split_ip.3 }} IN PTR {{ env.hostname.bastion }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. ;PTR Record IP address to Hostname -{{ cont_0_split_ip.3 }} IN PTR {{ env_control_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. -{{ cont_1_split_ip.3 }} IN PTR {{ env_control_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. -{{ cont_2_split_ip.3 }} IN PTR {{ env_control_2_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. -{{ comp_0_split_ip.3 }} IN PTR {{ env_compute_0_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. -{{ comp_1_split_ip.3 }} IN PTR {{ env_compute_1_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. 
-{{ bootstrap_split_ip.3 }} IN PTR {{ env_bootstrap_name }}.{{ env_metadata_name }}.{{ env_baseDomain }}. -{{ bastion_split_ip.3 }} IN PTR api-int.{{ env_metadata_name }}.{{ env_baseDomain }}. -{{ bastion_split_ip.3 }} IN PTR api.{{ env_metadata_name }}.{{ env_baseDomain }}. \ No newline at end of file +{{ bootstrap_split_ip.3 }} IN PTR {{ env.hostname.bootstrap }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. +{{ bastion_split_ip.3 }} IN PTR api-int.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. +{{ bastion_split_ip.3 }} IN PTR api.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. \ No newline at end of file diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index 7df039a4..e6c369bb 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -1,31 +1,72 @@ --- - name: Load in variables from env.yaml - tags: haproxy,bastion + tags: haproxy include_vars: env.yaml - name: Change permissive domain for haproxy - tags: selinux,haproxy,bastion + tags: selinux,haproxy selinux_permissive: name: haproxy_t permissive: true -- name: use template to create haproxy config file - tags: haproxy,bastion +- name: Use template to create haproxy config file + tags: haproxy template: src: haproxy.cfg.j2 dest: /etc/haproxy/haproxy.cfg backup: yes force: yes -- name: enable haproxy - tags: haproxy,bastion - systemd: - enabled: yes - name: haproxy +- name: Add control node information to 6443 section in haproxy config + tags: haproxy + lineinfile: + line: " server {{ env.hostname.control[i] }} {{env.ip.control[i]}}:6443 check inter 1s" + path: /etc/haproxy/haproxy.cfg + insertafter: "6443 section" + with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Add control node information to 22623 section in haproxy config + tags: haproxy + lineinfile: + line: " server {{ 
env.hostname.control[i] }} {{env.ip.control[i]}}:22623 check inter 1s" + path: /etc/haproxy/haproxy.cfg + insertafter: "22623 section" + with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Add compute node information to 443 section in haproxy config + tags: haproxy + lineinfile: + line: " server {{ env.hostname.compute[i] }} {{ env.ip.compute[i] }}:443 check inter 1s" + path: /etc/haproxy/haproxy.cfg + insertafter: "443 section" + with_sequence: start=0 end={{env.install_config.compute.replicas - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Add compute node information to 80 section in haproxy config + tags: haproxy + lineinfile: + line: " server {{ env.hostname.compute[i] }} {{ env.ip.compute[i] }}:80 check inter 1s" + path: /etc/haproxy/haproxy.cfg + with_sequence: start=0 end={{env.install_config.compute.replicas - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Set haproxy boolean to enable connections + tags: haproxy + command: setsebool -P haproxy_connect_any 1 - name: Restart haproxy - tags: haproxy,bastion + tags: haproxy systemd: state: restarted name: haproxy diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/haproxy/templates/haproxy.cfg.j2 index e0a94b52..c90cd32c 100644 --- a/roles/haproxy/templates/haproxy.cfg.j2 +++ b/roles/haproxy/templates/haproxy.cfg.j2 @@ -27,32 +27,26 @@ frontend stats stats hide-version stats refresh 30s stats show-node - stats show-desc Stats for {{env_metadata_name}} cluster - stats auth admin:{{env_metadata_name}} + stats show-desc Stats for {{env.install_config.metadata_name}} cluster + stats auth admin:{{env.install_config.metadata_name}} stats uri /stats listen api-server-6443 bind *:6443 mode tcp - server {{ env_bootstrap_name }} {{env_ip_bootstrap}}:6443 check inter 1s backup - server {{env_control_0_name}} {{env_ip_control_0}}:6443 check inter 1s - server 
{{env_control_1_name}} {{env_ip_control_1}}:6443 check inter 1s - server {{env_control_2_name}} {{env_ip_control_2}}:6443 check inter 1s + #6443 section + server {{ env.hostname.bootstrap }} {{env.ip.bootstrap}}:6443 check inter 1s backup listen machine-config-server-22623 bind *:22623 mode tcp - server {{ env_bootstrap_name }} {{env_ip_bootstrap}}:22623 check inter 1s backup - server {{env_control_0_name}} {{env_ip_control_0}}:22623 check inter 1s - server {{env_control_1_name}} {{env_ip_control_1}}:22623 check inter 1s - server {{env_control_2_name}} {{env_ip_control_2}}:22623 check inter 1s + #22623 section + server {{ env.hostname.bootstrap }} {{env.ip.bootstrap}}:22623 check inter 1s backup listen ingress-router-443 bind *:443 mode tcp balance source - server {{ env_compute_0_name }} {{env_ip_compute_0}}:443 check inter 1s - server {{ env_compute_1_name }} {{env_ip_compute_1}}:443 check inter 1s + #443 section listen ingress-router-80 bind *:80 mode tcp balance source - server {{ env_compute_0_name }} {{env_ip_compute_0}}:80 check inter 1s - server {{ env_compute_1_name }} {{env_ip_compute_1}}:80 check inter 1s + #80 section \ No newline at end of file diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index 0ed10844..20be67f5 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -1,19 +1,19 @@ --- - name: Change permissive domain for httpd - tags: selinux,httpd,bastion + tags: httpd selinux_permissive: name: httpd_t permissive: true - name: enable httpd - tags: httpd,bastion + tags: httpd systemd: name: httpd enabled: yes - name: restart httpd - tags: httpd,bastion + tags: httpd service: name: httpd state: restarted \ No newline at end of file diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index aed54e5b..3e09dbba 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,14 +1,14 @@ --- - name: print the list of packages to be 
installed and updated - tags: pkg + tags: install_packages debug: var: packages - name: installing required packages. This may take a while, depending on the number of packages to be installed. - tags: pkg + tags: install_packages ansible.builtin.package: name: "{{ item }}" state: latest update_cache: yes - loop: "{{ packages }}" \ No newline at end of file + loop: "{{ packages }}" \ No newline at end of file diff --git a/roles/macvtap/tasks/main.yaml b/roles/macvtap/tasks/main.yaml index 9c8383e7..6df00c66 100644 --- a/roles/macvtap/tasks/main.yaml +++ b/roles/macvtap/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Set up macvtap bridge - tags: kvmhost, macvtap + tags: macvtap community.libvirt.virt_net: command: define name: macvtap-net @@ -9,14 +9,14 @@ xml: "{{ lookup ('template', 'macvtap.xml.j2') }}" - name: Start macvtap-net - tags: kvmhost, macvtap + tags: macvtap community.libvirt.virt_net: autostart: yes command: start name: macvtap-net - name: Set autostart for macvtap-net - tags: kvmhost, macvtap + tags: macvtap community.libvirt.virt_net: autostart: yes name: macvtap-net diff --git a/roles/macvtap/templates/macvtap.xml.j2 b/roles/macvtap/templates/macvtap.xml.j2 index 0a061025..6d0ab6fb 100644 --- a/roles/macvtap/templates/macvtap.xml.j2 +++ b/roles/macvtap/templates/macvtap.xml.j2 @@ -1,6 +1,6 @@ macvtap-net - - + + diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index 982b5302..5c977f36 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -1,21 +1,35 @@ --- - name: Load in variables from env.yaml - tags: kvm_host + tags: prep_kvm_guests include_vars: env.yaml -- name: get rhcos kernel - tags: kvm_host +- name: Check to see if kernel already exists on KVM host + tags: prep_kvm_guests + stat: + path: /var/lib/libvirt/images/rhcos-live-kernel-s390x + register: kernel_check + +- name: Get Red Hat CoreOS kernel + tags: prep_kvm_guests get_url: - url: "{{ env_rhcos_kernel 
}}" + url: "{{ env.coreos.kernel }}" dest: /var/lib/libvirt/images/rhcos-live-kernel-s390x mode: '0755' force: yes + when: kernel_check.stat.exists == false + +- name: Check to see if initramfs already exists on KVM host + tags: prep_kvm_guests + stat: + path: /var/lib/libvirt/images/rhcos-live-initramfs.s390x.img + register: initramfs_check -- name: get rhcos initramfs image - tags: kvm_host +- name: Get Red Hat CoreOS initramfs + tags: prep_kvm_guests get_url: url: "{{ env_rhcos_initramfs }}" dest: /var/lib/libvirt/images/rhcos-live-initramfs.s390x.img mode: '0755' - force: yes \ No newline at end of file + force: yes + when: initramfs_check.stat.exists == false \ No newline at end of file diff --git a/roles/reset_files/tasks/main.yaml b/roles/reset_files/tasks/main.yaml index a0cf35bf..e56825cc 100644 --- a/roles/reset_files/tasks/main.yaml +++ b/roles/reset_files/tasks/main.yaml @@ -2,8 +2,8 @@ - name: Load in variables from env.yaml include_vars: env.yaml -- name: delete files_to_reset from teardown.yaml +- name: Delete files_to_reset from teardown.yaml file: path: "{{ item }}" state: absent - loop: files_to_reset \ No newline at end of file + loop: "{{files_to_reset}}" \ No newline at end of file diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index 650bf417..dd34dde6 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -1,7 +1,7 @@ --- - name: Add ports to firewalld - tags: firewall,bastion + tags: set_firewall firewalld: port: "{{ item }}" permanent: yes @@ -16,7 +16,7 @@ - 53/udp - name: Permit traffic in default zone for http and https - tags: firewall,bastion + tags: set_firewall ansible.posix.firewalld: service: "{{ item }}" permanent: yes @@ -26,7 +26,7 @@ - https - name: Ensure the default Apache port is 8080 - tags: httpconf,bastion.firewall + tags: set_firewall lineinfile: path: /etc/httpd/conf/httpd.conf search_string: 'Listen 80' @@ -34,7 +34,7 @@ backup: yes - name: 
Ensure the SSL default port is 4443 - tags: httpconf,bastion,firewall + tags: set_firewall replace: path: /etc/httpd/conf.d/ssl.conf regexp: '^Listen 443 https' @@ -42,13 +42,13 @@ backup: yes - name: reload firewalld to reflect changes - tags: firewall,bastion + tags: set_firewall systemd: name: firewalld state: reloaded - name: restart httpd - tags: firewall,bastion + tags: set_firewall service: name: httpd state: restarted \ No newline at end of file diff --git a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml index 372e6392..f71da992 100644 --- a/roles/set_inventory/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -4,30 +4,41 @@ tags: setup include_vars: env.yaml -- name: Populate inventory file with ip variables from env.yaml +- name: Populate inventory with KVM host, bastion and bootstrap IP addresses tags: setup blockinfile: path: inventory block: | [kvm_host] - {{ env_ip_kvm_host }} + {{env.ip.kvm}} [bastion] - {{ env_ip_bastion }} + {{env.ip.bastion}} [bootstrap] - {{ env_ip_bootstrap }} + {{env.ip.bootstrap}} [control_nodes] - {{ env_ip_control_0 }} - {{ env_ip_control_1 }} - {{ env_ip_control_2 }} [compute_nodes] - {{env_ip_compute_0}} - {{env_ip_compute_1}} state: present +- name: Add control nodes' IP addresses to inventory + tags: setup + lineinfile: + path: inventory + insertafter: "control_nodes" + line: "{{ item }}" + loop: "{{env.ip.control}}" + +- name: Add compute nodes' IP addresses to inventory + tags: setup + lineinfile: + path: inventory + insertafter: "compute_nodes" + line: "{{ item }}" + loop: "{{env.ip.compute}}" + - name: check inventory setup tags: setup command: ansible-inventory --list @@ -42,19 +53,12 @@ tags: setup meta: refresh_inventory -- name: fill ansible.cfg with provided variable ansible ssh key file name - tags: setup - ansible.builtin.lineinfile: - path: ansible.cfg - insertafter: '\[defaults\]' - line: private_key_file=~/.ssh/{{ env_ssh_ans_name }} - - name: fill ansible.cfg with 
default ansible password tags: setup ansible.builtin.lineinfile: path: ansible.cfg insertafter: '\[defaults\]' - line: ansible_password={{env_vm_root_passwd}} + line: ansible_password={{env.access.login.kvm.root_password}} - name: fill ansible.cfg with default ansible user tags: setup @@ -67,27 +71,27 @@ tags: setup file: state: absent - path: roles/get-ocp/files/ocp_ssh_pub + path: roles/get_ocp/files/ocp_ssh_pub - name: create ocp_ssh_pub if it needs to be tags: setup file: - path: roles/get-ocp/files/ocp_ssh_pub + path: roles/get_ocp/files/ocp_ssh_pub mode: '0755' state: touch -- name: comment out auto-attach rhel subscription line in main.yaml if requested with env.yaml auto_attach_rhel_sub boolean +- name: comment out auto-attach RHEL subscription role calls in main.yaml if requested tags: setup replace: path: main.yaml regexp: "- attach_subscription" replace: "#- attach_subscription" - when: not auto_attach_rhel_sub + when: not env.redhat.attach_subscription -- name: Comment out DNS setup role in main.yaml if requested with env.yaml env_dns_on_bastion boolean +- name: Comment out DNS setup on bastion role calls in main.yaml if requested tags: setup replace: regexp: "- dns" path: main.yaml replace: "#- dns" - when: not env_dns_on_bastion \ No newline at end of file + when: not env.networking.dns.setup_on_bastion \ No newline at end of file diff --git a/roles/ssh_agent/tasks/main.yaml b/roles/ssh_agent/tasks/main.yaml index f3910dfc..78ed9abd 100644 --- a/roles/ssh_agent/tasks/main.yaml +++ b/roles/ssh_agent/tasks/main.yaml @@ -1,11 +1,11 @@ --- -- name: add ansible ssh key to ssh-agent. See README Step 5 note for additional details. 
- tags: ssh-agent - shell: eval $(ssh-agent) && ssh-add ~/.ssh/{{env_ssh_ans_name}} +- name: Add ansible SSH key to ssh-agent + tags: ssh_agent, ssh + shell: eval $(ssh-agent) && ssh-add ~/.ssh/ansible register: ssh_agent_setup -- name: print results from setting up ssh agent - tags: ssh-agent +- name: Print results from setting up SSH agent + tags: ssh_agent, ssh debug: - var: ssh_agent_setup + var: ssh_agent_setup \ No newline at end of file diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index 23f0c2d7..862d20ac 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -1,24 +1,24 @@ --- - name: Load in variables - tags: ssh-copy-id + tags: ssh_copy_id, ssh include_vars: "{{item}}" with_items: - env.yaml - roles/ssh_copy_id/vars/path_to_key_pair.yaml -- name: get ansible.pub key for check in next task - tags: ssh-copy-id +- name: Get ansible.pub key for check in next task + tags: ssh_copy_id, ssh set_fact: ans_pub_key: "{{ lookup('file', '~/.ssh/ansible.pub') }}" -- name: print key_check - tags: ssh-copy-id +- name: Print Ansible public key + tags: ssh_copy_id, ssh debug: msg: "{{ ans_pub_key }}" -- name: delete ssh key from known hosts if it already exists for idempotency - tags: ssh-copy-id +- name: Delete SSH key from known hosts if it already exists for idempotency + tags: ssh_copy_id, ssh lineinfile: path: "~/.ssh/known_hosts" line: "{{ ssh_target_ip }}" @@ -26,30 +26,30 @@ delegate_to: localhost - name: Use template file to create expect script - tags: ssh-copy-id + tags: ssh_copy_id, ssh template: src: ssh-copy-id.exp.j2 dest: roles/ssh_copy_id/files/ssh-copy-id-expect-pass.exp force: yes - name: Copy SSH ID to remote host with pre-provided password - tags: ssh-copy-id + tags: ssh_copy_id, ssh command: "expect roles/ssh_copy_id/files/ssh-copy-id-expect-pass.exp" register: ssh_copy -- name: delete templated expect script - tags: ssh-copy-id +- name: Print results of copying ssh id to remote 
host. + tags: ssh_copy_id, ssh + debug: + var: ssh_copy + +- name: Delete templated expect script + tags: ssh_copy_id, ssh file: path: roles/ssh_copy_id/files/ssh-copy-id-expect-pass.exp state: absent -- name: re-create ssh-copy-id files folder - tags: ssh-copy-id,ssh +- name: Re-create ssh-copy-id files folder + tags: ssh_copy_id, ssh file: path: roles/ssh_copy_id/files/ - state: directory - -- name: Print results of copying ssh id to remote host. - tags: ssh-copy-id - debug: - var: ssh_copy \ No newline at end of file + state: directory \ No newline at end of file diff --git a/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 b/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 index d132625e..288c6b1c 100644 --- a/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 +++ b/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 @@ -1,7 +1,6 @@ #!/usr/local/bin/expect -f -set force_conservative 0 ;# set to 1 to force conservative mode even if - ;# script wasn't run conservatively originally +set force_conservative 0 if {$force_conservative} { set send_slow {1 .1} proc send {ignore arg} { @@ -13,5 +12,5 @@ if {$force_conservative} { set timeout -1 spawn ssh-copy-id -o StrictHostKeyChecking=no -i {{path_to_key_pair}} root@{{ ssh_target_ip }} expect "*assword: " -send -- "{{env_vm_passwd}}\r" +send -- "{{env.access.login.kvm.root_password}}\r" expect eof \ No newline at end of file diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index 61b4f309..5ad1aacf 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -1,18 +1,18 @@ --- -- name: Check to see if local .ssh directory exists - tags: keymastr +- name: Check to see if local SSH directory exists + tags: ssh_key_gen, ssh stat: path: "~/.ssh" register: ssh_directory_exists_check -- name: Print results of .ssh directory check - tags: keymastr +- name: Print results of SSH directory check + tags: ssh_key_gen, ssh debug: var: ssh_directory_exists_check -- name: Create .ssh 
local directory if it doesn't already exist - tags: keymastr +- name: Create SSH local directory if it doesn't already exist + tags: ssh_key_gen, ssh file: path: "~/.ssh" state: directory @@ -20,50 +20,47 @@ register: ssh_directory_creation when: ssh_directory_exists_check is defined and ssh_directory_exists_check.stat.exists == false -- name: Print results of ssh directory creation - tags: keymastr +- name: Print results of SSH directory creation + tags: ssh_key_gen, ssh debug: var: ssh_directory_creation -- name: Check .ssh key pair files exist +- name: Check SSH if key pair files exist stat: path: "~/.ssh/{{item}}" register: ssh_key_file_exists_check with_items: - - "{{env_ssh_ans_name}}" - - "{{env_ssh_ans_name}}.pub" + - "ansible" + - "ansible.pub" -- name: Print results of ssh key pair files check - tags: keymastr +- name: Print results of SSH key pair files check + tags: ssh_key_gen, ssh debug: var: ssh_key_file_exists_check.results[0].stat.exists -- name: create a vars file for key path - tags: keymastr +- name: Create a vars file for path to key + tags: ssh_key_gen, ssh file: state: touch path: roles/ssh_copy_id/vars/path_to_key_pair.yaml -- name: Save key path for use in ssh-copy-id role - tags: keymastr +- name: Save path to key pair for use in ssh-copy-id role + tags: ssh_key_gen, ssh lineinfile: line: "path_to_key_pair: {{ssh_key_file_exists_check.results[1].invocation.module_args.path}}" path: roles/ssh_copy_id/vars/path_to_key_pair.yaml -- name: Print results of ssh key pair files check - tags: keymastr - debug: - var: ssh_key_file_exists_check.results[0].stat.exists - -- name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key - tags: keymastr +- name: Generate an OpenSSH keypair with the default values (4096 bits, RSA) + tags: ssh_key_gen, ssh community.crypto.openssh_keypair: - path: ~/.ssh/{{ env_ssh_ans_name }} + path: ~/.ssh/ansible passphrase: "" + comment: "{{ env.access.ssh.ansible.comment }}" 
register: ssh_key_creation when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false - name: Print results of ssh key pair creation - tags: keymastr + tags: ssh_key_gen, ssh debug: - var: ssh_key_creation \ No newline at end of file + var: ssh_key_creation + when: ssh_key_creation.changed == true \ No newline at end of file diff --git a/roles/teardown_vms/tasks/main.yaml b/roles/teardown_vms/tasks/main.yaml index 5aac979e..324f9e61 100644 --- a/roles/teardown_vms/tasks/main.yaml +++ b/roles/teardown_vms/tasks/main.yaml @@ -1,15 +1,67 @@ --- -- name: Destroy running VMs. Expect ignored errors if some VMs are already destroyed. +- name: Destroy bastion for full, skip for partial teardown.. Expect ignored errors if it is already destroyed. community.libvirt.virt: - name: "{{ item }}" + name: "{{ env.hostname.bastion }}" command: destroy - loop: "{{ vms }}" + when: bastion_teardown ignore_errors: yes -- name: Undefine remaining existing VMs. Expect ignored errors if some VMs are already undefined. +- name: Undefine bastion for full, skip for partial teardown. Expect ignored errors if it is already undefined. community.libvirt.virt: - name: "{{ item }}" + name: "{{ env.hostname.bastion }}" command: undefine - loop: "{{ vms }}" + when: bastion_teardown + ignore_errors: yes + +- name: Destroy bootstrap. Expect ignored errors if it is already destroyed. + community.libvirt.virt: + name: "{{ env.hostname.bootstrap }}" + command: destroy + ignore_errors: yes + +- name: Undefine bootstrap. Expect ignored errors if it is already undefined. + community.libvirt.virt: + name: "{{ env.hostname.bootstrap }}" + command: undefine + ignore_errors: yes + +- name: Destroy running control nodes. Expect ignored errors if some VMs are already destroyed. 
+ community.libvirt.virt: + name: "{{ env.hostname.control[i] }}" + command: destroy + with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 + loop_control: + extended: yes + index_var: i + ignore_errors: yes + +- name: Undefine remaining control nodes. Expect ignored errors if some VMs are already undefined. + community.libvirt.virt: + name: "{{ env.hostname.control[i] }}" + command: undefine + with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 + loop_control: + extended: yes + index_var: i + ignore_errors: yes + +- name: Destroy running compute nodes. Expect ignored errors if some VMs are already destroyed. + community.libvirt.virt: + name: "{{ env.hostname.compute[i] }}" + command: destroy + with_sequence: start=0 end={{env.install_config.compute.replicas - 1}} stride=1 + loop_control: + extended: yes + index_var: i + ignore_errors: yes + +- name: Undefine remaining compute nodes. Expect ignored errors if some VMs are already undefined. + community.libvirt.virt: + name: "{{ env.hostname.compute[i] }}" + command: undefine + with_sequence: start=0 end={{env.install_config.compute.replicas - 1}} stride=1 + loop_control: + extended: yes + index_var: i ignore_errors: yes \ No newline at end of file diff --git a/roles/wait_for_bootstrap/tasks/main.yaml b/roles/wait_for_bootstrap/tasks/main.yaml index 0ff5311d..4cf3ccb7 100644 --- a/roles/wait_for_bootstrap/tasks/main.yaml +++ b/roles/wait_for_bootstrap/tasks/main.yaml @@ -10,7 +10,7 @@ retries: 120 delay: 30 -- name: print output of oc whoami, should be system:admin +- name: Print output of oc whoami, should be system:admin tags: wait_for_bootstrap debug: var: oc_whoami.stdout @@ -34,15 +34,15 @@ - name: Destroy bootstrap. Expect ignored errors if bootstrap is already destroyed. 
tags: wait_for_bootstrap community.libvirt.virt: - name: "{{ env_bootstrap_name }}" + name: "{{ env.hostname.bootstrap }}" command: destroy ignore_errors: yes - delegate_to: "{{ env_ip_kvm_host}}" + delegate_to: "{{ env.ip.kvm }}" - name: Undefine bootstrap. Expect ignored errors if bootstrap is already undefined. tags: wait_for_bootstrap community.libvirt.virt: - name: "{{ env_bootstrap_name }}" + name: "{{ env.hostname.bootstrap }}" command: undefine ignore_errors: yes - delegate_to: "{{ env_ip_kvm_host}}" + delegate_to: "{{ env.ip.kvm }}" diff --git a/roles/wait_for_cluster_operators/tasks/main.yaml b/roles/wait_for_cluster_operators/tasks/main.yaml index d995411b..f0842dcc 100644 --- a/roles/wait_for_cluster_operators/tasks/main.yaml +++ b/roles/wait_for_cluster_operators/tasks/main.yaml @@ -1,23 +1,69 @@ --- -- name: Check cluster operators +- name: First round of checking cluster operators tags: wait_for_cluster_operators environment: KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: oc get co register: oc_get_co -- name: print clusteroperator status +- name: View first round cluster operator status check tags: wait_for_cluster_operators debug: var: oc_get_co.stdout_lines -- name: Make sure all cluster operators are Available before continuing. This may take a while. +- name: First round of waiting for cluster operators. Trying 5 times before printing status again. 
tags: wait_for_cluster_operators environment: KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: oc get co | awk '{print $3}' register: co_check until: ("False" not in co_check.stdout) - retries: 120 + retries: 5 + delay: 30 + ignore_errors: yes + +- name: Second round of checking cluster operators + tags: wait_for_cluster_operators + environment: + KUBECONFIG: "/ocpinst/auth/kubeconfig" + shell: oc get co + register: oc_get_co + +- name: View second round cluster operator status check + tags: wait_for_cluster_operators + debug: + var: oc_get_co.stdout_lines + +- name: Second round of waiting for cluster operators. Trying 5 times before printing status again. + tags: wait_for_cluster_operators + environment: + KUBECONFIG: "/ocpinst/auth/kubeconfig" + shell: oc get co | awk '{print $3}' + register: co_check + until: ("False" not in co_check.stdout) + retries: 5 + delay: 30 + ignore_errors: yes + +- name: Third round of checking cluster operators + tags: wait_for_cluster_operators + environment: + KUBECONFIG: "/ocpinst/auth/kubeconfig" + shell: oc get co + register: oc_get_co + +- name: View third round clusteroperator status check + tags: wait_for_cluster_operators + debug: + var: oc_get_co.stdout_lines + +- name: Third and final round of waiting for cluster operators. Trying 10 times before failing. 
+ tags: wait_for_cluster_operators + environment: + KUBECONFIG: "/ocpinst/auth/kubeconfig" + shell: oc get co | awk '{print $3}' + register: co_check + until: ("False" not in co_check.stdout) + retries: 10 delay: 30 \ No newline at end of file diff --git a/roles/wait_for_install_complete/tasks/main.yaml b/roles/wait_for_install_complete/tasks/main.yaml index 6ba1292b..f794138c 100644 --- a/roles/wait_for_install_complete/tasks/main.yaml +++ b/roles/wait_for_install_complete/tasks/main.yaml @@ -13,7 +13,7 @@ - name: Set OCP URL tags: wait_for_install_complete set_fact: - ocp_url: https://console-openshift-console.apps.{{env_metadata_name}}.{{env_baseDomain}} + ocp_url: https://console-openshift-console.apps.{{env.install_config.metadata_name}}.{{env.install_config.base_domain}} - name: Set OCP password tags: wait_for_install_complete diff --git a/teardown.yaml b/teardown.yaml index bd65ffc0..4513b37e 100644 --- a/teardown.yaml +++ b/teardown.yaml @@ -1,29 +1,14 @@ --- -# Use the "full" tag when running this playbook to reset your installation back to the point where no VMs were running on the KVM host. -# If you have more nodes than what is present in the "vms" list below, feel free to add more to the list. -# After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastionvm,bastion,create_nodes'" - -- hosts: localhost - tags: full_teardown - connection: local - become: false - gather_facts: no - vars_files: - - env.yaml - tasks: - - name: remove bastion from localhost's known_hosts file - lineinfile: - path: "~/.ssh/known_hosts" - regexp: "{{ env_ip_bastion}}" - state: absent +# Use the "full_teardown" tag when running this playbook to reset your installation back to the point where no VMs were running on the KVM host. 
+# After you run this playbook, run the main playbook from the beginning with no tags ("ansible-playbook main.yaml --ask-become-pass") - hosts: kvm_host tags: full_teardown become: true gather_facts: no vars: - - vms: ['{{env_bastion_name}}', '{{env_bootstrap_name}}', '{{env_control_0_name}}', '{{env_control_1_name}}', '{{env_control_2_name}}', '{{env_compute_0_name}}', '{{env_compute_1_name}}'] + - bastion_teardown: yes vars_files: - env.yaml post_tasks: @@ -32,19 +17,45 @@ paths: /var/lib/libvirt/images file_type: file excludes: - - lost+found + - "lost+found" register: found_files - name: delete files in /var/lib/libvirt/images except for lost+found file: path: "{{ item.path }}" state: absent with_items: "{{ found_files['files'] }}" + - name: remove local workstation from KVM hosts' authorized_keys file + file: + path: "~/.ssh/authorized_keys" + state: absent roles: - teardown_vms -# Use the "partial" tag when running this playbook to reset your installation back to the point where just the bastion node was running on the KVM host. -# If you have more nodes than what is present in the "vms" list below, feel free to add more to the list. 
-# After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastion,create_nodes'" +- hosts: localhost + tags: full_teardown + connection: local + become: false + gather_facts: no + vars: + - files_to_reset: ['~/.ssh/ansible','~/.ssh/ansible.pub'] # feel free to add as needed + vars_files: + - env.yaml + pre_tasks: + - name: remove bastion from localhost's known_hosts file + lineinfile: + path: "~/.ssh/known_hosts" + regexp: "{{ env.ip.bastion}}" + state: absent + - name: remove KVM host from localhost's known_hosts file + lineinfile: + path: "~/.ssh/known_hosts" + regexp: "{{ env.ip.kvm}}" + state: absent + roles: + - reset_files + +# Use the "partial_teardown" tag when running this playbook to reset your installation back to the point where just the bastion node was running on the KVM host. +# After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastion,create_nodes,verification'" - hosts: localhost tags: partial_teardown @@ -57,7 +68,7 @@ - name: remove bastion from localhost's known_hosts file lineinfile: path: "~/.ssh/known_hosts" - regexp: "{{ env_ip_bastion}}" + regexp: "{{ env.ip.bastion}}" state: absent - hosts: bastion @@ -75,26 +86,9 @@ tags: partial_teardown become: true gather_facts: no + vars: + - bastion_teardown: no vars_files: - env.yaml - pre_tasks: - - name: Create list of VMs to teardown. - set_fact: - vms: ['{{env_bootstrap_name}}','{{env_control_0_name}}','{{env_control_1_name}}','{{env_control_2_name}}','{{env_compute_0_name}}','{{env_compute_1_name}}'] - roles: - - teardown_vms - -# Use the "boot_teardown" tag when running this playbook to reset your installation back to the point where just the bastion node was running on the KVM host. - -- hosts: kvm_host - tags: boot_teardown - become: true - gather_facts: no - vars_files: - - env.yaml - pre_tasks: - - name: Create list of VMs to teardown. 
- set_fact: - vms: ['{{env_bootstrap_name}}'] roles: - - teardown_vms + - teardown_vms \ No newline at end of file From fda2df46d1ed3808a9b8889860f7efba832e6dab Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 9 Dec 2021 03:08:04 -0600 Subject: [PATCH 551/885] Updated to include scaling 1.2.0 update information Signed-off-by: Jacob Emery --- CHANGELOG.md | 38 +++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 76f8fd1c..144031c4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,11 +2,28 @@ All notable changes to this project will be documented in this file. -### Jump-To: -* [Latest](#1.1.0) -* [1.0.0](#1.0.0) -* [0.0.1](#0.0.1) -* [Roadmap](#Roadmap) +## 1.2.0 +### Scaling Update - 2021-12-09 +### Summary +- Now supports any number of control and compute nodes to be provisioned in the cluster. +- This update heavily modifies the variable structure in env.yaml in order to make scaling work. + +### Added +- Support for scaling of control and compute nodes. + +### Modified +- Variable structure in env.yaml in order to support scaling. +- Tags to match their corresponding role. +- Every reference to a variable from env.yaml to match the new structure. + +## Roadmap +* Add option to have load balancer on bastion or not +* Add option for OpenShift to use a proxy server +* Add picture of finished infrastructure to README +* Add README’s for each role +* Make ssh-copy-id role idempotent +* Air-gapped (disconnected) install of OpenShift option +* Add an option to automte the creation of an LPAR and install RHEL on KVM host ## 1.1.0 ### Automated OCP Verification Update - 2021-12-03 @@ -50,13 +67,4 @@ All notable changes to this project will be documented in this file. 
- Encryption of env.yaml as it was unnecessary and increased complexity ## 0.0.1 -### Unreleased 2021-08-24 - -## Roadmap -* Add option in env.yaml to create HAProxy on bastion or not -* Add option for using a proxy server for OpenShift in install-config via env.yaml -* Add functionality to provision more than 3 control and 2 compute nodes -* Make ssh-copy-id role idempotent. -* Add picture of finished infrastructure to README -* Add README’s for each role -* Air-gapped (disconnected) install of OpenShift option \ No newline at end of file +### Unreleased 2021-08-24 \ No newline at end of file From 1d82b9b2557556c4bb344a33518ca8043a6d5bd3 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 9 Dec 2021 03:22:25 -0600 Subject: [PATCH 552/885] Changed headers Signed-off-by: Jacob Emery --- CHANGELOG.md | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 144031c4..10051402 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,8 +2,7 @@ All notable changes to this project will be documented in this file. -## 1.2.0 -### Scaling Update - 2021-12-09 +## 1.2.0 - Scaling Update - 2021-12-09 ### Summary - Now supports any number of control and compute nodes to be provisioned in the cluster. - This update heavily modifies the variable structure in env.yaml in order to make scaling work. @@ -25,8 +24,7 @@ All notable changes to this project will be documented in this file. * Air-gapped (disconnected) install of OpenShift option * Add an option to automte the creation of an LPAR and install RHEL on KVM host -## 1.1.0 -### Automated OCP Verification Update - 2021-12-03 +## 1.1.0 - Automated OCP Verification Update - 2021-12-03 ### Summary - Fully automated all OCP verification steps. Cutting the number of steps nearly in half. The main playbook can now run completely hands-off from kicking it off all the way to an operational cluster. The last step provides the first-time login credentials. 
@@ -42,8 +40,7 @@ All notable changes to this project will be documented in this file. ### Removed - Instructions in README for doing OCP verification steps manually -## 1.0.0 -### Automated Bastion Update 2021-11-24 +## 1.0.0 - Automated Bastion Update 2021-11-24 ### Summary - Fully automated bastion installation and configuration using cloud-init From 2fa77811e568d17b91aaa4f70b335c9227366ba3 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 9 Dec 2021 03:22:39 -0600 Subject: [PATCH 553/885] Fixed typos Signed-off-by: Jacob Emery --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 30314661..55f4cf06 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,7 @@ for local workstation running Ansible * 1 TB of disk space mounted to /var/lib/libvirt/images * Red Hat Enterprise Linux (RHEL) 8.4 with networking configured and a root password set * Access to 8 (for a minimum installation) pre-allocated IPv4 addresses -* Note on DNS: The [main playbook](main.yaml) will create a DNS server on the bastion by default. If you plan to use a pre-existing DNS server instead, please make sure to mark the variable env.networking.dns.setup_on_bastion to 'false' in [env.yaml](env.yaml) to skip that step. Either way, the playbook will double-check the DNS configuration before continuing. +* Note on DNS: The [main playbook](main.yaml) will create a DNS server on the bastion by default. If you plan to use a pre-existing DNS server instead, when filling out the variables in [env.yaml](env.yaml) in Step 3, please make sure to mark 'env.networking.dns.setup_on_bastion' to 'false'. Either way, the playbook will double-check the DNS configuration before continuing. 
## Installation Instructions: @@ -44,7 +44,7 @@ for local workstation running Ansible * Navigate to a folder where you would like to store this project in your terminal * Run "git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git" * **Step 2: Get Red Hat Info** - * In a web browser, navigate to Red Hat's [customer portal](https://access.redhat.com/products/red-hat-enterprise-linux/), click on the 'Download Latest' button, use the drop-down to select Red Hat Enterprise Linux for IBM z Systems, select your desired version, make sure 'Architcture' is 's390x', and then scroll down to 'Red Hat Enterprise Linux X.X Update KVM Guest Image' and click on 'Download Now'. See where it downloads, copy the path and paste it into [env.yaml](env.yaml) as the variable 'env_rhel_qcow2'. + * In a web browser, navigate to Red Hat's [customer portal](https://access.redhat.com/products/red-hat-enterprise-linux/), click on the 'Download Latest' button, use the drop-down to select Red Hat Enterprise Linux for IBM z Systems, select your desired version, make sure 'Architcture' is 's390x', and then scroll down to 'Red Hat Enterprise Linux X.X Update KVM Guest Image' and click on 'Download Now'. See where it downloads, copy the path and paste it into [env.yaml](env.yaml) as the variable 'env.redhat.path_to_qcow2'. * In a web browser, navigate to the Red Hat [console](https://console.redhat.com/openshift/install/ibmz/user-provisioned) and copy the OpenShift pull secret and paste it into [env.yaml](env.yaml) as the variable 'env_pullSecret'. 
* **Step 3: Set Variables** * In a text editor of your choice, open [env.yaml](env.yaml) @@ -72,10 +72,10 @@ for local workstation running Ansible ## Troubleshooting: If you encounter errors while running the main playbook, there are a few things you can do: -1) Double check your variables in env.yaml +1) Double check your variables in [env.yaml](env.yaml) 2) Inspect the part that failed by opening roles/role_name/tasks/main.yaml 3) Google the specific error message -3) Re-Run the role indivually with [tags](#Tags) +3) Re-run the role indivually with [tags](#Tags) and the verbosity '-v' option to get more debugging information (more v's give more info). For example: "ansible-playbook main.yaml --ask-become-pass --tags get_ocp -vvv" 4) Teardown troublesome KVM guests with [teardown](#Teardown) scripts and start again with [tags](#Tags). To start from the beginning, run "ansible-playbook teardown.yaml --ask-become-pass --tags full_teardown 6) E-mail Jacob Emery at jacob.emery@ibm.com 7) If it's a problem with an OpenShift verification step, first re-reun the role with [tags](#Tags). If that doesn't work, SSH into the bastion as root ("ssh root@bastion-ip-address-here") and then run,"export KUBECONFIG=/ocpinst/auth/kubeconfig" and then "oc whoami" and make sure it ouputs "system:admin". Then run the shell command from the role you would like to check on manually: i.e. 'oc get nodes', 'oc get co', etc. 
From e3f2ae220ce8af4f6719bf5c116494041e89ec0c Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 9 Dec 2021 12:17:44 -0600 Subject: [PATCH 554/885] Removed dev/test info Signed-off-by: Jacob Emery --- env.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env.yaml b/env.yaml index 36ee5b0b..b89a890b 100644 --- a/env.yaml +++ b/env.yaml @@ -12,7 +12,7 @@ env: install_config: api_version: v1 metadata_name: #X #Will be combined with base_domain to create FQDNs - base_domain: sanfran.pbm.ihost.com #X + base_domain: #X compute: replicas: #X architecture: s390x From 7de1f4655bc3e819e135d075a10ff5f101bb7cfb Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 9 Dec 2021 12:23:35 -0600 Subject: [PATCH 555/885] Added name-of-file.qcow2 to example for path_to_qcow2 variable Signed-off-by: Jacob Emery --- env.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env.yaml b/env.yaml index b89a890b..d0be5a93 100644 --- a/env.yaml +++ b/env.yaml @@ -8,7 +8,7 @@ env: attach_subscription: true username: #X password: #X - path_to_qcow2: #Absolute path to RHEL qcow2 file on workstation running Ansible, i.e. /Users/username/Downloads (If unclear, see README step 2) + path_to_qcow2: #Absolute path to RHEL qcow2 file on workstation running Ansible, i.e. 
/Users/username/Downloads/name-of-file.qcow2 (If unclear, see README step 2) install_config: api_version: v1 metadata_name: #X #Will be combined with base_domain to create FQDNs From dc7ca0a26f2edde0dbd318d53fe14bcbbef3b9c0 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 10 Dec 2021 12:36:03 -0600 Subject: [PATCH 556/885] Added .gitkeep file Signed-off-by: Jacob Emery --- roles/ssh_copy_id/files/.gitkeep | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 roles/ssh_copy_id/files/.gitkeep diff --git a/roles/ssh_copy_id/files/.gitkeep b/roles/ssh_copy_id/files/.gitkeep new file mode 100644 index 00000000..e69de29b From d115afe767ea91c3ed5b6746ec6b552f7eaa80e1 Mon Sep 17 00:00:00 2001 From: Nico Boehr Date: Thu, 2 Dec 2021 14:44:44 +0100 Subject: [PATCH 557/885] ssh-copy-id: make expect script idempotent This makes the expect script idempotent, if the SSH key was already present on the machine. --- roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 b/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 index 288c6b1c..8d255022 100644 --- a/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 +++ b/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 @@ -11,6 +11,10 @@ if {$force_conservative} { set timeout -1 spawn ssh-copy-id -o StrictHostKeyChecking=no -i {{path_to_key_pair}} root@{{ ssh_target_ip }} -expect "*assword: " -send -- "{{env.access.login.kvm.root_password}}\r" -expect eof \ No newline at end of file +expect { + "*assword: " { + send -- "{{env.access.login.kvm.root_password}}\r" + expect eof + } + "Number of key(s) added:" {} +} From bc0e5a151e59009c87dadf83a7cdcda89c1ab993 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 14 Dec 2021 10:35:48 -0600 Subject: [PATCH 558/885] Fixed template to use bastion's password correctly Signed-off-by: Jacob Emery --- roles/create_bastion/templates/cloud_init.cfg.j2 | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/roles/create_bastion/templates/cloud_init.cfg.j2 b/roles/create_bastion/templates/cloud_init.cfg.j2 index 281902f2..fd6fba98 100644 --- a/roles/create_bastion/templates/cloud_init.cfg.j2 +++ b/roles/create_bastion/templates/cloud_init.cfg.j2 @@ -15,7 +15,7 @@ disable_root: false chpasswd: list: | root:{{ env.access.login.bastion.root_password }} - {{ env.access.login.bastion.username }}:{{ env.access.login.bastion.username }} + {{ env.access.login.bastion.username }}:{{ env.access.login.bastion.password }} expire: False #growpart: From 9e432a6f9aec20697cbce6837deb14fccc665d3b Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 14 Dec 2021 10:38:20 -0600 Subject: [PATCH 559/885] Fixed initframfs variable reference to new structure Signed-off-by: Jacob Emery --- roles/prep_kvm_guests/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index 5c977f36..57625a7e 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -28,7 +28,7 @@ - name: Get Red Hat CoreOS initramfs tags: prep_kvm_guests get_url: - url: "{{ env_rhcos_initramfs }}" + url: "{{ env.coreos.initramfs }}" dest: /var/lib/libvirt/images/rhcos-live-initramfs.s390x.img mode: '0755' force: yes From f7cd437ef828b9b042c77c3559756ecad14fc1ce Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 23 Dec 2021 00:19:04 -0500 Subject: [PATCH 560/885] Added removal and recreation of OCP SSH key variables to teardown playbook Signed-off-by: Jacob Emery --- teardown.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/teardown.yaml b/teardown.yaml index 4513b37e..40f8ae3a 100644 --- a/teardown.yaml +++ b/teardown.yaml @@ -51,6 +51,20 @@ path: "~/.ssh/known_hosts" regexp: "{{ env.ip.kvm}}" state: absent + - name: Delete SSH key vars files + file: + path: "{{ item }}" + state: absent + loop: + - "{{ 
roles/get_ocp/files/ocp_ssh_pub.yaml }}" + - "{{ roles/ssh_copy_id/vars/path_to_key_pair.yaml }}" + - name: Recreate empty SSH key vars files + file: + path: "{{ item }}" + state: absent + loop: + - "{{ roles/get_ocp/files/ocp_ssh_pub.yaml }}" + - "{{ roles/ssh_copy_id/vars/path_to_key_pair.yaml }}" roles: - reset_files From 90142b2ff6ab896513df6b354105f93c635bde55 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 23 Dec 2021 00:23:56 -0500 Subject: [PATCH 561/885] Added additional command to pre-requisites for Mac users Signed-off-by: Jacob Emery --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 55f4cf06..cffcccc5 100644 --- a/README.md +++ b/README.md @@ -27,8 +27,8 @@ for local workstation running Ansible * Python3 intalled on your local computer ([how-to](https://realpython.com/installing-python/)) * Ansible installed on your local computer ([how-to](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html)) * If you are using Mac OS X for your localhost workstation to run Ansible, you also need to have: - * homebrew package manager installed ([how-to](https://brew.sh/)) - * Updated software for command line tools (run "softwareupdate --all --install" in your terminal) + * Homebrew package manager installed ([how-to](https://brew.sh/)) + * Updated software for command line tools (run "softwareupdate --all --install" and "xcode-select --install" in your terminal) * Access to a logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: * 6 Integrated Facilities for Linux (IFLs) with SMT2 enabled * 85 GB of RAM From 969df61749e549c5bcf8baa2091a77af436f0810 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 23 Dec 2021 00:28:04 -0500 Subject: [PATCH 562/885] Added example file name to path_to_qcow variable. 
Signed-off-by: Jacob Emery --- env.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env.yaml b/env.yaml index d0be5a93..b89a890b 100644 --- a/env.yaml +++ b/env.yaml @@ -8,7 +8,7 @@ env: attach_subscription: true username: #X password: #X - path_to_qcow2: #Absolute path to RHEL qcow2 file on workstation running Ansible, i.e. /Users/username/Downloads/name-of-file.qcow2 (If unclear, see README step 2) + path_to_qcow2: #Absolute path to RHEL qcow2 file on workstation running Ansible, i.e. /Users/username/Downloads (If unclear, see README step 2) install_config: api_version: v1 metadata_name: #X #Will be combined with base_domain to create FQDNs From 80b1dd5707762faef38593f1bbcc7e673180317f Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 15:11:51 -0800 Subject: [PATCH 563/885] Updated README to be easier to read and use Signed-off-by: Jacob Emery --- README.md | 231 ++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 156 insertions(+), 75 deletions(-) diff --git a/README.md b/README.md index cffcccc5..fe0cf7ca 100644 --- a/README.md +++ b/README.md @@ -5,64 +5,107 @@ * [Supported Operating Systems](#Supported-Operating-Systems) * [Pre-Requisites](#Pre-Requisites) * [Instructions](#Installation-Instructions) -* [Setup](#Setup) -* [Provisioning](#Provisioning) -* [Post-Install Complete](#Post-Install-Complete) + * [Setup](#Setup) + * [Provisioning](#Provisioning) + * [Post-Install Complete](#Post-Install-Complete) * [Troubleshooting](#Troubleshooting) * [Teardown](#Teardown) * [Tags](#Tags) ## Scope -* The goal of this playbook is to automate the setup and deployment of a User Provisioned Infrastructure (UPI) OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing Kernel Virtual Machine (KVM) as the virtualization method. -* This README file gives extremely detailed step-by-step instructions for you to use as a reference. It assumes near-zero experience with Ansible. 
+* The goal of this playbook is to automate the setup and deployment of a User Provisioned Infrastructure (UPI) OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing Kernel Virtual Machine (KVM) as the hypervisor. +* This README file gives extremely detailed step-by-step instructions for you to use as a reference. It assumes basic understanding of the command-line, but near-zero experience with Ansible itself. ## Supported Operating Systems -for local workstation running Ansible +(for local workstation running Ansible) * Linux (RedHat and Debian) * MacOS X -## Pre-Requisites: +## Pre-Requisites * A Red Hat account ([Sign Up](https://www.redhat.com/wapps/ugc/register.html?_flowId=register-flow&_flowExecutionKey=e1s1)) * A [license](https://access.redhat.com/products/red-hat-openshift-container-platform/) or [free trial](https://www.redhat.com/en/technologies/cloud-computing/openshift/try-it) of Red Hat OpenShift Container Platform for IBM Z systems - s390x architecture (OCP license comes with licenses for RHEL and CoreOS) -* Python3 intalled on your local computer ([how-to](https://realpython.com/installing-python/)) -* Ansible installed on your local computer ([how-to](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html)) -* If you are using Mac OS X for your localhost workstation to run Ansible, you also need to have: - * Homebrew package manager installed ([how-to](https://brew.sh/)) - * Updated software for command line tools (run "softwareupdate --all --install" and "xcode-select --install" in your terminal) * Access to a logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: * 6 Integrated Facilities for Linux (IFLs) with SMT2 enabled * 85 GB of RAM * 1 TB of disk space mounted to /var/lib/libvirt/images - * Red Hat Enterprise Linux (RHEL) 8.4 with networking configured and a root password set + * Red Hat Enterprise Linux (RHEL) 8.4 installed with networking configured and a user with sudo privileges 
created. * Access to 8 (for a minimum installation) pre-allocated IPv4 addresses -* Note on DNS: The [main playbook](main.yaml) will create a DNS server on the bastion by default. If you plan to use a pre-existing DNS server instead, when filling out the variables in [env.yaml](env.yaml) in Step 3, please make sure to mark 'env.networking.dns.setup_on_bastion' to 'false'. Either way, the playbook will double-check the DNS configuration before continuing. + * Note on DNS: The [main playbook](main.yaml) will create a DNS server on the bastion by default. If you plan to use a existing DNS server instead, when filling out the variables in [env.yaml](env.yaml) in Step 3, please make sure to mark `env.networking.dns.setup_on_bastion` to `false`. Either way, the playbook will double-check the DNS configuration before continuing. +* If you are using MacOS for your workstation running Ansible, you also need to have: + * [Homebrew](https://brew.sh/) package manager installed: + ~~~ + /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" + ~~~ + * Updated software for command line tools: + ~~~ + softwareupdate --all --install + ~~~ + ~~~ + xcode-select --install + ~~~ +* [Python3]((https://realpython.com/installing-python/)) and [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) intalled on your local workstation \ + * Mac: + ~~~ + brew install python3 + ~~~ + ~~~ + brew install ansible + ~~~ + * Linux: + ~~~ + sudo apt install python3 + ~~~ + ~~~ + sudo apt install ansible + ~~~ + * or (depending on your distribution), + ~~~ + sudo yum install python3 + ~~~ + ~~~ + sudo yum install ansible + ~~~ -## Installation Instructions: +## Installation Instructions -### Setup: +### Setup * **Step 1: Get This Repository** - * Navigate to a folder where you would like to store this project in your terminal - * Run "git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git" + * In your 
terminal, navigate to a folder where you would like to store this project, copy/paste the following and hit Enter: + ~~~ + git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git + ~~~ * **Step 2: Get Red Hat Info** - * In a web browser, navigate to Red Hat's [customer portal](https://access.redhat.com/products/red-hat-enterprise-linux/), click on the 'Download Latest' button, use the drop-down to select Red Hat Enterprise Linux for IBM z Systems, select your desired version, make sure 'Architcture' is 's390x', and then scroll down to 'Red Hat Enterprise Linux X.X Update KVM Guest Image' and click on 'Download Now'. See where it downloads, copy the path and paste it into [env.yaml](env.yaml) as the variable 'env.redhat.path_to_qcow2'. - * In a web browser, navigate to the Red Hat [console](https://console.redhat.com/openshift/install/ibmz/user-provisioned) and copy the OpenShift pull secret and paste it into [env.yaml](env.yaml) as the variable 'env_pullSecret'. + * In a web browser, navigate to Red Hat's [customer portal](https://access.redhat.com/products/red-hat-enterprise-linux/), click on the 'Download Latest' button, use the drop-down to select Red Hat Enterprise Linux for IBM z Systems, select your desired version, make sure 'Architcture' is 's390x', and then scroll down to 'Red Hat Enterprise Linux X.X Update KVM Guest Image' and click on 'Download Now'. + * See where it downloads, copy the path and paste it into [env.yaml](env.yaml) as the variable `env.redhat.path_to_qcow2`. + * In a web browser, navigate to the Red Hat [console](https://console.redhat.com/openshift/install/ibmz/user-provisioned) and copy the OpenShift pull secret and paste it into [env.yaml](env.yaml) as the variable `env.redhat.pull_secret`. * **Step 3: Set Variables** * In a text editor of your choice, open [env.yaml](env.yaml) - * Fill out variables marked with '#X' to match your specific installation. 
- * Many variables are pre-filled with defaults, change pre-filled variables at your own discretion. + * Fill out variables marked with `#X` to match your specific installation. + * There are two sections of this file, separated by a comment block, which distinguishes variables that need to be filled in and variables that are pre-filled with defaults but can be altered if desired. * This is the most important step in the process. Take the time to make sure everything here is correct. * **Step 4: Setup Script** * Navigate to the folder where you cloned the Git Repository in your terminal. - * Run this shell command: "ansible-playbook setup.yaml --ask-become-pass" + * Run this shell command: + ~~~ + ansible-playbook setup.yaml + ~~~ + * If you'd like to make any last changes to the [variables file](env.yaml), the [inventory](inventory) or the Ansible [configuration file](ansible.cfg), do so now. ### Provisioning * **Step 5: Running the Main Playbook** * Navigate to the folder where you cloned the Git repository in your terminal. - * Start the main playbook by running this shell command: "ansible-playbook main.yaml --ask-become-pass" + * Start the main playbook by running this shell command: + ~~~ + ansible-playbook main.yaml + ~~~ * Watch Ansible as it completes the installation, correcting errors if they arise. * To look at what is running in detail, open roles/'task-you-want-to-inspect'/tasks/main.yaml - * If the process fails in error, go through the steps in the [troubleshooting](#Troubleshooting) section. use [tags](#Tags) to selectively start from a certain point. See the [main playbook](main.yaml) to determine what part you would like to run and use those tags when running the [main playbook](main.yaml). 
Example: "ansible-playbook main.yaml --ask-become-pass --tags 'get-ocp,create_nodes'" - * Note: we chose to not edit the user's .bash_profile/.bashrc with an automatic ssh-add command because that would change the user's local workstation set-up in a way that was potentially undesirable. Therefore, if you close out your terminal session in the middle of provisioning, you will need to run "ansible-playbook main.yaml --tags ssh-agent" before doing anything else. + * If the process fails in error: + * Go through the steps in the [troubleshooting](#Troubleshooting) section. + * Use [tags](#Tags) to selectively start from a certain point. See the [main playbook](main.yaml) to determine what part you would like to run and use those tags when running the [main playbook](main.yaml), for example: + ~~~ + ansible-playbook main.yaml --tags 'get_ocp,create_nodes' + ~~~ ### Post-Install Complete * **Step 6: First-Time Login** @@ -70,61 +113,99 @@ for local workstation running Ansible * Use a web-browser to type in the URL, which should take you to a sign-in page. Use the provided credentials to sign in. * Congratulations! Your OpenShift cluster installation is now complete. -## Troubleshooting: +## Troubleshooting If you encounter errors while running the main playbook, there are a few things you can do: -1) Double check your variables in [env.yaml](env.yaml) -2) Inspect the part that failed by opening roles/role_name/tasks/main.yaml -3) Google the specific error message -3) Re-run the role indivually with [tags](#Tags) and the verbosity '-v' option to get more debugging information (more v's give more info). For example: "ansible-playbook main.yaml --ask-become-pass --tags get_ocp -vvv" -4) Teardown troublesome KVM guests with [teardown](#Teardown) scripts and start again with [tags](#Tags). To start from the beginning, run "ansible-playbook teardown.yaml --ask-become-pass --tags full_teardown +1) Double check your variables in [env.yaml](env.yaml). 
+2) Inspect the part that failed by opening `roles/role_name_to_inspect/tasks/main.yaml` +3) Google the specific error message. +3) Re-run the role indivually with [tags](#Tags) and the verbosity '-v' option to get more debugging information (more v's give more info). For example: + ~~~ + ansible-playbook main.yaml --ask-become-pass --tags get_ocp -vvv + ~~~ +4) Teardown troublesome KVM guests with [teardown](#Teardown) scripts and start again with [tags](#Tags). To start from the beginning, run: + ~~~ + ansible-playbook teardown.yaml --ask-become-pass --tags full + ~~~ 6) E-mail Jacob Emery at jacob.emery@ibm.com -7) If it's a problem with an OpenShift verification step, first re-reun the role with [tags](#Tags). If that doesn't work, SSH into the bastion as root ("ssh root@bastion-ip-address-here") and then run,"export KUBECONFIG=/ocpinst/auth/kubeconfig" and then "oc whoami" and make sure it ouputs "system:admin". Then run the shell command from the role you would like to check on manually: i.e. 'oc get nodes', 'oc get co', etc. +7) If it's a problem with an OpenShift verification step, first re-reun the role with [tags](#Tags). If that doesn't work, SSH into the bastion as root ("ssh root@bastion-ip-address-here") and then run,"export KUBECONFIG=~/ocpinst/auth/kubeconfig" and then "oc whoami" and make sure it ouputs "system:admin". Then run the shell command from the role you would like to check on manually: i.e. 'oc get nodes', 'oc get co', etc. ## Teardown: -* If you would like to teardown your VMs, first determine whether you would like to do a full or partial teardown, specified below. -* Full: To teardown all the VMs running on your KVM host, run: "ansible-playbook teardown.yaml --ask-become-pass --tags full_teardown". Start back again from the beginning by running "ansible-playbook main.yaml --ask-become-pass" -* Partial: To teardown all the VMS except for the bastion, run: "ansible-playbook teardown.yaml --ask-become-pass --tags partial_teardown". 
To start the main.yaml playbook back from that point, run main.yaml with "--tags 'get_ocp,create_nodes,verification'" +* If you would like to teardown your VMs, first determine whether you would like to do a `full`, `partial`, or `app` teardown, specified below. +* `full`: + * To teardown all the OpenShift KVM guest virtual machines (will not teardown KVM host or extra RHEL app VMs) run: + ~~~ + ansible-playbook teardown.yaml --tags full + ~~~ + * Start back again from the beginning by running + ~~~ + ansible-playbook main.yaml + ~~~ +* `partial`: + * To teardown all OpenShift KVM guest virtual machines except the bastion (will also not teardown KVM host or extra RHEL app VMs) run: + ~~~ + ansible-playbook teardown.yaml --tags partial + ~~~ + * To start the main.yaml playbook back from that point, run: + ~~~ + ansible-playbook main.yaml --tags 'get_ocp,create_nodes,verification' + ~~~ +* `app`: + * To teardown only the extra RHEL VMs for non-cluster applications, run: + ~~~ + ansible-playbook teardown.yaml --tags partial + ~~~ + * To re-create those VMs, run: + ~~~ + ansible-playbook main.yaml --tags app + ~~~ ## Tags * To be more selective with what parts of playbooks run, use tags. * This is especially helpful for troubleshooting. -* To determine what you part of a playbook you would like to run, check the list below. Tags match their corresponding roles. There are also some tags like "bastion" that cover multiple roles. To see these tags, see the [main playbook](main.yaml). -* Examples: -* "ansible-playbook main.yaml --ask-become-pass --tags get_ocp" (for one tag), or -* "ansible-playbook main.yaml --ask-become-pass --tags 'bastion,get_ocp'" (for multiple tags) +* To determine what part of a playbook you would like to run, check the list below. Each [role](roles) has a corresponding tag. There are also some tags like "bastion" that cover multiple roles. To see these tags, see the [main playbook](main.yaml). 
Here's how to use the tags: + * with one tag: + ~~~ + ansible-playbook main.yaml --tags get_ocp + ~~~ + * with multiple tags (enclose tags with single or double quotes, separate with commas): + ~~~ + ansible-playbook main.yaml --tags 'bastion,get_ocp' + ~~~ -List of Tags (in alphabetical order): -* approve_certs = Tasks for approve_certs role -* attach_subscription = Auto-attach Red Hat subscription role -* bastion = Configuration of bastion -* check_nodes = Tasks for check_nodes role -* check_dns = Check DNS resolution -* check_ssh = Check SSH role -* compute = Creation of the compute nodes -* control = Creation of the control nodes -* create_bastion = Creation of bastion KVM guest -* create_bootstrap = Creation of boostrap KVM guest -* create_nodes = Second set of KVM host's plays -* dns = Configuration of DNS server on bastion -* full_teardown = Use with teardown.yaml to bring down all KVM guests -* get_ocp = Prepare bastion for installing OpenShift -* haproxy = Configuration of load balancer on bastion -* httpd = Configuration of Apache server on bastion -* install_packages = Install and update packages -* kvm_host = All KVM host tasks -* kvm_prep = First set of KVM host's tasks -* localhost = Tasks that apply to the local machine running Ansible -* prep_kvm_guest = Get Red Hat CoreOS kernel and initramfs on host -* partial_teardown = Use with teardown.yaml to bring down all VMs except bastion -* set_selinux_permissive = Tasks related to SELinux settings -* set_firewall = Configuration of firewall -* setup = First set of setup tasks on the localhost -* ssh = All SSH tasks -* ssh_agent = Setting up SSH agent -* ssh_copy_id = Copying SSH key to target -* ssh_key_gen = Ansible SSH keypair creation -* ssh_ocp_key_gen = Generate SSH key pair for OpenShift on bastion -* verification = All OpenShift cluster verification tasks -* wait_for_bootstrap = Tasks for to wait_for_bootstrap role -* wait_for_cluster_operators = Tasks for wait_for_cluster_operators -* 
wait_for_install_complete = Tasks for wait_for_install_complete role \ No newline at end of file +* List of Tags: + * `approve_certs`: Tasks for approve_certs role + * `app_setup`: Tasks related to setting up the extra RHEL VMs + * `attach_subscription`: Auto-attach Red Hat subscription role + * `bastion`: All bastion tasks + * `bastion_setup`: Configuration of the bastion node, not including verification steps. + * `check_nodes`: Tasks for check_nodes role + * `check_dns`: Check DNS resolution + * `check_ssh`: Check SSH role + * `compute`: Creation of the compute nodes + * `control`: Creation of the control nodes + * `create_bastion`: Creation of bastion KVM guest + * `create_bootstrap`: Creation of boostrap KVM guest + * `create_nodes`: Second set of KVM host's plays + * `dns`: Configuration of DNS server on bastion + * `full`: Use with teardown.yaml to bring down all KVM guests + * `get_ocp`: Prepare bastion for installing OpenShift + * `haproxy`: Configuration of load balancer on bastion + * `httpd`: Configuration of Apache server on bastion + * `install_packages`: Install and update packages + * `kvm_host`: All KVM host tasks + * `kvm_prep`: First set of KVM host's tasks + * `workstation`: Tasks that apply to the local machine running Ansible + * `prep_kvm_guest`: Get Red Hat CoreOS kernel and initramfs on host + * `partial`: Use with teardown.yaml to bring down all VMs except bastion + * `set_selinux_permissive`: Tasks related to SELinux settings + * `set_firewall`: Configuration of firewall + * `setup`: First set of setup tasks on the workstation + * `ssh`: All SSH tasks + * `ssh_agent`: Setting up SSH agent + * `ssh_copy_id`: Copying SSH key to target + * `ssh_key_gen`: Ansible SSH keypair creation + * `ssh_ocp_key_gen`: Generate SSH key pair for OpenShift on bastion + * `verification`: All OpenShift cluster verification tasks + * `wait_for_bootstrap`: Tasks for to wait_for_bootstrap role + * `wait_for_cluster_operators`: Tasks for 
wait_for_cluster_operators + * `wait_for_install_complete`: Tasks for wait_for_install_complete role \ No newline at end of file From 524d33e66a04c107fbdbbe9c95ee9f1fa97319cf Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:26:00 -0800 Subject: [PATCH 564/885] Added extra RHEL VM non-cluster apps setup section to main playbook, moved kubeconfig to playbook level instead of task level, added ssh_target vars, updated tags. Signed-off-by: Jacob Emery --- main.yaml | 45 ++++++++++++++++++++++++++++++--------------- 1 file changed, 30 insertions(+), 15 deletions(-) diff --git a/main.yaml b/main.yaml index 6eb63bdc..6edc868d 100644 --- a/main.yaml +++ b/main.yaml @@ -1,21 +1,21 @@ --- -- hosts: localhost - tags: setup +- hosts: workstation + tags: workstation,kvm_host connection: local become: false gather_facts: no vars_files: - env.yaml vars: - - ssh_target_ip: "{{ env.ip.kvm }}" + - ssh_target: ["{{ env.ip.kvm }}","{{ env.access.login.kvm.user }}","{{ env.access.login.kvm.pass }}"] roles: - ssh_key_gen - ssh_copy_id - ssh_agent - hosts: kvm_host - tags: kvm_host, kvm_prep + tags: kvm_host,kvm_prep become: true vars_files: - env.yaml @@ -23,26 +23,26 @@ - packages: "{{ env.pkgs.kvm}}" roles: - check_ssh - - attach_subscription + - {role: attach_subscription, when: env.redhat.username is defined and env.redhat.password is defined} - install_packages - set_selinux_permissive - macvtap - create_bastion -- hosts: localhost - tags: bastion +- hosts: workstation + tags: workstation,bastion connection: local become: false gather_facts: no vars_files: - env.yaml vars: - - ssh_target_ip: "{{ env.ip.bastion }}" + - ssh_target: ["{{ env.ip.bastion }}","{{ env.access.login.bastion.user }}","{{ env.access.login.bastion.pass }}"] roles: - ssh_copy_id - hosts: bastion - tags: bastion + tags: bastion_setup,bastion become: true vars_files: - env.yaml @@ -50,19 +50,19 @@ - packages: "{{env.pkgs.bastion}}" roles: - check_ssh - - attach_subscription + - {role: 
attach_subscription, when: env.redhat.username is defined and env.redhat.password is defined} - install_packages - ssh_ocp_key_gen - set_selinux_permissive - set_firewall - - dns + - {role: dns, when: env.networking.dns.setup_on_bastion } - check_dns - haproxy - httpd - - get_ocp + - {role: get_ocp, become: false } - hosts: kvm_host - tags: kvm_host, create_nodes + tags: kvm_host,create_nodes become: true gather_facts: no vars_files: @@ -72,10 +72,25 @@ - create_bootstrap - create_control_nodes - create_compute_nodes + - {role: create_extra_rhel, when: env.ip.app is defined } -- hosts: bastion - tags: verification, bastion +- hosts: app + tags: app_setup,app become: true + vars_files: + - env.yaml + vars: + - packages: "{{ env.pkgs.app}}" + roles: + - {role: attach_subscription, when: env.ip.app is defined and env.redhat.username is defined and env.redhat.password is defined} + - {role: install_packages , when: env.ip.app is defined } + +- hosts: bastion + tags: verification,bastion + become: false + become_user: jacob + environment: + KUBECONFIG: "/home/{{ env.access.login.bastion.user }}/ocpinst/auth/kubeconfig" gather_facts: yes vars_files: - env.yaml From 72936e305fadd1e7e52fa1980f9ae1c5486dfc2d Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:28:41 -0800 Subject: [PATCH 565/885] Moved kubeconfig to playbook level, added more rounds of checking certs for approval Signed-off-by: Jacob Emery --- roles/approve_certs/tasks/main.yaml | 167 ++++++++++++++++++++++------ 1 file changed, 132 insertions(+), 35 deletions(-) diff --git a/roles/approve_certs/tasks/main.yaml b/roles/approve_certs/tasks/main.yaml index 32024767..16fdc280 100644 --- a/roles/approve_certs/tasks/main.yaml +++ b/roles/approve_certs/tasks/main.yaml @@ -1,91 +1,188 @@ --- -- name: Approving all pending CSR +- name: Approving all pending certificates tags: approve_certs - environment: - KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: | - /ocpinst/oc get csr | awk '{print $1'} | grep 
-v NAME | xargs /ocpinst/oc adm certificate approve - register: csr_approved_1 + /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + register: csr_approved ignore_errors: yes -- name: Viewing first csr approved +- name: Viewing approved certificates tags: approve_certs debug: - msg: "{{csr_approved_1.stdout_lines}}" + msg: "{{csr_approved.stdout_lines}}" - name: Pause to let new certificates needing approval to generate tags: approve_certs pause: seconds: 30 -- name: Second round approving all pending CSR +- name: Approving all pending certificates tags: approve_certs - environment: - KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: | - /ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /ocpinst/oc adm certificate approve - register: csr_approved_2 + /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + register: csr_approved ignore_errors: yes -- name: Viewing second csr approved +- name: Viewing approved certificates tags: approve_certs debug: - msg: "{{csr_approved_2.stdout_lines}}" + msg: "{{csr_approved.stdout_lines}}" - name: Pause to let new certificates needing approval to generate tags: approve_certs pause: seconds: 30 -- name: Third round approving all pending CSR +- name: Approving all pending certificates tags: approve_certs - environment: - KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: | - /ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /ocpinst/oc adm certificate approve - register: csr_approved_3 + /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + register: csr_approved ignore_errors: yes -- name: Viewing third csr approved +- name: Viewing 
approved certificates tags: approve_certs debug: - msg: "{{csr_approved_3.stdout_lines}}" + msg: "{{csr_approved.stdout_lines}}" - name: Pause to let new certificates needing approval to generate tags: approve_certs pause: seconds: 30 -- name: Fourth round approving all pending CSR +- name: Approving all pending certificates tags: approve_certs - environment: - KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: | - /ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /ocpinst/oc adm certificate approve - register: csr_approved_4 + /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + register: csr_approved ignore_errors: yes -- name: Viewing fourth csr approved +- name: Viewing approved certificates tags: approve_certs debug: - msg: "{{csr_approved_4.stdout_lines}}" + msg: "{{csr_approved.stdout_lines}}" - name: Pause to let new certificates needing approval to generate tags: approve_certs pause: seconds: 30 -- name: Fifth round approving all pending CSR +- name: Approving all pending certificates tags: approve_certs - environment: - KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: | - /ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /ocpinst/oc adm certificate approve - register: csr_approved_5 + /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + register: csr_approved ignore_errors: yes -- name: Viewing fifth csr approved +- name: Viewing approved certificates tags: approve_certs debug: - msg: "{{csr_approved_4.stdout_lines}}" \ No newline at end of file + msg: "{{csr_approved.stdout_lines}}" + +- name: Pause to let new certificates needing approval to generate + tags: approve_certs + pause: + seconds: 30 + +- name: Approving all pending certificates + tags: approve_certs + shell: | + /home/{{ 
env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + register: csr_approved + ignore_errors: yes + +- name: Viewing approved certificates + tags: approve_certs + debug: + msg: "{{csr_approved.stdout_lines}}" + +- name: Pause to let new certificates needing approval to generate + tags: approve_certs + pause: + seconds: 30 + +- name: Approving all pending certificates + tags: approve_certs + shell: | + /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + register: csr_approved + ignore_errors: yes + +- name: Viewing approved certificates + tags: approve_certs + debug: + msg: "{{csr_approved.stdout_lines}}" + +- name: Pause to let new certificates needing approval to generate + tags: approve_certs + pause: + seconds: 30 + +- name: Approving all pending certificates + tags: approve_certs + shell: | + /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + register: csr_approved + ignore_errors: yes + +- name: Viewing approved certificates + tags: approve_certs + debug: + msg: "{{csr_approved.stdout_lines}}" + +- name: Pause to let new certificates needing approval to generate + tags: approve_certs + pause: + seconds: 30 + +- name: Approving all pending certificates + tags: approve_certs + shell: | + /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + register: csr_approved + ignore_errors: yes + +- name: Viewing approved certificates + tags: approve_certs + debug: + msg: "{{csr_approved.stdout_lines}}" + +- name: Pause to let new certificates needing approval to generate + 
tags: approve_certs + pause: + seconds: 30 + +- name: Approving all pending certificates + tags: approve_certs + shell: | + /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + register: csr_approved + ignore_errors: yes + +- name: Viewing approved certificates + tags: approve_certs + debug: + msg: "{{csr_approved.stdout_lines}}" + +- name: Pause to let new certificates needing approval to generate + tags: approve_certs + pause: + seconds: 30 + +- name: Approving all pending certificates + tags: approve_certs + shell: | + /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + register: csr_approved + ignore_errors: yes + +- name: Viewing approved certificates + tags: approve_certs + debug: + msg: "{{csr_approved.stdout_lines}}" + +- name: Pause to let new certificates needing approval to generate + tags: approve_certs + pause: + seconds: 30 From 0989fd04022550954b009a3048fca38f395b63ef Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:29:52 -0800 Subject: [PATCH 566/885] Added infra nodes and extra RHEL VM apps to DNS check Signed-off-by: Jacob Emery --- roles/check_dns/tasks/main.yaml | 40 +++++++++++++++++++++++++-------- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/roles/check_dns/tasks/main.yaml b/roles/check_dns/tasks/main.yaml index 7e6a9245..c6f3fb57 100644 --- a/roles/check_dns/tasks/main.yaml +++ b/roles/check_dns/tasks/main.yaml @@ -8,16 +8,16 @@ tags: check_dns,dns shell: "dig +short {{ item }} | tail -n1" loop: - - "{{ env.hostname.bastion }}.{{ env.install_config.base_domain }}" - - "api.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}" - - "api-int.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}" - - 
"test.apps.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}" + - "{{ env.hostname.bastion }}.{{ env.networking.base_domain }}" + - "api.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}" + - "api-int.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}" + - "test.apps.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}" register: bastion_lookup failed_when: env.ip.bastion != bastion_lookup.stdout - name: Check internal cluster DNS resolution for bootstrap tags: check_dns,dns - shell: "dig +short {{ env.hostname.bootstrap }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }} | tail -n1" + shell: "dig +short {{ env.hostname.bootstrap }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }} | tail -n1" register: bootstrap_lookup failed_when: env.ip.bootstrap != bootstrap_lookup.stdout @@ -28,24 +28,46 @@ - name: Check control nodes DNS resolution tags: check_dns,dns - shell: "dig +short {{ env.hostname.control[i] }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }} | tail -n1" + shell: "dig +short {{ env.hostname.control[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }} | tail -n1" register: control_lookup failed_when: env.ip.control[i] != control_lookup.stdout - with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 + with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 loop_control: extended: yes index_var: i - name: Check compute nodes DNS resolution tags: check_dns,dns - shell: "dig +short {{ env.hostname.compute[i] }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }} | tail -n1" + shell: "dig +short {{ env.hostname.compute[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }} | tail -n1" register: compute_lookup failed_when: env.ip.compute[i] != compute_lookup.stdout - with_sequence: start=0 
end={{env.install_config.compute.replicas - 1}} stride=1 + with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 loop_control: extended: yes index_var: i +- name: Check infrastructure nodes DNS resolution + tags: check_dns,dns + shell: "dig +short {{ env.hostname.infra[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }} | tail -n1" + register: infra_lookup + failed_when: env.ip.infra[i] != infra_lookup.stdout + with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + when: env.hostname.infra is defined + +- name: Check extra RHEL VM apps DNS resolution + tags: check_dns,dns + shell: "dig +short {{ env.hostname.app[i] }}.{{ env.networking.base_domain }} | tail -n1" + register: app_lookup + failed_when: env.ip.app[i] != app_lookup.stdout + with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + when: env.hostname.app is defined + - name: Check external DNS resolution from DNS forwarder tags: check_dns,dns register: external_dns_check From 799d4d2656c4a9f0656b284a687d87a1bb2c000a Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:31:13 -0800 Subject: [PATCH 567/885] Moved kubeconfig to playbook level Signed-off-by: Jacob Emery --- roles/check_nodes/tasks/main.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/roles/check_nodes/tasks/main.yaml b/roles/check_nodes/tasks/main.yaml index b8cfbdaf..5199b086 100644 --- a/roles/check_nodes/tasks/main.yaml +++ b/roles/check_nodes/tasks/main.yaml @@ -2,8 +2,6 @@ - name: Check nodes status tags: check_nodes - environment: - KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: oc get nodes | awk '{print $1, $2}' register: oc_get_nodes @@ -14,8 +12,6 @@ - name: Make sure nodes are 'Ready' before continuing tags: check_nodes - environment: - KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: oc get nodes | awk '{print $2}' register: nodes_check 
until: ("NotReady" not in nodes_check.stdout) From d85a2973e1d414204bf30fe47d3f1afec664cd2d Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:32:00 -0800 Subject: [PATCH 568/885] Added more rounds of checking cluster operators and moved kubeconfig to playbook level Signed-off-by: Jacob Emery --- .../tasks/main.yaml | 92 ++++++++++++++++--- 1 file changed, 78 insertions(+), 14 deletions(-) diff --git a/roles/wait_for_cluster_operators/tasks/main.yaml b/roles/wait_for_cluster_operators/tasks/main.yaml index f0842dcc..f9d3e1b3 100644 --- a/roles/wait_for_cluster_operators/tasks/main.yaml +++ b/roles/wait_for_cluster_operators/tasks/main.yaml @@ -2,8 +2,6 @@ - name: First round of checking cluster operators tags: wait_for_cluster_operators - environment: - KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: oc get co register: oc_get_co @@ -14,8 +12,6 @@ - name: First round of waiting for cluster operators. Trying 5 times before printing status again. tags: wait_for_cluster_operators - environment: - KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: oc get co | awk '{print $3}' register: co_check until: ("False" not in co_check.stdout) @@ -25,8 +21,6 @@ - name: Second round of checking cluster operators tags: wait_for_cluster_operators - environment: - KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: oc get co register: oc_get_co @@ -37,8 +31,6 @@ - name: Second round of waiting for cluster operators. Trying 5 times before printing status again. 
tags: wait_for_cluster_operators - environment: - KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: oc get co | awk '{print $3}' register: co_check until: ("False" not in co_check.stdout) @@ -48,20 +40,92 @@ - name: Third round of checking cluster operators tags: wait_for_cluster_operators - environment: - KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: oc get co register: oc_get_co -- name: View third round clusteroperator status check +- name: View third round cluster operator status check tags: wait_for_cluster_operators debug: var: oc_get_co.stdout_lines -- name: Third and final round of waiting for cluster operators. Trying 10 times before failing. +- name: Third round of waiting for cluster operators. Trying 5 times before printing status again. + tags: wait_for_cluster_operators + shell: oc get co | awk '{print $3}' + register: co_check + until: ("False" not in co_check.stdout) + retries: 5 + delay: 30 + ignore_errors: yes + +- name: Fourth round of checking cluster operators + tags: wait_for_cluster_operators + shell: oc get co + register: oc_get_co + +- name: View fourth round cluster operator status check + tags: wait_for_cluster_operators + debug: + var: oc_get_co.stdout_lines + +- name: Fourth round of waiting for cluster operators. Trying 5 times before printing status again. + tags: wait_for_cluster_operators + shell: oc get co | awk '{print $3}' + register: co_check + until: ("False" not in co_check.stdout) + retries: 5 + delay: 30 + ignore_errors: yes + +- name: Fifth round of checking cluster operators + tags: wait_for_cluster_operators + shell: oc get co + register: oc_get_co + +- name: View fifth round cluster operator status check + tags: wait_for_cluster_operators + debug: + var: oc_get_co.stdout_lines + +- name: Fifth round of waiting for cluster operators. Trying 5 times before printing status again. 
+ tags: wait_for_cluster_operators + shell: oc get co | awk '{print $3}' + register: co_check + until: ("False" not in co_check.stdout) + retries: 5 + delay: 30 + ignore_errors: yes + +- name: Sixth round of checking cluster operators + tags: wait_for_cluster_operators + shell: oc get co + register: oc_get_co + +- name: View sixth round cluster operator status check + tags: wait_for_cluster_operators + debug: + var: oc_get_co.stdout_lines + +- name: Sixth round of waiting for cluster operators. Trying 5 times before printing status again. + tags: wait_for_cluster_operators + shell: oc get co | awk '{print $3}' + register: co_check + until: ("False" not in co_check.stdout) + retries: 5 + delay: 30 + ignore_errors: yes + +- name: Seventh round of checking cluster operators + tags: wait_for_cluster_operators + shell: oc get co + register: oc_get_co + +- name: View seventh round clusteroperator status check + tags: wait_for_cluster_operators + debug: + var: oc_get_co.stdout_lines + +- name: Seventh and final round of waiting for cluster operators. Trying 10 times before failing. tags: wait_for_cluster_operators - environment: - KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: oc get co | awk '{print $3}' register: co_check until: ("False" not in co_check.stdout) From 853bb49d0691609cbd4ec825625bfe160f156dd3 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:33:31 -0800 Subject: [PATCH 569/885] Added removal of bootstrap's qcow2 file after undefining, moved kubeconfig to playbook level Signed-off-by: Jacob Emery --- roles/wait_for_bootstrap/tasks/main.yaml | 29 ++++++++++++++---------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/roles/wait_for_bootstrap/tasks/main.yaml b/roles/wait_for_bootstrap/tasks/main.yaml index 4cf3ccb7..ca775189 100644 --- a/roles/wait_for_bootstrap/tasks/main.yaml +++ b/roles/wait_for_bootstrap/tasks/main.yaml @@ -1,33 +1,31 @@ --- -- name: Make sure kubeconfig works +- name: Make sure kubeconfig works properly. 
tags: wait_for_bootstrap - environment: - KUBECONFIG: "/ocpinst/auth/kubeconfig" shell: oc whoami register: oc_whoami until: oc_whoami.stdout == "system:admin" retries: 120 delay: 30 -- name: Print output of oc whoami, should be system:admin +- name: Print output of oc whoami, should be "system:admin" if previous task worked. tags: wait_for_bootstrap debug: var: oc_whoami.stdout -- name: Watch bootstrap - tags: wait_for_bootstrap - command: chdir=/ocpinst ./openshift-install wait-for bootstrap-complete +- name: Watch wait-for bootstrap-complete process. + tags: wait_for_bootstrap + shell: openshift-install wait-for bootstrap-complete --dir=/home/{{ env.access.login.bastion.user }}/ocpinst async: 3600 poll: 0 - register: bootstrap_complete_sleeper + register: watch_bootstrap -- name: Retry bootstrap job id check until it's finished. This may take some time. +- name: Retry wait-for bootstrap-complete job ID check until it's finished. This may take some time. tags: wait_for_bootstrap async_status: - jid: "{{ bootstrap_complete_sleeper.ansible_job_id }}" - register: job_result - until: job_result.finished + jid: "{{ watch_bootstrap.ansible_job_id }}" + register: bootstrapping + until: bootstrapping.finished retries: 120 delay: 30 @@ -46,3 +44,10 @@ command: undefine ignore_errors: yes delegate_to: "{{ env.ip.kvm }}" + +- name: Remove qcow2 + tags: wait_for_bootstrap + file: + path: /var/lib/libvirt/images/{{env.hostname.bootstrap}}-bootstrap.qcow2 + state: absent + delegate_to: "{{ env.ip.kvm }}" \ No newline at end of file From c6006ab39dfca1e41197c4e715c89c0cbe1bd0db Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:35:39 -0800 Subject: [PATCH 570/885] Added role to create optional extra RHEL VMs for non-cluster applications Signed-off-by: Jacob Emery --- roles/create_extra_rhel/tasks/main.yaml | 131 ++++++++++++++++++ .../templates/cloud_init.cfg.j2 | 54 ++++++++ .../templates/network_config_static.cfg.j2 | 10 ++ 3 files changed, 195 insertions(+) 
create mode 100644 roles/create_extra_rhel/tasks/main.yaml create mode 100644 roles/create_extra_rhel/templates/cloud_init.cfg.j2 create mode 100644 roles/create_extra_rhel/templates/network_config_static.cfg.j2 diff --git a/roles/create_extra_rhel/tasks/main.yaml b/roles/create_extra_rhel/tasks/main.yaml new file mode 100644 index 00000000..48c0a493 --- /dev/null +++ b/roles/create_extra_rhel/tasks/main.yaml @@ -0,0 +1,131 @@ +--- + +- name: Load in variables from env.yaml + tags: create_extra_rhel,app + include_vars: env.yaml + +- name: Check to see if qcow2 file already exists on KVM host + tags: create_extra_rhel,app + stat: + path: /var/lib/libvirt/images/bastion_base.qcow2 + ignore_errors: yes + register: qcow2_check + +- name: Copy RHEL qcow2 file to KVM host if it's not there already. This may take a while. + tags: create_extra_rhel,app + copy: + src: "{{ env.redhat.path_to_qcow2 }}" + dest: /var/lib/libvirt/images/bastion_base.qcow2 + mode: '600' + owner: qemu + group: qemu + when: qcow2_check.stat.exists == false + +- name: Create working directory + tags: create_extra_rhel,app + file: + path: /var/lib/libvirt/images/tmp/{{ item }} + state: directory + mode: '0755' + loop: "{{ env.hostname.app }}" + +- name: Create base image + tags: create_extra_rhel,app + command: "qemu-img create -b /var/lib/libvirt/images/bastion_base.qcow2 -f qcow2 /var/lib/libvirt/images/{{item}}.qcow2 {{env.node_resources.app.disk_size}}G" + register: qemu_create + loop: "{{env.hostname.app}}" + +- name: Set apps qcow2 permissions + tags: create_extra_rhel,app + command: chmod 600 /var/lib/libvirt/images/{{env.hostname.app[i]}}.qcow2 + with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Set apps qcow2 ownership to qemu + tags: create_extra_rhel,app + command: chown qemu:qemu /var/lib/libvirt/images/{{env.hostname.app[i]}}.qcow2 + with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 + 
loop_control: + extended: yes + index_var: i + +- name: Get info about qemu image creation + tags: create_extra_rhel,app + command: "qemu-img info /var/lib/libvirt/images/{{ item }}.qcow2" + register: qemu_info + loop: "{{env.hostname.app}}" + +- name: Create instance-id + tags: create_extra_rhel,app + shell: "echo \"instance-id: $(uuidgen || echo i-abcdefg)\" > /var/lib/libvirt/images/tmp/{{ item }}/meta-data" + register: uuidgen + loop: "{{ env.hostname.app }}" + +- name: Use cloud_init.cfg.j2 template to make user-data file + tags: create_extra_rhel,app + template: + src: roles/create_extra_rhel/templates/cloud_init.cfg.j2 + dest: /var/lib/libvirt/images/tmp/{{ item }}/user-data + loop: "{{ env.hostname.app }}" + +- name: Use network_config_static.cfg.j2 template to make network-config file + tags: create_extra_rhel,app + template: + src: roles/create_extra_rhel/templates/network_config_static.cfg.j2 + dest: /var/lib/libvirt/images/tmp/{{ env.hostname.app[i] }}/network-config + with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Generate iso file + tags: create_extra_rhel,app + command: genisoimage -output /var/lib/libvirt/images/{{ item }}-seed.img -volid cidata -joliet -rock /var/lib/libvirt/images/tmp/{{ item }}/meta-data /var/lib/libvirt/images/tmp/{{ item }}/network-config /var/lib/libvirt/images/tmp/{{ item }}/user-data + register: gen_iso + loop: "{{ env.hostname.app }}" + +- name: Set apps seed images permissions + tags: create_extra_rhel,app + command: chmod 600 /var/lib/libvirt/images/{{ env.hostname.app[i] }}-seed.img + with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Set apps seed images ownership to qemu + tags: create_extra_rhel,app + command: chown qemu:qemu /var/lib/libvirt/images/{{ env.hostname.app[i] }}-seed.img + with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 + 
loop_control: + extended: yes + index_var: i + +- name: Boot app + tags: create_extra_rhel,app + command: virt-install + --name {{ item }} \ + --virt-type kvm \ + --memory {{ env.node_resources.app.ram }} \ + --vcpus {{ env.node_resources.app.vcpu }} \ + --boot hd \ + --disk path=/var/lib/libvirt/images/{{ item }}-seed.img,device=cdrom \ + --disk path=/var/lib/libvirt/images/{{ item }}.qcow2,device=disk \ + --graphics none \ + --os-type Linux --os-variant rhel{{env.node_resources.app.os_variant}} \ + --network network=macvtap-net \ + --noautoconsole \ + --noreboot + loop: "{{ env.hostname.app }}" + +- name: Restart apps + tags: create_extra_rhel,app + command: virsh start {{ item }} + loop: "{{ env.hostname.app }}" + +- name: Waiting 3 minutes for automated apps installation and configuration to complete. To monitor, use a web browser to go to https://your-kvm-host-ip-address-here:9090 + tags: create_extra_rhel,app + pause: + minutes: 3 + diff --git a/roles/create_extra_rhel/templates/cloud_init.cfg.j2 b/roles/create_extra_rhel/templates/cloud_init.cfg.j2 new file mode 100644 index 00000000..a273ac28 --- /dev/null +++ b/roles/create_extra_rhel/templates/cloud_init.cfg.j2 @@ -0,0 +1,54 @@ +#cloud-config +hostname: {{item}} +fqdn: {{item}}.{{ env.networking.base_domain }} +manage_etc_hosts: true +users: + - name: {{ env.access.login.app.user }} + sudo: ALL=(ALL) NOPASSWD:ALL + groups: adm,sys + home: /home/{{ env.access.login.app.user }} + shell: /bin/bash + lock_passwd: false +# allow both password auth and cert auth via ssh (console access can still login) +ssh_pwauth: true +disable_root: false +chpasswd: + list: | + root:{{ env.access.login.app.sudo_pass }} + {{ env.access.login.app.user }}:{{ env.access.login.app.pass }} + expire: False + +#growpart: +# mode: auto +# devices: ['/'] +#disk_setup: +# /dev/vdb: +# table_type: gpt +# layout: True +# overwrite: False +#fs_setup: +# - label: DATA_XFS +# filesystem: xfs +# device: '/dev/vdb' +# partition: auto +# #cmd: 
mkfs -t %(filesystem)s -L %(label)s %(device)s +#mounts: +# # [ /dev/vdx, /mountpoint, fstype ] +# - [ LABEL=DATA_XFS, /dataxfs, xfs ] + +# 3rd col=fs type, 4th col=permissions, 5th=backup enabled, 6th=fsck order +mount_default_fields: [ None, None, "ext4", "defaults,noatime","0","2"] + +# every boot +bootcmd: + - [ sh, -c, 'echo ran cloud-init again at $(date) | sudo tee -a /root/bootcmd.log' ] + - [ sh, -c, 'echo $(date) instid=$INSTANCE_ID | sudo tee -a /root/bootcmd.log' ] + +# run once for network static IP fix +runcmd: + - [ sh, -c, 'sed -i s/BOOTPROTO=dhcp/BOOTPROTO=static/ /etc/sysconfig/network-scripts/ifcfg-eth0' ] + - [ sh, -c, 'ifdown eth0 && sleep 1 && ifup eth0 && sleep 1 && ip a' ] + - [ sh, -c, 'echo $(date) instid=$INSTANCE_ID | sudo tee -a /root/runcmd.log' ] + +# written to /var/log/cloud-init.log +final_message: "The system is finally up, after $UPTIME seconds" diff --git a/roles/create_extra_rhel/templates/network_config_static.cfg.j2 b/roles/create_extra_rhel/templates/network_config_static.cfg.j2 new file mode 100644 index 00000000..e7571f68 --- /dev/null +++ b/roles/create_extra_rhel/templates/network_config_static.cfg.j2 @@ -0,0 +1,10 @@ +version: 2 +ethernets: + eth0: + dhcp4: false + # default libvirt network + addresses: [ {{ env.ip.app[i] }} ] + gateway4: {{ env.networking.gateway }} + nameservers: + search: [ {{ env.networking.base_domain }} ] + addresses: [ {{ env.networking.dns.nameserver }},{{ env.networking.dns.forwarder }} ] \ No newline at end of file From 413e62401906800000e1431a3c1811a4030034fd Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:38:28 -0800 Subject: [PATCH 571/885] Reworked how OCP SSH key is transferred to install-config, removed deprecated ocp_ssh_pub.yaml file Signed-off-by: Jacob Emery --- roles/get_ocp/files/ocp_ssh_pub.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100755 roles/get_ocp/files/ocp_ssh_pub.yaml diff --git a/roles/get_ocp/files/ocp_ssh_pub.yaml 
b/roles/get_ocp/files/ocp_ssh_pub.yaml deleted file mode 100755 index e69de29b..00000000 From 8ee5a5be312932f0b052e1a8871ce444b3adee2b Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:40:00 -0800 Subject: [PATCH 572/885] Set permissions and ownership of bastion config files Signed-off-by: Jacob Emery --- roles/create_bastion/tasks/main.yaml | 65 ++++++++++++++++++---------- 1 file changed, 42 insertions(+), 23 deletions(-) diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index b23046e6..bdaa3c66 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -1,101 +1,119 @@ --- - name: Load in variables from env.yaml - tags: create_bastion + tags: create_bastion,bastion include_vars: env.yaml - name: Enable cockpit console - tags: create_bastion + tags: create_bastion,bastion command: systemctl enable --now cockpit.socket - name: Remove working directory for idempotency - tags: create_bastion + tags: create_bastion,bastion file: path: /var/lib/libvirt/images/tmp state: absent - name: Create working directory - tags: create_bastion + tags: create_bastion,bastion file: path: /var/lib/libvirt/images/tmp state: directory mode: '0755' - name: Check to see if qcow2 file already exists on KVM host - tags: create_bastion + tags: create_bastion,bastion stat: path: /var/lib/libvirt/images/bastion_base.qcow2 register: qcow2_check - name: Copy RHEL qcow2 file to KVM host. This may take a while. 
- tags: create_bastion + tags: create_bastion,bastion copy: src: "{{ env.redhat.path_to_qcow2 }}" dest: /var/lib/libvirt/images/bastion_base.qcow2 - mode: '0775' + mode: '600' + owner: qemu + group: qemu when: qcow2_check.stat.exists == false register: rhel_qcow2_download - name: Remove snapshot for idempotency - tags: create_bastion + tags: create_bastion,bastion file: path: /var/lib/libvirt/images/{{env.hostname.bastion}}-snapshot-cloudimg.qcow2 state: absent - name: Create base image - tags: create_bastion + tags: create_bastion,bastion command: "qemu-img create -b /var/lib/libvirt/images/bastion_base.qcow2 -f qcow2 /var/lib/libvirt/images/{{env.hostname.bastion}}-snapshot-cloudimg.qcow2 {{env.node_resources.bastion.disk_size}}G" register: qemu_create - name: Print result of creation of base image - tags: create_bastion + tags: create_bastion,bastion debug: var: qemu_create +- name: Set bastion qcow2 permissions + tags: create_bastion,bastion + command: chmod 600 /var/lib/libvirt/images/{{env.hostname.bastion}}-snapshot-cloudimg.qcow2 + +- name: Set bastion qcow2 ownership to qemu + tags: create_bastion,bastion + command: chown qemu:qemu /var/lib/libvirt/images/{{env.hostname.bastion}}-snapshot-cloudimg.qcow2 + - name: Get info about qemu image creation - tags: create_bastion + tags: create_bastion,bastion command: "qemu-img info /var/lib/libvirt/images/{{env.hostname.bastion}}-snapshot-cloudimg.qcow2" register: qemu_info - name: Print output from qemu image creation information - tags: create_bastion + tags: create_bastion,bastion debug: var: qemu_info - name: Create instance-id - tags: create_bastion + tags: create_bastion,bastion shell: "echo \"instance-id: $(uuidgen || echo i-abcdefg)\" > /var/lib/libvirt/images/tmp/meta-data" register: uuidgen - name: Print output from uuidgen command - tags: create_bastion + tags: create_bastion,bastion debug: var: uuidgen - name: Use cloud_init.cfg.j2 template to make user-data file - tags: create_bastion + tags: 
create_bastion,bastion template: src: cloud_init.cfg.j2 dest: /var/lib/libvirt/images/tmp/user-data - name: Use network_config_static.cfg.j2 template to make network-config file - tags: create_bastion + tags: create_bastion,bastion template: src: network_config_static.cfg.j2 dest: /var/lib/libvirt/images/tmp/network-config - name: Generate iso file - tags: create_bastion + tags: create_bastion,bastion command: genisoimage -output /var/lib/libvirt/images/{{env.hostname.bastion}}-seed.img -volid cidata -joliet -rock /var/lib/libvirt/images/tmp/meta-data /var/lib/libvirt/images/tmp/network-config /var/lib/libvirt/images/tmp/user-data register: gen_iso +- name: Set bastion seed image permissions + tags: create_bastion,bastion + command: chmod 600 /var/lib/libvirt/images/{{env.hostname.bastion}}-seed.img + +- name: Set bastion seed image ownership to qemu + tags: create_bastion,bastion + command: chown qemu:qemu /var/lib/libvirt/images/{{env.hostname.bastion}}-seed.img + - name: Print output from generating iso - tags: create_bastion + tags: create_bastion,bastion debug: var: gen_iso - name: Check if bastion already exists. Expect an ignored error if it doesn't exist. - tags: create_bastion + tags: create_bastion,bastion community.libvirt.virt: name: "{{ env.hostname.bastion }}" command: status @@ -103,7 +121,7 @@ ignore_errors: true - name: Boot bastion - tags: create_bastion + tags: create_bastion,bastion command: virt-install --name {{ env.hostname.bastion }} \ --virt-type kvm \ @@ -120,11 +138,12 @@ when: bastion_check.failed == true - name: Restart bastion - tags: create_bastion + tags: create_bastion,bastion command: virsh start {{ env.hostname.bastion }} + when: bastion_check.failed == true -- name: Waiting 3 minutes for automated bastion installation and configuration to complete. 
To monitor, use a web browser to go to https://your-kvm-host-ip-address-here:9090, sign in as 'root' and use the password you set for env.access.login.kvm.root_password in env.yaml, then go to the 'Virtual Machines' tab and click on the bastion's hostname. - tags: create_bastion +- name: Waiting 3 minutes for automated bastion installation and configuration to complete. To monitor, use a web browser to go to https://your-kvm-host-ip-address-here:9090, sign in as 'root' and use the password you set for env.access.login.kvm.sudo_pass in env.yaml, then go to the 'Virtual Machines' tab and click on the bastion's hostname. + tags: create_bastion,bastion pause: minutes: 3 when: bastion_check.failed == true \ No newline at end of file From 9f72bdbc575077ce231f15e04efa294cc8648dcf Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:42:34 -0800 Subject: [PATCH 573/885] Updated variable names to match new structure in env.yaml Signed-off-by: Jacob Emery --- roles/create_bastion/templates/cloud_init.cfg.j2 | 10 +++++----- .../templates/network_config_static.cfg.j2 | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/roles/create_bastion/templates/cloud_init.cfg.j2 b/roles/create_bastion/templates/cloud_init.cfg.j2 index fd6fba98..fb9119f0 100644 --- a/roles/create_bastion/templates/cloud_init.cfg.j2 +++ b/roles/create_bastion/templates/cloud_init.cfg.j2 @@ -1,12 +1,12 @@ #cloud-config hostname: {{env.hostname.bastion}} -fqdn: {{env.hostname.bastion}}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }} +fqdn: {{env.hostname.bastion}}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }} manage_etc_hosts: true users: - - name: {{ env.access.login.bastion.username }} + - name: {{ env.access.login.bastion.user }} sudo: ALL=(ALL) NOPASSWD:ALL groups: adm,sys - home: /home/{{ env.access.login.bastion.username }} + home: /home/{{ env.access.login.bastion.user }} shell: /bin/bash lock_passwd: false # allow both 
password auth and cert auth via ssh (console access can still login) @@ -14,8 +14,8 @@ ssh_pwauth: true disable_root: false chpasswd: list: | - root:{{ env.access.login.bastion.root_password }} - {{ env.access.login.bastion.username }}:{{ env.access.login.bastion.password }} + root:{{ env.access.login.bastion.sudo_pass }} + {{ env.access.login.bastion.user }}:{{ env.access.login.bastion.pass }} expire: False #growpart: diff --git a/roles/create_bastion/templates/network_config_static.cfg.j2 b/roles/create_bastion/templates/network_config_static.cfg.j2 index 5a8ff0a4..0846767a 100644 --- a/roles/create_bastion/templates/network_config_static.cfg.j2 +++ b/roles/create_bastion/templates/network_config_static.cfg.j2 @@ -6,5 +6,5 @@ ethernets: addresses: [ {{ env.ip.bastion }} ] gateway4: {{ env.networking.gateway }} nameservers: - search: [ {{ env.install_config.base_domain }} ] + search: [ {{ env.networking.base_domain }} ] addresses: [ {{ env.networking.dns.nameserver }},{{ env.networking.dns.forwarder }} ] \ No newline at end of file From 3c8f3fe95b41cbd7e7831c85649fbe9375e1cb4b Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:43:43 -0800 Subject: [PATCH 574/885] Set permissions and ownership of bootstrap config files Signed-off-by: Jacob Emery --- roles/create_bootstrap/tasks/main.yaml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index b6959b5f..2962d222 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -15,7 +15,7 @@ - name: Print status of bootstrap tags: create_bootstrap debug: - var: bootstrap_check + var: bootstrap_check.msg - name: Start bootstrap installation tags: create_bootstrap @@ -34,4 +34,12 @@ --graphics none \ --wait=-1 \ --noautoconsole - when: bootstrap_check.failed == true \ No newline at end of file + when: bootstrap_check.failed == true + +- name: Set bootstrap 
qcow2 permissions + tags: create_bootstrap + command: chmod 600 /var/lib/libvirt/images/{{env.hostname.bootstrap}}-bootstrap.qcow2 + +- name: Set bootstrap qcow2 ownership to qemu + tags: create_bootstrap + command: chown qemu:qemu /var/lib/libvirt/images/{{env.hostname.bootstrap}}-bootstrap.qcow2 \ No newline at end of file From 806bb580818419c45612c4a7036c369f3b48ce4c Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:45:39 -0800 Subject: [PATCH 575/885] Added creation of infrastructure nodes Signed-off-by: Jacob Emery --- roles/create_compute_nodes/tasks/main.yaml | 27 ++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/roles/create_compute_nodes/tasks/main.yaml b/roles/create_compute_nodes/tasks/main.yaml index f0e53004..b87829d3 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -20,7 +20,30 @@ --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.ip.bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env.ip.compute[i]}}::{{env.networking.gateway}}:{{env.networking.netmask}}:{{env.hostname.compute[i]}}::none:1500 nameserver={{env.networking.dns.nameserver}} coreos.inst.ignition_url=http://{{env.ip.bastion}}:8080/ignition/worker.ign" \ --wait=-1 \ --noautoconsole - with_sequence: start=0 end={{env.install_config.compute.replicas - 1}} stride=1 + with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 loop_control: extended: yes - index_var: i \ No newline at end of file + index_var: i + +- name: Install CoreOS on infra nodes + tags: create_compute_nodes + command: | + virt-install \ + --name {{env.hostname.infra[i]}} \ + --disk size={{env.node_resources.infra.disk_size}} \ + --ram {{env.node_resources.infra.ram}} \ + --cpu host \ + --vcpus {{env.node_resources.infra.vcpu}} \ + --os-type linux \ + --os-variant rhel{{env.node_resources.infra.os_variant}} \ + --network 
network=macvtap-net \ + --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.ip.bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env.ip.infra[i]}}::{{env.networking.gateway}}:{{env.networking.netmask}}:{{env.hostname.infra[i]}}::none:1500 nameserver={{env.networking.dns.nameserver}} coreos.inst.ignition_url=http://{{env.ip.bastion}}:8080/ignition/worker.ign" \ + --wait=-1 \ + --noautoconsole + with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + when: env.hostname.infra is defined + From 7cdfca62ff785edf6e79b634967acd1a8faeeb72 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:46:57 -0800 Subject: [PATCH 576/885] Used different method for getting loop count Signed-off-by: Jacob Emery --- roles/create_control_nodes/tasks/main.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index e96e5d7d..49fe226a 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -21,7 +21,7 @@ --graphics none \ --wait=-1 \ --noautoconsole - with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 + with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 loop_control: extended: yes index_var: i \ No newline at end of file From 7eea7092dd4e2b63d601cd1cf083d3d200d1f86b Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:48:00 -0800 Subject: [PATCH 577/885] Added infrastructure nodes and apps to DNS configuration Signed-off-by: Jacob Emery --- roles/dns/tasks/main.yaml | 76 ++++++++++++++++++++++----- roles/dns/templates/dns-named.conf.j2 | 6 +-- roles/dns/templates/dns.db.j2 | 14 ++--- roles/dns/templates/dns.rev.j2 | 12 ++--- 4 
files changed, 79 insertions(+), 29 deletions(-) diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml index 99a56fc5..0d22d90c 100644 --- a/roles/dns/tasks/main.yaml +++ b/roles/dns/tasks/main.yaml @@ -36,7 +36,7 @@ tags: dns template: src: dns.db.j2 - dest: /var/named/{{env.install_config.metadata_name}}.db + dest: /var/named/{{env.networking.metadata_name}}.db owner: named group: named mode: '0755' @@ -45,10 +45,10 @@ - name: Add control nodes to DNS forwarding file on bastion tags: dns lineinfile: - path: /var/named/{{env.install_config.metadata_name}}.db + path: /var/named/{{env.networking.metadata_name}}.db insertafter: "entries for the control nodes" - line: "{{ env.hostname.control[i] }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. IN A {{ env.ip.control[i] }}" - with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 + line: "{{ env.hostname.control[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. IN A {{ env.ip.control[i] }}" + with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -56,19 +56,43 @@ - name: Add compute nodes to DNS forwarding file on bastion tags: dns lineinfile: - path: /var/named/{{env.install_config.metadata_name}}.db + path: /var/named/{{env.networking.metadata_name}}.db insertafter: "entries for the compute nodes" - line: "{{ env.hostname.compute[i] }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. IN A {{ env.ip.compute[i] }}" - with_sequence: start=0 end={{env.install_config.compute.replicas - 1}} stride=1 + line: "{{ env.hostname.compute[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. 
IN A {{ env.ip.compute[i] }}" + with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 loop_control: extended: yes index_var: i +- name: Add infrastructure nodes to DNS forwarding file on bastion if requested + tags: dns + lineinfile: + path: /var/named/{{env.networking.metadata_name}}.db + insertafter: "entries for extra RHEL VMs" + line: "{{ env.hostname.infra[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. IN A {{ env.ip.infra[i] }}" + with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + when: env.hostname.infra is defined + +- name: Add extra RHEL VM apps to DNS forwarding file on bastion if requested + tags: dns + lineinfile: + path: /var/named/{{env.networking.metadata_name}}.db + insertafter: "entries for extra RHEL VMs" + line: "{{ env.hostname.app[i] }}.{{ env.networking.base_domain }}. IN A {{ env.ip.app[i] }}" + with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + when: env.hostname.app is defined + - name: Template DNS reverse lookup file to bastion tags: dns template: src: dns.rev.j2 - dest: /var/named/{{env.install_config.metadata_name}}.rev + dest: /var/named/{{env.networking.metadata_name}}.rev owner: named group: named mode: '0755' @@ -77,10 +101,10 @@ - name: Add control nodes to DNS reverse lookup file on bastion tags: dns lineinfile: - path: /var/named/{{env.install_config.metadata_name}}.rev + path: /var/named/{{env.networking.metadata_name}}.rev insertafter: "PTR Record IP address to Hostname" - line: "{{ env.ip.control[i].split('.').3 }} IN PTR {{ env.hostname.control[i] }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}." 
- with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 + line: "{{ env.ip.control[i].split('.').3 }} IN PTR {{ env.hostname.control[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}." + with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -88,13 +112,37 @@ - name: Add compute nodes to DNS reverse lookup file on bastion tags: dns lineinfile: - path: /var/named/{{env.install_config.metadata_name}}.rev + path: /var/named/{{env.networking.metadata_name}}.rev + insertafter: "PTR Record IP address to Hostname" + line: "{{ env.ip.compute[i].split('.').3 }} IN PTR {{ env.hostname.compute[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}." + with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Add infrastructure nodes to DNS reverse lookup file on bastion + tags: dns + lineinfile: + path: /var/named/{{env.networking.metadata_name}}.rev + insertafter: "PTR Record IP address to Hostname" + line: "{{ env.ip.infra[i].split('.').3 }} IN PTR {{ env.hostname.infra[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}." + with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + when: env.hostname.infra is defined + +- name: Add extra RHEL VM apps to DNS reverse lookup file on bastion if requested + tags: dns + lineinfile: + path: /var/named/{{env.networking.metadata_name}}.rev insertafter: "PTR Record IP address to Hostname" - line: "{{ env.ip.compute[i].split('.').3 }} IN PTR {{ env.hostname.compute[i] }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}." - with_sequence: start=0 end={{env.install_config.compute.replicas - 1}} stride=1 + line: "{{ env.ip.app[i].split('.').3 }} IN PTR {{ env.hostname.app[i] }}.{{ env.networking.base_domain }}." 
+ with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 loop_control: extended: yes index_var: i + when: env.hostname.app is defined - name: Restart named to update changes made to DNS tags: dns diff --git a/roles/dns/templates/dns-named.conf.j2 b/roles/dns/templates/dns-named.conf.j2 index 89918fb3..0f872308 100644 --- a/roles/dns/templates/dns-named.conf.j2 +++ b/roles/dns/templates/dns-named.conf.j2 @@ -60,9 +60,9 @@ include "/etc/named.rfc1912.zones"; include "/etc/named.root.key"; //forward zone -zone "{{ env.install_config.base_domain }}" IN { +zone "{{ env.networking.base_domain }}" IN { type master; - file "/var/named/{{ env.install_config.metadata_name }}.db"; + file "/var/named/{{ env.networking.metadata_name }}.db"; allow-update { any; }; allow-query { any; }; }; @@ -70,7 +70,7 @@ zone "{{ env.install_config.base_domain }}" IN { //backward zone zone "{{ bastion_split_ip.2 }}.{{ bastion_split_ip.1 }}.{{ bastion_split_ip.0 }}.in-addr.arpa" IN { type master; - file "/var/named/{{ env.install_config.metadata_name }}.rev"; + file "/var/named/{{ env.networking.metadata_name }}.rev"; allow-update { any; }; allow-query { any; }; }; diff --git a/roles/dns/templates/dns.db.j2 b/roles/dns/templates/dns.db.j2 index 0412bc49..7ad7b6ea 100644 --- a/roles/dns/templates/dns.db.j2 +++ b/roles/dns/templates/dns.db.j2 @@ -1,5 +1,5 @@ $TTL 86400 -@ IN SOA {{ env.hostname.bastion }}.{{ env.install_config.base_domain }}. admin.{{ env.install_config.base_domain }}.( +@ IN SOA {{ env.hostname.bastion }}.{{ env.networking.base_domain }}. admin.{{ env.networking.base_domain }}.( 2020021821 ;Serial 3600 ;Refresh 1800 ;Retry @@ -8,23 +8,25 @@ $TTL 86400 ) ;Name Server / Bastion Information -@ IN NS {{ env.hostname.bastion }}.{{ env.install_config.base_domain }}. +@ IN NS {{ env.hostname.bastion }}.{{ env.networking.base_domain }}. ;IP Address for Name Server {{ env.hostname.bastion }} IN A {{ env.ip.bastion }} ;entry for bootstrap host. 
-{{ env.hostname.bootstrap }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. IN A {{ env.ip.bootstrap }} +{{ env.hostname.bootstrap }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. IN A {{ env.ip.bootstrap }} ;entries for the control nodes ;entries for the compute nodes +;entries for extra RHEL VMs (if requested) + ;The api identifies the IP of your load balancer. -api.{{ env.install_config.metadata_name }} IN CNAME {{ env.hostname.bastion }}.{{ env.install_config.base_domain }}. -api-int.{{ env.install_config.metadata_name }} IN CNAME {{ env.hostname.bastion }}.{{ env.install_config.base_domain }}. +api.{{ env.networking.metadata_name }} IN CNAME {{ env.hostname.bastion }}.{{ env.networking.base_domain }}. +api-int.{{ env.networking.metadata_name }} IN CNAME {{ env.hostname.bastion }}.{{ env.networking.base_domain }}. ;The wildcard also identifies the load balancer. -*.apps.{{ env.install_config.metadata_name }} IN CNAME {{ env.hostname.bastion }}.{{ env.install_config.base_domain }}. +*.apps.{{ env.networking.metadata_name }} IN CNAME {{ env.hostname.bastion }}.{{ env.networking.base_domain }}. ;EOF \ No newline at end of file diff --git a/roles/dns/templates/dns.rev.j2 b/roles/dns/templates/dns.rev.j2 index 212ab21e..42943bbb 100644 --- a/roles/dns/templates/dns.rev.j2 +++ b/roles/dns/templates/dns.rev.j2 @@ -1,5 +1,5 @@ $TTL 86400 -@ IN SOA {{ env.hostname.bastion }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. admin.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }} ( +@ IN SOA {{ env.hostname.bastion }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. 
admin.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }} ( 2020011800 ;Serial 3600 ;Refresh 1800 ;Retry @@ -7,13 +7,13 @@ $TTL 86400 86400 ;Minimum TTL ) ;Name Server Information -@ IN NS {{ env.hostname.bastion }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. +@ IN NS {{ env.hostname.bastion }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. {{ env.hostname.bastion }} IN A {{ env.ip.bastion }} ;Reverse lookup for Name Server -{{ bastion_split_ip.3 }} IN PTR {{ env.hostname.bastion }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. +{{ bastion_split_ip.3 }} IN PTR {{ env.hostname.bastion }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. ;PTR Record IP address to Hostname -{{ bootstrap_split_ip.3 }} IN PTR {{ env.hostname.bootstrap }}.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. -{{ bastion_split_ip.3 }} IN PTR api-int.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. -{{ bastion_split_ip.3 }} IN PTR api.{{ env.install_config.metadata_name }}.{{ env.install_config.base_domain }}. \ No newline at end of file +{{ bootstrap_split_ip.3 }} IN PTR {{ env.hostname.bootstrap }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. +{{ bastion_split_ip.3 }} IN PTR api-int.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. +{{ bastion_split_ip.3 }} IN PTR api.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. \ No newline at end of file From 8be1c51d65a427c60d1f864a5517d75040518b30 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:49:09 -0800 Subject: [PATCH 578/885] Set ownership and permissions of OpenShift config files. 
Signed-off-by: Jacob Emery --- roles/get_ocp/tasks/main.yaml | 116 ++++++++++++++++++++++++---------- 1 file changed, 84 insertions(+), 32 deletions(-) diff --git a/roles/get_ocp/tasks/main.yaml b/roles/get_ocp/tasks/main.yaml index fbcaac97..df7d1da8 100644 --- a/roles/get_ocp/tasks/main.yaml +++ b/roles/get_ocp/tasks/main.yaml @@ -6,10 +6,13 @@ - name: Create directory bin for mirrors tags: get_ocp + become: true file: path: /var/www/html/bin state: directory mode: '0755' + owner: "{{ env.access.login.bastion.user }}" + group: "{{ env.access.login.bastion.user }}" - name: Check to see if rootfs already exists on bastion tags: get_ocp @@ -17,7 +20,7 @@ path: /var/www/html/bin/rhcos-live-rootfs.s390x.img register: rootfs_check -- name: Get Red Hat CoreOS rootfs file +- name: Get Red Hat CoreOS rootfs file if it's not there already. tags: get_ocp get_url: url: "{{ env.coreos.rootfs }}" @@ -28,94 +31,143 @@ - name: Delete OCP download directory for idempotency, because ignition files deprecate after 24 hours. 
tags: get_ocp + become: yes file: - path: /ocpinst + path: /home/{{ env.access.login.bastion.user }}/ocpinst state: absent - name: Create OCP download directory tags: get_ocp file: - path: /ocpinst/ + path: /home/{{ env.access.login.bastion.user }}/ocpinst state: directory - name: Unzip OCP client and installer tags: get_ocp ansible.builtin.unarchive: src: "{{ item }}" - dest: /ocpinst/ + dest: /home/{{ env.access.login.bastion.user }}/ocpinst/ remote_src: yes loop: - "{{ env.openshift.client }}" - "{{ env.openshift.installer }}" -- name: Copy kubectl and oc files to bastion +- name: Copy kubectl, oc, and openshift-install binaries to /usr/local/bin tags: get_ocp + become: yes ansible.builtin.copy: - src: /ocpinst/{{item}} + src: /home/{{ env.access.login.bastion.user }}/ocpinst/{{item}} dest: /usr/local/bin/{{item}} - remote_src: yes - owner: root - group: root mode: '0755' + remote_src: yes loop: - kubectl - oc - -- name: Copy openshift-install file to bastion - tags: get_ocp - ansible.builtin.copy: - src: /ocpinst/openshift-install - dest: /usr/local/bin/openshift-install - remote_src: yes - owner: root - group: root - mode: '0755' - -- name: Fetch SSH key from bastion for use in template - tags: getocp,bastion - ansible.builtin.fetch: - src: ~/.ssh/id_rsa.pub - dest: roles/get_ocp/files/ocp_ssh_pub.yaml - flat: yes + - openshift-install - name: Use template file to create install-config tags: get_ocp template: src: install-config.yaml.j2 - dest: /ocpinst/install-config.yaml + dest: "{{ item }}" force: yes - backup: yes + loop: + - /home/{{ env.access.login.bastion.user }}/ocpinst/install-config.yaml + - /home/{{ env.access.login.bastion.user }}/ocpinst/install-config-backup.yaml + +- name: Capture OCP public key + tags: get_ocp + command: cat /home/{{ env.access.login.bastion.user }}/.ssh/id_rsa.pub + register: ocp_pub_key + +- name: Place SSH key in install-config + tags: get_ocp + lineinfile: + line: "sshKey: '{{ ocp_pub_key.stdout }}'" + path: "{{ item }}" + 
loop: + - /home/{{ env.access.login.bastion.user }}/ocpinst/install-config.yaml + - /home/{{ env.access.login.bastion.user }}/ocpinst/install-config-backup.yaml - name: Create manifests tags: get_ocp - command: /ocpinst/openshift-install create manifests --dir=/ocpinst/ + command: /home/{{ env.access.login.bastion.user }}/ocpinst/openshift-install create manifests --dir=/home/{{ env.access.login.bastion.user }}/ocpinst/ become: yes - name: Set masters schedulable parameter to false tags: get_ocp + become: yes replace: - path: /ocpinst/manifests/cluster-scheduler-02-config.yml + path: /home/{{ env.access.login.bastion.user }}/ocpinst/manifests/cluster-scheduler-02-config.yml regexp: ': true' replace: ': false' +- name: Set permissions for ocpinst directory contents to bastion admin user + tags: get_ocp + become: yes + command: chmod 0755 /home/{{ env.access.login.bastion.user }}/ocpinst/{{item}} + loop: + - manifests + - openshift + - .openshift_install.log + - .openshift_install_state.json + +- name: Set ownership of ocpinst directory contents to bastion admin user + tags: get_ocp + become: yes + command: chown {{ env.access.login.bastion.user }}:{{ env.access.login.bastion.user }} /home/{{ env.access.login.bastion.user }}/ocpinst/{{item}} + loop: + - manifests + - openshift + - .openshift_install.log + - .openshift_install_state.json + - name: Create ignition files tags: get_ocp - command: /ocpinst/openshift-install create ignition-configs --dir=/ocpinst/ become: yes + command: /home/{{ env.access.login.bastion.user }}/ocpinst/openshift-install create ignition-configs --dir=/home/{{ env.access.login.bastion.user }}/ocpinst/ + +- name: Set permissions of ignitions and related files to bastion admin user + tags: get_ocp + become: yes + command: chmod 0755 /home/{{ env.access.login.bastion.user }}/ocpinst/{{item}} + loop: + - bootstrap.ign + - master.ign + - worker.ign + - auth + - metadata.json + +- name: Set ownership of ignitions and related files to bastion admin 
user + tags: get_ocp + become: yes + command: chown {{ env.access.login.bastion.user }}:{{ env.access.login.bastion.user }} /home/{{ env.access.login.bastion.user }}/ocpinst/{{item}} + loop: + - bootstrap.ign + - master.ign + - worker.ign + - auth + - auth/kubeconfig + - auth/kubeadmin-password + - metadata.json - name: create ignition directory on webserver tags: get_ocp + become: yes file: path: /var/www/html/ignition state: directory - name: Copy ignition files to web server tags: get_ocp + become: yes copy: - src: /ocpinst/{{ item }}.ign + src: /home/{{ env.access.login.bastion.user }}/ocpinst/{{ item }}.ign dest: /var/www/html/ignition remote_src: yes mode: '775' + group: "{{ env.access.login.bastion.user }}" + owner: "{{ env.access.login.bastion.user }}" loop: - bootstrap - master From f37d0979cc2f36b5250261b2f3794a6d91533075 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:50:24 -0800 Subject: [PATCH 579/885] Changed variable names to match new structure in env.yaml and used new method for pulling SSH key Signed-off-by: Jacob Emery --- roles/get_ocp/templates/install-config.yaml.j2 | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/roles/get_ocp/templates/install-config.yaml.j2 b/roles/get_ocp/templates/install-config.yaml.j2 index 0dfb74bb..06067293 100644 --- a/roles/get_ocp/templates/install-config.yaml.j2 +++ b/roles/get_ocp/templates/install-config.yaml.j2 @@ -1,17 +1,17 @@ apiVersion: {{ env.install_config.api_version }} -baseDomain: {{ env.install_config.base_domain }} +baseDomain: {{ env.networking.base_domain }} compute: - hyperthreading: {{ env.install_config.compute.hyperthreading }} name: worker - replicas: {{ env.install_config.compute.replicas }} + replicas: {{(env.ip.compute | length)}} architecture: {{ env.install_config.compute.architecture }} controlPlane: hyperthreading: {{ env.install_config.control.hyperthreading }} name: master - replicas: {{ env.install_config.control.replicas }} + 
replicas: {{(env.ip.control | length)}} architecture: {{ env.install_config.control.architecture }} metadata: - name: {{ env.install_config.metadata_name }} + name: {{ env.networking.metadata_name }} networking: clusterNetwork: - cidr: {{ env.install_config.cluster_network.cidr }} @@ -22,5 +22,4 @@ networking: platform: none: {} fips: {{ env.install_config.fips }} -pullSecret: '{{ env.install_config.pull_secret }}' -sshKey: '{{ lookup('file', 'roles/get_ocp/files/ocp_ssh_pub.yaml') }}' \ No newline at end of file +pullSecret: '{{ env.redhat.pull_secret }}' \ No newline at end of file From 77eaaa391af0d20311a9b073ca53aa7b0a3ee810 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:50:50 -0800 Subject: [PATCH 580/885] Added infra nodes and apps to haproxy config Signed-off-by: Jacob Emery --- roles/haproxy/tasks/main.yaml | 35 +++++++++++++++++++++++++++++++---- 1 file changed, 31 insertions(+), 4 deletions(-) diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index e6c369bb..c26b2606 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -24,7 +24,7 @@ line: " server {{ env.hostname.control[i] }} {{env.ip.control[i]}}:6443 check inter 1s" path: /etc/haproxy/haproxy.cfg insertafter: "6443 section" - with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 + with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -35,7 +35,7 @@ line: " server {{ env.hostname.control[i] }} {{env.ip.control[i]}}:22623 check inter 1s" path: /etc/haproxy/haproxy.cfg insertafter: "22623 section" - with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 + with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -46,7 +46,18 @@ line: " server {{ env.hostname.compute[i] }} {{ env.ip.compute[i] }}:443 check inter 1s" path: /etc/haproxy/haproxy.cfg 
insertafter: "443 section" - with_sequence: start=0 end={{env.install_config.compute.replicas - 1}} stride=1 + with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Add infrastructure node information to 443 section in haproxy config + tags: haproxy + lineinfile: + line: " server {{ env.hostname.infra[i] }} {{ env.ip.infra[i] }}:443 check inter 1s" + path: /etc/haproxy/haproxy.cfg + insertafter: "443 section" + with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -56,7 +67,17 @@ lineinfile: line: " server {{ env.hostname.compute[i] }} {{ env.ip.compute[i] }}:80 check inter 1s" path: /etc/haproxy/haproxy.cfg - with_sequence: start=0 end={{env.install_config.compute.replicas - 1}} stride=1 + with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Add infrastructure node information to 80 section in haproxy config + tags: haproxy + lineinfile: + line: " server {{ env.hostname.infra[i] }} {{ env.ip.infra[i] }}:80 check inter 1s" + path: /etc/haproxy/haproxy.cfg + with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -65,6 +86,12 @@ tags: haproxy command: setsebool -P haproxy_connect_any 1 +- name: Enable haproxy + tags: dns + ansible.builtin.systemd: + name: haproxy + enabled: yes + - name: Restart haproxy tags: haproxy systemd: From bf963ebd3b6adbb52c0983e464468c001d9a5b33 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:51:08 -0800 Subject: [PATCH 581/885] Added infra nodes and apps to haproxy config Signed-off-by: Jacob Emery --- roles/haproxy/templates/haproxy.cfg.j2 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/haproxy/templates/haproxy.cfg.j2 index c90cd32c..df2d8eb9 100644 --- 
a/roles/haproxy/templates/haproxy.cfg.j2 +++ b/roles/haproxy/templates/haproxy.cfg.j2 @@ -27,8 +27,8 @@ frontend stats stats hide-version stats refresh 30s stats show-node - stats show-desc Stats for {{env.install_config.metadata_name}} cluster - stats auth admin:{{env.install_config.metadata_name}} + stats show-desc Stats for {{env.networking.metadata_name}} cluster + stats auth admin:{{env.networking.metadata_name}} stats uri /stats listen api-server-6443 bind *:6443 From 032d138ae724eab3904226e91a6a8fd12918c7b3 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:53:03 -0800 Subject: [PATCH 582/885] Switched from outdated dnf to yum for installing dependencies and updated task names to explain better. Signed-off-by: Jacob Emery --- roles/install_dependencies/tasks/main.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/roles/install_dependencies/tasks/main.yaml b/roles/install_dependencies/tasks/main.yaml index 7a3c844d..f3cc77b7 100644 --- a/roles/install_dependencies/tasks/main.yaml +++ b/roles/install_dependencies/tasks/main.yaml @@ -1,9 +1,9 @@ --- -- name: Gather facts to get OS family to see which setup script to run +- name: Gather facts to get OS family to see which task to run ansible.builtin.gather_facts: -- name: install Ansible dependencies and packages +- name: Check for latest versions then install dependencies and packages for Mac workstations, skip if not Mac. shell: "{{ item }}" loop: - ansible-galaxy collection install community.general @@ -14,14 +14,14 @@ - brew install expect when: ansible_facts['os_family'] == "Darwin" -- name: install Ansible dependencies and packages +- name: Check for latest versions then install dependencies and packages for Debian workstations, skip if not Debian. 
shell: "{{ item }}" loop: - ansible-galaxy collection install community.general - ansible-galaxy collection install community.crypto - ansible-galaxy collection install ansible.posix - ansible-galaxy collection install community.libvirt - - sudo dnf install openssh -y - - sudo dnf install expect -y + - sudo yum install openssh -y + - sudo yum install expect -y when: ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Debian" \ No newline at end of file From 64803a55d46a8e446f8d889d8ae4ff3a0b038b3a Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:53:34 -0800 Subject: [PATCH 583/885] Added port 53/tcp to firewall Signed-off-by: Jacob Emery --- roles/set_firewall/tasks/main.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index dd34dde6..1f4af6ef 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -13,6 +13,7 @@ - 4443/tcp - 6443/tcp - 22623/tcp + - 53/tcp - 53/udp - name: Permit traffic in default zone for http and https From 9ec6c6ac545718d226b52bc7b2e6a49235ddb657 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:54:42 -0800 Subject: [PATCH 584/885] Added infra nodes and apps to set_inventory role Signed-off-by: Jacob Emery --- roles/set_inventory/tasks/main.yaml | 82 +++++++++++++---------------- 1 file changed, 37 insertions(+), 45 deletions(-) diff --git a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml index f71da992..76df342c 100644 --- a/roles/set_inventory/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -8,12 +8,15 @@ tags: setup blockinfile: path: inventory + marker: "#{mark} ansible managed block from set_inventory role" + marker_begin: "start of" + marker_end: "end of" block: | [kvm_host] - {{env.ip.kvm}} + {{env.ip.kvm}} ansible_connection=ssh ansible_ssh_user={{env.access.login.kvm.user}} ansible_ssh_pass={{env.access.login.kvm.pass}} 
ansible_become_password={{env.access.login.kvm.sudo_pass}} [bastion] - {{env.ip.bastion}} + {{env.ip.bastion}} ansible_connection=ssh ansible_ssh_user={{env.access.login.bastion.user}} ansible_ssh_pass={{env.access.login.bastion.pass}} ansible_become_password={{env.access.login.bastion.sudo_pass}} [bootstrap] {{env.ip.bootstrap}} @@ -39,59 +42,48 @@ line: "{{ item }}" loop: "{{env.ip.compute}}" -- name: check inventory setup - tags: setup - command: ansible-inventory --list - register: inv_check - failed_when: inv_check.rc != 0 - -- name: Gather facts to re-read inventory after changes made to inventory +- name: Add infrastructure nodes group to inventory if set tags: setup - ansible.builtin.gather_facts: - -- name: Refresh inventory - tags: setup - meta: refresh_inventory + lineinfile: + path: inventory + line: "[infra]" + when: env.ip.infra is defined -- name: fill ansible.cfg with default ansible password +- name: Add infrastructure nodes' IP addresses to inventory tags: setup - ansible.builtin.lineinfile: - path: ansible.cfg - insertafter: '\[defaults\]' - line: ansible_password={{env.access.login.kvm.root_password}} + lineinfile: + path: inventory + insertafter: "infra" + line: "{{ item }}" + loop: "{{env.ip.infra}}" + when: env.ip.infra is defined -- name: fill ansible.cfg with default ansible user +- name: Add extra RHEL VM apps group to inventory if set tags: setup - ansible.builtin.lineinfile: - path: ansible.cfg - insertafter: '\[defaults\]' - line: remote_user=root + lineinfile: + path: inventory + line: "[app]" + when: env.ip.app is defined -- name: delete ocp_ssh_pub file if it exists already to ensure idempotence +- name: Add extra RHEL VM apps' IP addresses to inventory tags: setup - file: - state: absent - path: roles/get_ocp/files/ocp_ssh_pub + lineinfile: + path: inventory + insertafter: "app" + line: "{{ item }} ansible_connection=ssh ansible_ssh_user={{env.access.login.app.user}} ansible_ssh_pass={{env.access.login.app.pass}} 
ansible_become_password={{env.access.login.app.sudo_pass}}" + loop: "{{env.ip.app}}" + when: env.ip.app is defined -- name: create ocp_ssh_pub if it needs to be +- name: check inventory setup tags: setup - file: - path: roles/get_ocp/files/ocp_ssh_pub - mode: '0755' - state: touch + command: ansible-inventory --list + register: inv_check + failed_when: inv_check.rc != 0 -- name: comment out auto-attach RHEL subscription role calls in main.yaml if requested +- name: Gather facts to re-read inventory after changes made to inventory tags: setup - replace: - path: main.yaml - regexp: "- attach_subscription" - replace: "#- attach_subscription" - when: not env.redhat.attach_subscription + ansible.builtin.gather_facts: -- name: Comment out DNS setup on bastion role calls in main.yaml if requested +- name: Refresh inventory tags: setup - replace: - regexp: "- dns" - path: main.yaml - replace: "#- dns" - when: not env.networking.dns.setup_on_bastion \ No newline at end of file + meta: refresh_inventory \ No newline at end of file From fd4ce1af2fd0e2931d443536c8ae796504394872 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:57:19 -0800 Subject: [PATCH 585/885] Changed Ansible connection method to be primarily SSH with password authentication because Ansible needed the servers' passwords either way since this is first-time set-up. 
Still copy's SSH keys because that is useful for configuration later Signed-off-by: Jacob Emery --- roles/ssh_copy_id/tasks/main.yaml | 4 +- .../ssh_copy_id/templates/ssh-copy-id.exp.j2 | 4 +- roles/ssh_copy_id/vars/path_to_key_pair.yaml | 3 +- roles/ssh_key_gen/tasks/main.yaml | 51 +++++------------ roles/ssh_ocp_key_gen/tasks/main.yaml | 56 ++++++++----------- 5 files changed, 44 insertions(+), 74 deletions(-) diff --git a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index 862d20ac..c978a313 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -21,9 +21,9 @@ tags: ssh_copy_id, ssh lineinfile: path: "~/.ssh/known_hosts" - line: "{{ ssh_target_ip }}" + line: "{{ ssh_target[0] }}" state: absent - delegate_to: localhost + delegate_to: workstation - name: Use template file to create expect script tags: ssh_copy_id, ssh diff --git a/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 b/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 index 288c6b1c..d26319d7 100644 --- a/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 +++ b/roles/ssh_copy_id/templates/ssh-copy-id.exp.j2 @@ -10,7 +10,7 @@ if {$force_conservative} { } set timeout -1 -spawn ssh-copy-id -o StrictHostKeyChecking=no -i {{path_to_key_pair}} root@{{ ssh_target_ip }} +spawn ssh-copy-id -o StrictHostKeyChecking=no -i {{path_to_key_pair}} {{ ssh_target[1] }}@{{ ssh_target[0] }} expect "*assword: " -send -- "{{env.access.login.kvm.root_password}}\r" +send -- "{{ ssh_target[2] }}\r" expect eof \ No newline at end of file diff --git a/roles/ssh_copy_id/vars/path_to_key_pair.yaml b/roles/ssh_copy_id/vars/path_to_key_pair.yaml index 5eae78d0..f78991d6 100644 --- a/roles/ssh_copy_id/vars/path_to_key_pair.yaml +++ b/roles/ssh_copy_id/vars/path_to_key_pair.yaml @@ -1 +1,2 @@ -# Will be filled in by ssh-key-gen role \ No newline at end of file +# Will be filled in by ssh-key-gen role +path_to_key_pair: /Users/jacob/.ssh/ansible.pub diff --git 
a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index 5ad1aacf..d321a745 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -1,4 +1,7 @@ --- +- name: Load in variables from env.yaml + tags: ssh_key_gen, ssh + include_vars: env.yaml - name: Check to see if local SSH directory exists tags: ssh_key_gen, ssh @@ -6,11 +9,6 @@ path: "~/.ssh" register: ssh_directory_exists_check -- name: Print results of SSH directory check - tags: ssh_key_gen, ssh - debug: - var: ssh_directory_exists_check - - name: Create SSH local directory if it doesn't already exist tags: ssh_key_gen, ssh file: @@ -18,25 +16,21 @@ state: directory mode: '700' register: ssh_directory_creation - when: ssh_directory_exists_check is defined and ssh_directory_exists_check.stat.exists == false + when: ssh_directory_exists_check.stat.exists == false -- name: Print results of SSH directory creation +- name: Generate an OpenSSH keypair with the default values (4096 bits, RSA) tags: ssh_key_gen, ssh - debug: - var: ssh_directory_creation - -- name: Check SSH if key pair files exist - stat: - path: "~/.ssh/{{item}}" - register: ssh_key_file_exists_check - with_items: - - "ansible" - - "ansible.pub" + community.crypto.openssh_keypair: + path: ~/.ssh/ansible + passphrase: "" + comment: "{{ env.access.ssh.ansible_key_comment }}" + regenerate: always + register: ssh_key_creation -- name: Print results of SSH key pair files check +- name: Print results of ssh key pair creation tags: ssh_key_gen, ssh debug: - var: ssh_key_file_exists_check.results[0].stat.exists + var: ssh_key_creation - name: Create a vars file for path to key tags: ssh_key_gen, ssh @@ -47,20 +41,5 @@ - name: Save path to key pair for use in ssh-copy-id role tags: ssh_key_gen, ssh lineinfile: - line: "path_to_key_pair: {{ssh_key_file_exists_check.results[1].invocation.module_args.path}}" - path: roles/ssh_copy_id/vars/path_to_key_pair.yaml - -- name: Generate an OpenSSH keypair with the 
default values (4096 bits, RSA) - tags: ssh_key_gen, ssh - community.crypto.openssh_keypair: - path: ~/.ssh/ansible - passphrase: "" - comment: "{{ env.access.ssh.ansible.comment }}" - register: ssh_key_creation - when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false - -- name: Print results of ssh key pair creation - tags: ssh_key_gen, ssh - debug: - var: ssh_key_creation - when: ssh_key_creation.changed == true \ No newline at end of file + line: "path_to_key_pair: {{ssh_key_creation.filename}}.pub" + path: roles/ssh_copy_id/vars/path_to_key_pair.yaml \ No newline at end of file diff --git a/roles/ssh_ocp_key_gen/tasks/main.yaml b/roles/ssh_ocp_key_gen/tasks/main.yaml index 33485989..d2ed9265 100644 --- a/roles/ssh_ocp_key_gen/tasks/main.yaml +++ b/roles/ssh_ocp_key_gen/tasks/main.yaml @@ -7,56 +7,46 @@ - name: Check to see if local SSH directory exists tags: ssh_ocp_key_gen, ssh stat: - path: ~/.ssh + path: /home/{{ env.access.login.bastion.user }}/.ssh register: ssh_directory_exists_check -- name: Print results of SSH directory check - tags: ssh_ocp_key_gen, ssh - debug: - var: ssh_directory_exists_check - - name: Create SSH local directory if it doesn't already exist tags: ssh_ocp_key_gen, ssh file: - path: ~/.ssh + path: /home/{{ env.access.login.bastion.user }}/.ssh state: directory mode: "0700" register: ssh_directory_creation - when: ssh_directory_exists_check is defined and ssh_directory_exists_check.stat.exists == false - -- name: Print results of SSH directory creation - tags: ssh_ocp_key_gen, ssh - debug: - var: ssh_directory_creation - -- name: Check SSH key pair files exist - tags: ssh_ocp_key_gen, ssh - stat: - path: ~/.ssh/{{item}} - register: ssh_key_file_exists_check - with_items: - - "id_rsa" - - "id_rsa.pub" - -- name: Print results of SSH key pair files check - tags: ssh_ocp_key_gen, ssh - debug: - var: 
ssh_key_file_exists_check.results[1].stat.exists + when: ssh_directory_exists_check.stat.exists == false - name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key if it doesn't exist already tags: ssh_ocp_key_gen, ssh community.crypto.openssh_keypair: - path: ~/.ssh/id_rsa + path: /home/{{ env.access.login.bastion.user }}/.ssh/id_rsa backend: opensshbin - owner: root + owner: "{{ env.access.login.bastion.user }}" passphrase: "" - comment: "{{ env.access.ssh.ocp.comment }}" - regenerate: full_idempotence + comment: "{{ env.access.ssh.ocp_key_comment }}" + regenerate: always register: ssh_ocp - when: ssh_key_file_exists_check is defined and ssh_key_file_exists_check.results[0].stat.exists == false and ssh_key_file_exists_check.results[1].stat.exists == false - name: Print results of SSH key generation tags: ssh_ocp_key_gen, ssh debug: var: ssh_ocp.public_key - when: ssh_ocp.changed == true \ No newline at end of file + when: ssh_ocp.changed == true + +- name: Set SSH key permissions + tags: ssh_ocp_key_gen, ssh + command: chmod 600 /home/{{ env.access.login.bastion.user }}/.ssh/{{item}} + loop: + - id_rsa + - id_rsa.pub + +- name: Set SSH key ownership + tags: ssh_ocp_key_gen, ssh + command: chown {{ env.access.login.bastion.user }}:{{ env.access.login.bastion.user }} /home/{{ env.access.login.bastion.user }}/.ssh/{{item}} + loop: + - id_rsa + - id_rsa.pub + From 4cd622e50afb21606f5918eb9c79c907f54d5570 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:58:25 -0800 Subject: [PATCH 586/885] Changed variable names to match new structure in env.yaml and moved kubeconfig to playbook level Signed-off-by: Jacob Emery --- roles/wait_for_install_complete/tasks/main.yaml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/roles/wait_for_install_complete/tasks/main.yaml b/roles/wait_for_install_complete/tasks/main.yaml index f794138c..6da2e81d 100644 --- 
a/roles/wait_for_install_complete/tasks/main.yaml +++ b/roles/wait_for_install_complete/tasks/main.yaml @@ -2,9 +2,7 @@ - name: Wait for OpenShift install to complete tags: wait_for_install_complete - environment: - KUBECONFIG: "/ocpinst/auth/kubeconfig" - shell: /ocpinst/openshift-install --dir=/ocpinst wait-for install-complete + shell: /home/{{ env.access.login.bastion.user }}/ocpinst/openshift-install --dir=/home/{{ env.access.login.bastion.user }}/ocpinst wait-for install-complete register: wait_install_complete until: ("Install complete!" in wait_install_complete.stderr) retries: 120 @@ -13,11 +11,11 @@ - name: Set OCP URL tags: wait_for_install_complete set_fact: - ocp_url: https://console-openshift-console.apps.{{env.install_config.metadata_name}}.{{env.install_config.base_domain}} + ocp_url: https://console-openshift-console.apps.{{env.networking.metadata_name}}.{{env.networking.base_domain}} - name: Set OCP password tags: wait_for_install_complete - command: "cat /ocpinst/auth/kubeadmin-password" + command: "cat /home/{{ env.access.login.bastion.user }}/ocpinst/auth/kubeadmin-password" register: ocp_passwd - name: Congratulations! OpenShift installation complete. Use the information below for first-time login. 
From 4e3b59e5289dc867dd7586cbc0bbde285aac3c89 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 16:59:22 -0800 Subject: [PATCH 587/885] Changed name of local workstation from 'localhost' to 'workstation' because 'localhost' is a universal term Signed-off-by: Jacob Emery --- setup.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.yaml b/setup.yaml index a540e74d..2133f0b4 100644 --- a/setup.yaml +++ b/setup.yaml @@ -1,7 +1,7 @@ --- -- hosts: localhost - tags: localhost, prep +- hosts: workstation + tags: workstation, prep connection: local become: false gather_facts: yes From 89ba0bc16a721799ab3c50856b530063be547c05 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 17:00:51 -0800 Subject: [PATCH 588/885] Added infra nodes and updated variables to match new structure in env.yaml. Signed-off-by: Jacob Emery --- roles/teardown_vms/tasks/main.yaml | 34 ++++++++++++++++++++++++------ 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/roles/teardown_vms/tasks/main.yaml b/roles/teardown_vms/tasks/main.yaml index 324f9e61..682c85d0 100644 --- a/roles/teardown_vms/tasks/main.yaml +++ b/roles/teardown_vms/tasks/main.yaml @@ -1,6 +1,6 @@ --- -- name: Destroy bastion for full, skip for partial teardown.. Expect ignored errors if it is already destroyed. +- name: Destroy bastion for full, skip for partial teardown. Expect ignored errors if it is already destroyed. 
community.libvirt.virt: name: "{{ env.hostname.bastion }}" command: destroy @@ -30,7 +30,7 @@ community.libvirt.virt: name: "{{ env.hostname.control[i] }}" command: destroy - with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 + with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -40,7 +40,7 @@ community.libvirt.virt: name: "{{ env.hostname.control[i] }}" command: undefine - with_sequence: start=0 end={{env.install_config.control.replicas - 1}} stride=1 + with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -50,7 +50,7 @@ community.libvirt.virt: name: "{{ env.hostname.compute[i] }}" command: destroy - with_sequence: start=0 end={{env.install_config.compute.replicas - 1}} stride=1 + with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -60,8 +60,30 @@ community.libvirt.virt: name: "{{ env.hostname.compute[i] }}" command: undefine - with_sequence: start=0 end={{env.install_config.compute.replicas - 1}} stride=1 + with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 loop_control: extended: yes index_var: i - ignore_errors: yes \ No newline at end of file + ignore_errors: yes + +- name: Destroy running infrastructure nodes, if defined. Expect ignored errors if some VMs are already destroyed. + community.libvirt.virt: + name: "{{ env.hostname.infra[i] }}" + command: destroy + with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + ignore_errors: yes + when: env.hostname.infra is defined + +- name: Undefine remaining infrastructure nodes. Expect ignored errors if some VMs are already undefined. 
+ community.libvirt.virt: + name: "{{ env.hostname.infra[i] }}" + command: undefine + with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + ignore_errors: yes + when: env.hostname.infra is defined \ No newline at end of file From 494b537247e2ee2fd771902cfb75108c2a177608 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 17:02:17 -0800 Subject: [PATCH 589/885] Removed 'teardown' from tag names, added app teardown option Signed-off-by: Jacob Emery --- teardown.yaml | 130 ++++++++++++++++++++++++++++++++++---------------- 1 file changed, 90 insertions(+), 40 deletions(-) diff --git a/teardown.yaml b/teardown.yaml index 40f8ae3a..4ac53414 100644 --- a/teardown.yaml +++ b/teardown.yaml @@ -1,10 +1,10 @@ --- -# Use the "full_teardown" tag when running this playbook to reset your installation back to the point where no VMs were running on the KVM host. +# Use the "full" tag when running this playbook to reset your installation back to the point where no VMs were running on the KVM host. Extra RHEL apps will not be torn down. 
# After you run this playbook, run the main playbook from the beginning with no tags ("ansible-playbook main.yaml --ask-become-pass") - hosts: kvm_host - tags: full_teardown + tags: full become: true gather_facts: no vars: @@ -31,8 +31,8 @@ roles: - teardown_vms -- hosts: localhost - tags: full_teardown +- hosts: workstation + tags: full connection: local become: false gather_facts: no @@ -41,63 +41,35 @@ vars_files: - env.yaml pre_tasks: - - name: remove bastion from localhost's known_hosts file + - name: remove bastion from workstation's known_hosts file lineinfile: path: "~/.ssh/known_hosts" regexp: "{{ env.ip.bastion}}" state: absent - - name: remove KVM host from localhost's known_hosts file + - name: remove KVM host from workstation's known_hosts file lineinfile: path: "~/.ssh/known_hosts" regexp: "{{ env.ip.kvm}}" state: absent - - name: Delete SSH key vars files - file: - path: "{{ item }}" - state: absent - loop: - - "{{ roles/get_ocp/files/ocp_ssh_pub.yaml }}" - - "{{ roles/ssh_copy_id/vars/path_to_key_pair.yaml }}" - - name: Recreate empty SSH key vars files - file: - path: "{{ item }}" - state: absent - loop: - - "{{ roles/get_ocp/files/ocp_ssh_pub.yaml }}" - - "{{ roles/ssh_copy_id/vars/path_to_key_pair.yaml }}" roles: - reset_files -# Use the "partial_teardown" tag when running this playbook to reset your installation back to the point where just the bastion node was running on the KVM host. 
-# After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'bastion,create_nodes,verification'" - -- hosts: localhost - tags: partial_teardown - connection: local - become: false - gather_facts: no - vars_files: - - env.yaml - tasks: - - name: remove bastion from localhost's known_hosts file - lineinfile: - path: "~/.ssh/known_hosts" - regexp: "{{ env.ip.bastion}}" - state: absent +# Use the "partial" tag when running this playbook to reset your installation back to the point where just the bastion node was running on the KVM host. If extra RHEL VM apps were created, they will not be torn down. +# After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'get_ocp,create_nodes,verification'" - hosts: bastion - tags: partial_teardown + tags: partial become: true gather_facts: no vars: - - files_to_reset: ['~/.ssh/known_hosts', '~/.ssh/id_rsa','~/.ssh/id_rsa.pub','/ocpinst'] # feel free to add as needed + - files_to_reset: ['/home/{{ env.access.login.bastion.user }}/ocpinst'] # feel free to add as needed vars_files: - env.yaml roles: - reset_files - hosts: kvm_host - tags: partial_teardown + tags: partial become: true gather_facts: no vars: @@ -105,4 +77,82 @@ vars_files: - env.yaml roles: - - teardown_vms \ No newline at end of file + - teardown_vms + +# Use the "app" tag with teardown.yaml to teardown all extra RHEL VM apps running on the KVM host. 
+# To recreate them, run the main playbook with "--tags app" + +- hosts: workstation + tags: app + connection: local + become: false + gather_facts: no + vars_files: + - env.yaml + tasks: + - name: remove apps from workstation's known_hosts file for idempotency if created + lineinfile: + path: "~/.ssh/known_hosts" + regexp: "{{ env.ip.app[i] }}" + state: absent + with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + when: env.ip.app is defined + +- hosts: kvm_host + tags: app + become: true + gather_facts: no + vars_files: + - env.yaml + tasks: + - name: Destroy running app nodes. Expect ignored errors if some VMs are already destroyed. + community.libvirt.virt: + name: "{{ item }}" + command: destroy + loop: "{{ env.hostname.app }}" + ignore_errors: yes + + - name: Undefine remaining app nodes. Expect ignored errors if some VMs are already undefined. + community.libvirt.virt: + name: "{{ item }}" + command: undefine + loop: "{{ env.hostname.app }}" + ignore_errors: yes + + - name: Remove apps qcow2 files for idempotency + tags: create_extra_rhel + file: + path: /var/lib/libvirt/images/{{ item }}.qcow2 + state: absent + loop: "{{env.hostname.app}}" + + - name: Remove seeds for idempotency + tags: create_extra_rhel + file: + path: /var/lib/libvirt/images/{{ item }}-seed.img + state: absent + loop: "{{env.hostname.app}}" + + - name: Remove app meta data for idempotency + tags: create_extra_rhel + file: + path: /var/lib/libvirt/images/tmp/{{ item }}/meta-data + state: absent + loop: "{{ env.hostname.app }}" + + - name: Remove app user data for idempotency + tags: create_extra_rhel + file: + path: /var/lib/libvirt/images/tmp/{{ item }}/user-data + state: absent + loop: "{{ env.hostname.app }}" + + - name: Remove app network config data for idempotency + tags: create_extra_rhel + file: + path: /var/lib/libvirt/images/tmp/{{ item }}/network-config + state: absent + loop: "{{ env.hostname.app }}" \ No 
newline at end of file From ef621cf534d6a6d8ad1d571fc8f161730388083e Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 17:18:21 -0800 Subject: [PATCH 590/885] Changed variable structure to have a line separating variables that needed to be filled in by the user and variables that are pre-filled. Also added functionality for exta RHEL VMs for non-cluster applications and infrastructure nodes. Signed-off-by: Jacob Emery --- env.yaml | 187 +++++++++++++++++++++++++++++++++---------------------- 1 file changed, 113 insertions(+), 74 deletions(-) diff --git a/env.yaml b/env.yaml index b89a890b..90991fb8 100644 --- a/env.yaml +++ b/env.yaml @@ -1,113 +1,135 @@ -# The home for all your variables. The single source of truth for your specific installation. -# Variables with a #X need to be filled in. -# This is the most important step in the process, pelase take your time to make sure these are set correctly. -# A note on YAML: only the lowest level variables in a hierarchy need to be filled out. (i.e. below, don't put anything after "redhat", but do fill in "username") +# This is the home for all your variables. The single source of truth for your specific installation. +# Variables with a X need to be filled in. There's a point below which marks that nothing after it needs to be modified for a default installation. +# This is the most important step in the process, please take your time to make sure these are set correctly. +# A note on YAML syntax: only the lowest level variables in each hierarchy need to be filled out. +# For example, below, don't put anything after "env" or "redhat", but do delete the '#X' and fill in "username" env: redhat: - attach_subscription: true - username: #X - password: #X - path_to_qcow2: #Absolute path to RHEL qcow2 file on workstation running Ansible, i.e. 
/Users/username/Downloads (If unclear, see README step 2) - install_config: - api_version: v1 - metadata_name: #X #Will be combined with base_domain to create FQDNs - base_domain: #X - compute: - replicas: #X - architecture: s390x - hyperthreading: Enabled - control: - replicas: #X - architecture: s390x - hyperthreading: Enabled - cluster_network: - cidr: 10.128.0.0/14 - host_prefix: 23 - type: OpenShiftSDN - service_network: 172.30.0.0/16 - fips: "false" # "true" or "false" (include quotes) - pull_secret: '#X' #paste it into these single quotes - #OpenShift SSH key is generated via Ansible in ssh_ocp_key_gen role + username: X #Providing your Red Hat login credentials here will auto attach your RHEL subscription. + password: X #If you do not provide it, you will have to do so manually before packages can be installed. + path_to_qcow2: X #Absolute path to RHEL qcow2 file on workstation running Ansible, e.g. /Users/username/Downloads/rhel-8.5-s390x-kvm.qcow2 (If unclear, see README step 2) + pull_secret: 'X' #paste OpenShift pull secret into these single quotes. If unclear, see README step 2. + # IP addresses for the nodes that Ansible will be run against. +# Feel free to add as many nodes as needed. # This will automatically fill out the inventory file when setup.yaml is run. ip: - kvm: #X - bastion: #X - bootstrap: #X + kvm: X + bastion: X + bootstrap: X control: - - #X - - #X - - #X + - X + - X + - X compute: - - #X - - #X + - X + - X + #Un-comment and fill out list to create infrastructure nodes + #infra: + #- X + #Un-comment and fill out list to create extra RHEL KVM guests for non-cluster applications + #app: + #- X +# Make sure the total number of each type of node matches up with number of IPs above. hostname: - kvm: #X - bastion: #X - bootstrap: #X + kvm: X + bastion: X + bootstrap: X control: - - #X - - #X - - #X + - X + - X + - X compute: - - #X - - #X - -#Packages to be installed on the KVM host and bastion. Feel free to add more as needed. 
- pkgs: - kvm: ['@server-product-environment','@hardware-monitoring','@network-file-system-client','@remote-system-management', - '@headless-management','@system-tools','libvirt-devel','libvirt-daemon-kvm','qemu-kvm','virt-manager','genisoimage', - 'libvirt-daemon-config-network','libvirt-client','qemu-img','virt-install','virt-viewer','libvirt-daemon-kvm','libvirt'] - bastion: ['haproxy','httpd','bind','bind-utils','expect','firewalld','mod_ssl'] + - X + - X + #Un-comment and fill out list to create infrastructure nodes + #infra: + #- X + #- X + #Un-comment and fill out list to create extra RHEL KVM guests for non-cluster applications + #app: + #- X + #- X networking: - interface_name: #X #KVM network interface name: i.e. enc1 - gateway: #X - netmask: #X + metadata_name: X #e.g. ocpz + base_domain: X #e.g. pbm.ihost.com (Will be combined with metadata_name above to create fully qualified domain names) + interface_name: X #KVM host network interface name: e.g. enc1 or vlan21@enc1 + gateway: X + netmask: X dns: setup_on_bastion: true #Set to false if you do not want to setup a DNS server on the bastion because you already have a DNS server elsewhere. - nameserver: #X #If above variable is true, then this variable should be the same as env.ip.bastion above. - forwarder: 8.8.8.8 + nameserver: X #If above variable is true, then this variable should be the same as env.ip.bastion above. + forwarder: X #For cluster to reach external internet. Can use 8.8.8.8 as a default. 
#To create user on bastion, create and copy ssh keys access: login: - bastion: - username: #X - password: #X - root_password: #X kvm: - root_password: #X + user: X + pass: X + sudo_pass: X + bastion: + user: X + pass: X + sudo_pass: X + app: + user: X + pass: X + sudo_pass: X ssh: - ansible: - comment: "" - ocp: - comment: "" + ansible_key_comment: "Ansible key" + ocp_key_comment: "OpenShift key" + + +####################################################################################### +# All variables below this point do not need to be changed for a default installation # +####################################################################################### + -#Pre-filled values are minimum requirements for nodes. +#Packages to be installed on the KVM host, bastion, and extra RHEL VMs for non-cluster applications (only used if app IP and hostnames are defined above). +#Feel free to add more as needed. + pkgs: + kvm: ['@server-product-environment','@hardware-monitoring','@network-file-system-client','@remote-system-management', + '@headless-management','@system-tools','libvirt-devel','libvirt-daemon-kvm','qemu-kvm','virt-manager','genisoimage', + 'libvirt-daemon-config-network','libvirt-client','qemu-img','virt-install','virt-viewer','libvirt-daemon-kvm','libvirt'] + bastion: ['haproxy','httpd','bind','bind-utils','expect','firewalld','mod_ssl'] + app: ['@server-product-environment','@system-tools','@remote-system-management'] + +#Pre-filled values are minimum resource requirements for nodes. 
node_resources: bastion: - disk_size: 30 - ram: 4096 - vcpu: 4 - os_variant: 8.4 + disk_size: 30 #in GB + ram: 4096 #in MB + vcpu: 4 #number of virtual CPUs + os_variant: 8.5 #Red Hat Enterprise Linux version bootstrap: disk_size: 120 ram: 16384 vcpu: 4 - os_variant: 8.4 + os_variant: 8.5 control: disk_size: 120 ram: 16384 vcpu: 4 #8 recommended - os_variant: 8.4 + os_variant: 8.5 compute: disk_size: 120 ram: 8192 vcpu: 2 #6 recommended - os_variant: 8.4 + os_variant: 8.5 + infra: #will only be used if you defined infrastructure node IPs and hostnames + disk_size: 120 + ram: 16384 + vcpu: 8 + os_variant: 8.5 + app: #will only be used if you defined extra RHEL VM app IPs and hostnames + disk_size: 80 + vcpu: 8 + ram: 8192 + os_variant: 8.5 # If you would like to download the latest stable version of OpenShift, leave as is. # Otherwise, replace these links with preferred versions. @@ -120,4 +142,21 @@ env: coreos: kernel: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-kernel-s390x initramfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-initramfs.s390x.img - rootfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-rootfs.s390x.img \ No newline at end of file + rootfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-rootfs.s390x.img + +# If you would like to keep the defaults for the OpenShift install-config file, leave as is. + install_config: + api_version: v1 + compute: + architecture: s390x + hyperthreading: Enabled + control: + architecture: s390x + hyperthreading: Enabled + cluster_network: + cidr: 10.128.0.0/14 + host_prefix: 23 + type: OpenShiftSDN + service_network: 172.30.0.0/16 + fips: "false" #include quotes Note: FIPS is not yet supported for OpenShift on Z. 
+ #OpenShift SSH key is generated via Ansible in ssh_ocp_key_gen role \ No newline at end of file From 4a82a0fe7d912cd37dd874c945ae9a31dc46ec74 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 17:18:39 -0800 Subject: [PATCH 591/885] Updated README Signed-off-by: Jacob Emery --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index fe0cf7ca..ce516d78 100644 --- a/README.md +++ b/README.md @@ -80,7 +80,7 @@ * In a web browser, navigate to the Red Hat [console](https://console.redhat.com/openshift/install/ibmz/user-provisioned) and copy the OpenShift pull secret and paste it into [env.yaml](env.yaml) as the variable `env.redhat.pull_secret`. * **Step 3: Set Variables** * In a text editor of your choice, open [env.yaml](env.yaml) - * Fill out variables marked with `#X` to match your specific installation. + * Fill out variables marked with `X` to match your specific installation. * There are two sections of this file, separated by a comment block, which distinguishes variables that need to be filled in and variables that are pre-filled with defaults but can be altered if desired. * This is the most important step in the process. Take the time to make sure everything here is correct. 
* **Step 4: Setup Script** From ecbce1620cb0f90b0b5042f94995a57a43d5030c Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 17:18:53 -0800 Subject: [PATCH 592/885] Reset inventory Signed-off-by: Jacob Emery --- inventory | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/inventory b/inventory index a7ceda3d..beb05627 100755 --- a/inventory +++ b/inventory @@ -1,7 +1,7 @@ # will populate from ansible_setup playbook -[localhost] +[workstation] 127.0.0.1 ansible_connection=local -[localhost:vars] +[workstation:vars] ansible_python_interpreter=/usr/bin/python3 From 4531e0faf9e9147fb1b1b8939399c59b1e7e74be Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 17:25:18 -0800 Subject: [PATCH 593/885] Updated CHANGELOG Signed-off-by: Jacob Emery --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 10051402..4aeea3e9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ All notable changes to this project will be documented in this file. -## 1.2.0 - Scaling Update - 2021-12-09 +## 1.2.0 - Scaling - 2021-12-09 ### Summary - Now supports any number of control and compute nodes to be provisioned in the cluster. - This update heavily modifies the variable structure in env.yaml in order to make scaling work. @@ -24,7 +24,7 @@ All notable changes to this project will be documented in this file. * Air-gapped (disconnected) install of OpenShift option * Add an option to automte the creation of an LPAR and install RHEL on KVM host -## 1.1.0 - Automated OCP Verification Update - 2021-12-03 +## 1.1.0 - Automated OCP Verification - 2021-12-03 ### Summary - Fully automated all OCP verification steps. Cutting the number of steps nearly in half. The main playbook can now run completely hands-off from kicking it off all the way to an operational cluster. The last step provides the first-time login credentials. 
@@ -40,7 +40,7 @@ All notable changes to this project will be documented in this file. ### Removed - Instructions in README for doing OCP verification steps manually -## 1.0.0 - Automated Bastion Update 2021-11-24 +## 1.0.0 - Automated Bastion - 2021-11-24 ### Summary - Fully automated bastion installation and configuration using cloud-init From 138c90156b6b112fa32840edf89d7b4706d1e29a Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Thu, 6 Jan 2022 18:06:43 -0800 Subject: [PATCH 594/885] Updated CHANGELOG with info on 'Infrastructure Nodes and Extra Apps' update Signed-off-by: Jacob Emery --- CHANGELOG.md | 132 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 81 insertions(+), 51 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4aeea3e9..c488fa52 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,67 +1,97 @@ # Changelog - All notable changes to this project will be documented in this file. -## 1.2.0 - Scaling - 2021-12-09 -### Summary -- Now supports any number of control and compute nodes to be provisioned in the cluster. -- This update heavily modifies the variable structure in env.yaml in order to make scaling work. - -### Added -- Support for scaling of control and compute nodes. +## Table of Contents +* [Roadmap](#Roadmap) +* [Infrastructure Nodes and Extra Apps](#Infrastructure-Nodes-and-Extra-Apps) +* [Scaling](#Scaling) +* [Automated OCP Verification](#Automated-OCP-Verification) +* [Automated Bastion Install](#Automated-Bastion-Install) +* [First Working Build](#First-Working-Build) +* [Initial Commit](#Initial-Commit) -### Modified -- Variable structure in env.yaml in order to support scaling. -- Tags to match their corresponding role. -- Every reference to a variable from env.yaml to match the new structure. 
- -## Roadmap +## Roadmap +* Mark infrastructure nodes for specific operators +* Add air-gapped (disconnected) install option * Add option to have load balancer on bastion or not * Add option for OpenShift to use a proxy server * Add picture of finished infrastructure to README * Add README’s for each role * Make ssh-copy-id role idempotent -* Air-gapped (disconnected) install of OpenShift option * Add an option to automte the creation of an LPAR and install RHEL on KVM host -## 1.1.0 - Automated OCP Verification - 2021-12-03 - -### Summary -- Fully automated all OCP verification steps. Cutting the number of steps nearly in half. The main playbook can now run completely hands-off from kicking it off all the way to an operational cluster. The last step provides the first-time login credentials. +## Infrastructure Nodes and Extra Apps +Version: 1.3.0 \ +Released: 2022-01-06 +* ### Summary + * Now able to designate compute nodes as infrastructure nodes, and create optional RHEL VMs for additional non-cluster applications running on the KVM host. +* ### Added + * Support for creating infrastructure nodes and extra apps. + * Added tcp port 53 to firewall. + * Setting of permissions and ownership of important configuration files to admin user instead of root. + * More rounds of checking cluster operators and CSRs in verification steps to ensure the playbook doesn't fail if it takes a long time for those steps to complete. +* ### Modified + * README file to be prettier. + * Ansible connection to SSH with password authentication since it was necessary for copying SSH keys anyway. Kept copying of SSH keys to Ansible-managed servers because it's still useful to have. + * env.yaml to have two sections separated by a comment block: one for variables that need to be filled out, the other for pre-filled variables that can be modified if desired. + * Ansible user from running as root to an admin user with sudo privileges. 
-### Added -- 5 roles related to automating OCP verification steps: wait_for_bootstrap, approve_certs, check_nodes, wait_for_cluster_operators, and wait_for_install_complete. -- role to check internal and external DNS configuration before continuing. Including checking to make sure the name resolves to the correct IP address. -### Modified -- The mirrors for CoreOS versions to update to 4.9 and tested them. -- The acquisition method of RHEL qcow2 from downloading via ephemeral link to having the user download the file to their local machine as a pre-req. This was changed to avoid having to re-copy the link every time it expires. -- teardown.yaml and reset_files role to be fully idempotent when running the main playbook from the point where each type of teardown sets the user back to. -- Lots of small tweaks. -### Removed -- Instructions in README for doing OCP verification steps manually +## Scaling +Version: 1.2.0 \ +Released: 2021-12-09 +* ### Summary + * Now supports any number of control and compute nodes to be provisioned in the cluster. + * This update heavily modifies the variable structure in env.yaml in order to make scaling work. +* ### Added + * Support for scaling of control and compute nodes. +* ### Modified + * Variable structure in env.yaml in order to support scaling. + * Tags to match their corresponding role. + * Every reference to a variable from env.yaml to match the new structure. -## 1.0.0 - Automated Bastion - 2021-11-24 +## Automated OCP Verification +Version: 1.1.0 \ +Released: 2021-12-03 +* ### Summary + * Fully automated all OCP verification steps. Cutting the number of steps nearly in half. The main playbook can now run completely hands-off from kicking it off all the way to an operational cluster. The last step provides the first-time login credentials. +* ### Added + * 5 roles related to automating OCP verification steps: wait_for_bootstrap, approve_certs, check_nodes, wait_for_cluster_operators, and wait_for_install_complete. 
+ * Role to check internal and external DNS configuration before continuing. Including checking to make sure the name resolves to the correct IP address. +* ### Modified + * The mirrors for CoreOS versions to update to 4.9 and tested them. + * The acquisition method of RHEL qcow2 from downloading via ephemeral link to having the user download the file to their local machine as a pre-req. This was changed to avoid having to re-copy the link every time it expires. + * teardown.yaml and reset_files role to be fully idempotent when running the main playbook from the point where each type of teardown sets the user back to. + * Lots of small tweaks. +* ### Removed + * Instructions in README for doing OCP verification steps manually -### Summary -- Fully automated bastion installation and configuration using cloud-init +## Automated Bastion Install +Version: 1.0.0 \ +Released: 2021-11-24 +* ### Summary + * Fully automated bastion installation and configuration using cloud-init +* ### Added + * Options in env.yaml for creating a DNS server on the bastion or not, and for automatically attaching Red Hat subscriptions + * Variables for bootstrap, bastion, control and compute nodes' specifications in env.yaml + * Node name variables in env.yaml + * Variable for network interface name in env.yaml + * Variable for DNS forwarder in env.yaml + * Templating of DNS configuration files so they don't have to be pre-provided + * Expect script to ssh_copy_id role so that the user doesn't have to type in ssh password when copying ssh key + * Templating of haproxy config file + * A boot_teardown tag in teardown.yaml to automate the teardown of bootstrap node +* ### Modified + * create_bastion role to use cloud-init to fully automate configuration and installation of the bastion node + * teardown.yaml script to decrease complexity and work faster. 
+ * Some tags to match their corresponding role names + * Lots of small improvements and tweaks +* ### Removed + * Encryption of env.yaml as it was unnecessary and increased complexity -### Added -- Options in env.yaml for creating a DNS server on the bastion or not, and for automatically attaching Red Hat subscriptions -- Variables for bootstrap, bastion, control and compute nodes' specifications in env.yaml -- Node name variables in env.yaml -- Variable for network interface name in env.yaml -- Variable for DNS forwarder in env.yaml -- Templating of DNS configuration files so they don't have to be pre-provided -- Expect script to ssh_copy_id role so that the user doesn't have to type in ssh password when copying ssh key -- Templating of haproxy config file -- A boot_teardown tag in teardown.yaml to automate the teardown of bootstrap node -### Modified -- create_bastion role to use cloud-init to fully automate configuration and installation of the bastion node -- teardown.yaml script to decrease complexity and work faster. -- Some tags to match their corresponding role names -- Lots of small improvements and tweaks -### Removed -- Encryption of env.yaml as it was unnecessary and increased complexity +## First Working Build +Version: 0.5.0 \ +Released: 2021-08-24 -## 0.0.1 -### Unreleased 2021-08-24 \ No newline at end of file +## Initial Commit +Version: 0.0.0 \ +Released: 2021-06-11 \ No newline at end of file From f06d5e296d9611259940a8e7a7679e93220ed356 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 10 Jan 2022 03:40:05 -0600 Subject: [PATCH 595/885] Removed become_user from main.yaml verification step. 
--- main.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/main.yaml b/main.yaml index 6edc868d..aec5124d 100644 --- a/main.yaml +++ b/main.yaml @@ -88,7 +88,6 @@ - hosts: bastion tags: verification,bastion become: false - become_user: jacob environment: KUBECONFIG: "/home/{{ env.access.login.bastion.user }}/ocpinst/auth/kubeconfig" gather_facts: yes @@ -99,4 +98,4 @@ - approve_certs - check_nodes - wait_for_cluster_operators - - wait_for_install_complete \ No newline at end of file + - wait_for_install_complete From 2c8faf3f41d2d2c8a494762435d4aa978816e45e Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 10 Jan 2022 02:52:15 -0800 Subject: [PATCH 596/885] Added a few more changes to most recent update Signed-off-by: Jacob Emery --- CHANGELOG.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c488fa52..a4dc664c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,21 +20,27 @@ All notable changes to this project will be documented in this file. * Make ssh-copy-id role idempotent * Add an option to automte the creation of an LPAR and install RHEL on KVM host -## Infrastructure Nodes and Extra Apps +## Infrastructure Nodes, Extra Apps, Security Version: 1.3.0 \ Released: 2022-01-06 * ### Summary * Now able to designate compute nodes as infrastructure nodes, and create optional RHEL VMs for additional non-cluster applications running on the KVM host. + * Made changes to SSH and SELinux tasks to be more secure. * ### Added * Support for creating infrastructure nodes and extra apps. * Added tcp port 53 to firewall. - * Setting of permissions and ownership of important configuration files to admin user instead of root. + * Setting of permissions and ownership of important configuration files to bastion admin user instead of root. + * Wheel to groups that bastion admin user is added to on boot. 
* More rounds of checking cluster operators and CSRs in verification steps to ensure the playbook doesn't fail if it takes a long time for those steps to complete. + * Task to httpd to allow port 4443 because SELinux is no longer set to permissive (see '[Removed](###Removed)' below). * ### Modified - * README file to be prettier. + * Formatting of README file to be prettier and more useful. * Ansible connection to SSH with password authentication since it was necessary for copying SSH keys anyway. Kept copying of SSH keys to Ansible-managed servers because it's still useful to have. * env.yaml to have two sections separated by a comment block: one for variables that need to be filled out, the other for pre-filled variables that can be modified if desired. * Ansible user from running as root to an admin user with sudo privileges. +* ### Removed + * The need to run anything as root user for security reasons. + * set_selinux_permissive mode role for security reasons. ## Scaling Version: 1.2.0 \ From 98366a8ef34f82b267c202e5e0726354f0133050 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 10 Jan 2022 02:52:40 -0800 Subject: [PATCH 597/885] Fixed Python link Signed-off-by: Jacob Emery --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ce516d78..ba1a9045 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ ~~~ xcode-select --install ~~~ -* [Python3]((https://realpython.com/installing-python/)) and [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) intalled on your local workstation \ +* [Python3](https://realpython.com/installing-python/) and [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) intalled on your local workstation \ * Mac: ~~~ brew install python3 From 0d78b3003ed9e7000ee45a8c0356eab4fd69f783 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 10 Jan 2022 02:53:52 -0800 Subject: [PATCH 598/885] Moved 
creating extra RHEL VMs into its own section to customize tagging Signed-off-by: Jacob Emery --- main.yaml | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/main.yaml b/main.yaml index 6edc868d..fb5d5730 100644 --- a/main.yaml +++ b/main.yaml @@ -25,7 +25,6 @@ - check_ssh - {role: attach_subscription, when: env.redhat.username is defined and env.redhat.password is defined} - install_packages - - set_selinux_permissive - macvtap - create_bastion @@ -53,7 +52,6 @@ - {role: attach_subscription, when: env.redhat.username is defined and env.redhat.password is defined} - install_packages - ssh_ocp_key_gen - - set_selinux_permissive - set_firewall - {role: dns, when: env.networking.dns.setup_on_bastion } - check_dns @@ -72,6 +70,14 @@ - create_bootstrap - create_control_nodes - create_compute_nodes + +- hosts: kvm_host + tags: app + become: true + gather_facts: no + vars_files: + - env.yaml + roles: - {role: create_extra_rhel, when: env.ip.app is defined } - hosts: app @@ -88,7 +94,6 @@ - hosts: bastion tags: verification,bastion become: false - become_user: jacob environment: KUBECONFIG: "/home/{{ env.access.login.bastion.user }}/ocpinst/auth/kubeconfig" gather_facts: yes From bc0d1608b4a2f4921f9b983ef77180dfb15d245e Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 10 Jan 2022 02:58:09 -0800 Subject: [PATCH 599/885] Partial teardown needs to remove qcow2 files of nodes Signed-off-by: Jacob Emery --- roles/httpd/tasks/main.yaml | 9 +++++++++ roles/prep_kvm_guests/tasks/main.yaml | 10 +++++++++- roles/set_firewall/tasks/main.yaml | 10 ++-------- roles/set_selinux_permissive/tasks/main.yaml | 7 ------- teardown.yaml | 4 ++-- 5 files changed, 22 insertions(+), 18 deletions(-) delete mode 100644 roles/set_selinux_permissive/tasks/main.yaml diff --git a/roles/httpd/tasks/main.yaml b/roles/httpd/tasks/main.yaml index 20be67f5..9e08681b 100644 --- a/roles/httpd/tasks/main.yaml +++ b/roles/httpd/tasks/main.yaml @@ -6,6 +6,15 @@ name: httpd_t 
permissive: true +- name: Allow httpd to listen on tcp port 4443 + tags: httpd + community.general.seport: + ports: 4443 + proto: tcp + setype: http_port_t + state: present + reload: yes + - name: enable httpd tags: httpd systemd: diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index 57625a7e..79b36169 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -32,4 +32,12 @@ dest: /var/lib/libvirt/images/rhcos-live-initramfs.s390x.img mode: '0755' force: yes - when: initramfs_check.stat.exists == false \ No newline at end of file + when: initramfs_check.stat.exists == false + +- name: Add admin user to qemu and libvirt groups + tags: prep_kvm_guests + become: true + command: usermod -aG {{item}} {{ env.access.login.kvm.user }} + loop: + - libvirt + - qemu \ No newline at end of file diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index 1f4af6ef..cbc3f2c0 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -1,6 +1,6 @@ --- -- name: Add ports to firewalld +- name: Add ports to firewall tags: set_firewall firewalld: port: "{{ item }}" @@ -46,10 +46,4 @@ tags: set_firewall systemd: name: firewalld - state: reloaded - -- name: restart httpd - tags: set_firewall - service: - name: httpd - state: restarted \ No newline at end of file + state: reloaded \ No newline at end of file diff --git a/roles/set_selinux_permissive/tasks/main.yaml b/roles/set_selinux_permissive/tasks/main.yaml deleted file mode 100644 index d3cfee5a..00000000 --- a/roles/set_selinux_permissive/tasks/main.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- - -- name: Put SELinux in permissive mode, logging actions that would be blocked. 
- tags: selinux - ansible.posix.selinux: - policy: targeted - state: permissive \ No newline at end of file diff --git a/teardown.yaml b/teardown.yaml index 4ac53414..c829e4be 100644 --- a/teardown.yaml +++ b/teardown.yaml @@ -113,14 +113,14 @@ name: "{{ item }}" command: destroy loop: "{{ env.hostname.app }}" - ignore_errors: yes + ignore_errors: true - name: Undefine remaining app nodes. Expect ignored errors if some VMs are already undefined. community.libvirt.virt: name: "{{ item }}" command: undefine loop: "{{ env.hostname.app }}" - ignore_errors: yes + ignore_errors: true - name: Remove apps qcow2 files for idempotency tags: create_extra_rhel From 88af36984efbae00411cbdabb3f2c52503b22b32 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 10 Jan 2022 03:00:16 -0800 Subject: [PATCH 600/885] Changed booleans from yes/no to true/false Signed-off-by: Jacob Emery --- roles/approve_certs/tasks/main.yaml | 22 +++++++++---------- roles/check_dns/tasks/main.yaml | 2 +- roles/create_bootstrap/tasks/main.yaml | 4 +++- roles/create_extra_rhel/tasks/main.yaml | 2 +- roles/get_ocp/tasks/main.yaml | 22 +++++++++---------- roles/teardown_vms/tasks/main.yaml | 20 ++++++++--------- roles/wait_for_bootstrap/tasks/main.yaml | 5 +++-- .../tasks/main.yaml | 12 +++++----- 8 files changed, 46 insertions(+), 43 deletions(-) diff --git a/roles/approve_certs/tasks/main.yaml b/roles/approve_certs/tasks/main.yaml index 16fdc280..1c0b8b02 100644 --- a/roles/approve_certs/tasks/main.yaml +++ b/roles/approve_certs/tasks/main.yaml @@ -5,7 +5,7 @@ shell: | /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve register: csr_approved - ignore_errors: yes + ignore_errors: true - name: Viewing approved certificates tags: approve_certs @@ -22,7 +22,7 @@ shell: | /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ 
env.access.login.bastion.user }}/ocpinst/oc adm certificate approve register: csr_approved - ignore_errors: yes + ignore_errors: true - name: Viewing approved certificates tags: approve_certs @@ -39,7 +39,7 @@ shell: | /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve register: csr_approved - ignore_errors: yes + ignore_errors: true - name: Viewing approved certificates tags: approve_certs @@ -56,7 +56,7 @@ shell: | /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve register: csr_approved - ignore_errors: yes + ignore_errors: true - name: Viewing approved certificates tags: approve_certs @@ -73,7 +73,7 @@ shell: | /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve register: csr_approved - ignore_errors: yes + ignore_errors: true - name: Viewing approved certificates tags: approve_certs @@ -90,7 +90,7 @@ shell: | /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve register: csr_approved - ignore_errors: yes + ignore_errors: true - name: Viewing approved certificates tags: approve_certs @@ -107,7 +107,7 @@ shell: | /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve register: csr_approved - ignore_errors: yes + ignore_errors: true - name: Viewing approved certificates tags: approve_certs @@ -124,7 +124,7 @@ shell: | /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ 
env.access.login.bastion.user }}/ocpinst/oc adm certificate approve register: csr_approved - ignore_errors: yes + ignore_errors: true - name: Viewing approved certificates tags: approve_certs @@ -141,7 +141,7 @@ shell: | /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve register: csr_approved - ignore_errors: yes + ignore_errors: true - name: Viewing approved certificates tags: approve_certs @@ -158,7 +158,7 @@ shell: | /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve register: csr_approved - ignore_errors: yes + ignore_errors: true - name: Viewing approved certificates tags: approve_certs @@ -175,7 +175,7 @@ shell: | /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve register: csr_approved - ignore_errors: yes + ignore_errors: true - name: Viewing approved certificates tags: approve_certs diff --git a/roles/check_dns/tasks/main.yaml b/roles/check_dns/tasks/main.yaml index c6f3fb57..169c1bbc 100644 --- a/roles/check_dns/tasks/main.yaml +++ b/roles/check_dns/tasks/main.yaml @@ -68,7 +68,7 @@ index_var: i when: env.hostname.app is defined -- name: Check external DNS resolution from DNS forwarder +- name: Check external DNS resolution from forwarder tags: check_dns,dns register: external_dns_check failed_when: '"server can" in external_dns_check.stdout' diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index 2962d222..eea8543e 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -10,7 +10,7 @@ name: "{{ env.hostname.bootstrap }}" command: status register: bootstrap_check - ignore_errors: yes + 
ignore_errors: true - name: Print status of bootstrap tags: create_bootstrap @@ -37,9 +37,11 @@ when: bootstrap_check.failed == true - name: Set bootstrap qcow2 permissions + become: true tags: create_bootstrap command: chmod 600 /var/lib/libvirt/images/{{env.hostname.bootstrap}}-bootstrap.qcow2 - name: Set bootstrap qcow2 ownership to qemu + become: true tags: create_bootstrap command: chown qemu:qemu /var/lib/libvirt/images/{{env.hostname.bootstrap}}-bootstrap.qcow2 \ No newline at end of file diff --git a/roles/create_extra_rhel/tasks/main.yaml b/roles/create_extra_rhel/tasks/main.yaml index 48c0a493..77a7472c 100644 --- a/roles/create_extra_rhel/tasks/main.yaml +++ b/roles/create_extra_rhel/tasks/main.yaml @@ -8,7 +8,7 @@ tags: create_extra_rhel,app stat: path: /var/lib/libvirt/images/bastion_base.qcow2 - ignore_errors: yes + ignore_errors: true register: qcow2_check - name: Copy RHEL qcow2 file to KVM host if it's not there already. This may take a while. diff --git a/roles/get_ocp/tasks/main.yaml b/roles/get_ocp/tasks/main.yaml index df7d1da8..8aed4be3 100644 --- a/roles/get_ocp/tasks/main.yaml +++ b/roles/get_ocp/tasks/main.yaml @@ -31,7 +31,7 @@ - name: Delete OCP download directory for idempotency, because ignition files deprecate after 24 hours. 
tags: get_ocp - become: yes + become: true file: path: /home/{{ env.access.login.bastion.user }}/ocpinst state: absent @@ -54,7 +54,7 @@ - name: Copy kubectl, oc, and openshift-install binaries to /usr/local/bin tags: get_ocp - become: yes + become: true ansible.builtin.copy: src: /home/{{ env.access.login.bastion.user }}/ocpinst/{{item}} dest: /usr/local/bin/{{item}} @@ -92,11 +92,11 @@ - name: Create manifests tags: get_ocp command: /home/{{ env.access.login.bastion.user }}/ocpinst/openshift-install create manifests --dir=/home/{{ env.access.login.bastion.user }}/ocpinst/ - become: yes + become: true - name: Set masters schedulable parameter to false tags: get_ocp - become: yes + become: true replace: path: /home/{{ env.access.login.bastion.user }}/ocpinst/manifests/cluster-scheduler-02-config.yml regexp: ': true' @@ -104,7 +104,7 @@ - name: Set permissions for ocpinst directory contents to bastion admin user tags: get_ocp - become: yes + become: true command: chmod 0755 /home/{{ env.access.login.bastion.user }}/ocpinst/{{item}} loop: - manifests @@ -114,7 +114,7 @@ - name: Set ownership of ocpinst directory contents to bastion admin user tags: get_ocp - become: yes + become: true command: chown {{ env.access.login.bastion.user }}:{{ env.access.login.bastion.user }} /home/{{ env.access.login.bastion.user }}/ocpinst/{{item}} loop: - manifests @@ -124,12 +124,12 @@ - name: Create ignition files tags: get_ocp - become: yes + become: true command: /home/{{ env.access.login.bastion.user }}/ocpinst/openshift-install create ignition-configs --dir=/home/{{ env.access.login.bastion.user }}/ocpinst/ - name: Set permissions of ignitions and related files to bastion admin user tags: get_ocp - become: yes + become: true command: chmod 0755 /home/{{ env.access.login.bastion.user }}/ocpinst/{{item}} loop: - bootstrap.ign @@ -140,7 +140,7 @@ - name: Set ownership of ignitions and related files to bastion admin user tags: get_ocp - become: yes + become: true command: chown {{ 
env.access.login.bastion.user }}:{{ env.access.login.bastion.user }} /home/{{ env.access.login.bastion.user }}/ocpinst/{{item}} loop: - bootstrap.ign @@ -153,14 +153,14 @@ - name: create ignition directory on webserver tags: get_ocp - become: yes + become: true file: path: /var/www/html/ignition state: directory - name: Copy ignition files to web server tags: get_ocp - become: yes + become: true copy: src: /home/{{ env.access.login.bastion.user }}/ocpinst/{{ item }}.ign dest: /var/www/html/ignition diff --git a/roles/teardown_vms/tasks/main.yaml b/roles/teardown_vms/tasks/main.yaml index 682c85d0..92b8f36e 100644 --- a/roles/teardown_vms/tasks/main.yaml +++ b/roles/teardown_vms/tasks/main.yaml @@ -5,26 +5,26 @@ name: "{{ env.hostname.bastion }}" command: destroy when: bastion_teardown - ignore_errors: yes + ignore_errors: true - name: Undefine bastion for full, skip for partial teardown. Expect ignored errors if it is already undefined. community.libvirt.virt: name: "{{ env.hostname.bastion }}" command: undefine when: bastion_teardown - ignore_errors: yes + ignore_errors: true - name: Destroy bootstrap. Expect ignored errors if it is already destroyed. community.libvirt.virt: name: "{{ env.hostname.bootstrap }}" command: destroy - ignore_errors: yes + ignore_errors: true - name: Undefine bootstrap. Expect ignored errors if it is already undefined. community.libvirt.virt: name: "{{ env.hostname.bootstrap }}" command: undefine - ignore_errors: yes + ignore_errors: true - name: Destroy running control nodes. Expect ignored errors if some VMs are already destroyed. community.libvirt.virt: @@ -34,7 +34,7 @@ loop_control: extended: yes index_var: i - ignore_errors: yes + ignore_errors: true - name: Undefine remaining control nodes. Expect ignored errors if some VMs are already undefined. community.libvirt.virt: @@ -44,7 +44,7 @@ loop_control: extended: yes index_var: i - ignore_errors: yes + ignore_errors: true - name: Destroy running compute nodes. 
Expect ignored errors if some VMs are already destroyed. community.libvirt.virt: @@ -54,7 +54,7 @@ loop_control: extended: yes index_var: i - ignore_errors: yes + ignore_errors: true - name: Undefine remaining compute nodes. Expect ignored errors if some VMs are already undefined. community.libvirt.virt: @@ -64,7 +64,7 @@ loop_control: extended: yes index_var: i - ignore_errors: yes + ignore_errors: true - name: Destroy running infrastructure nodes, if defined. Expect ignored errors if some VMs are already destroyed. community.libvirt.virt: @@ -74,7 +74,7 @@ loop_control: extended: yes index_var: i - ignore_errors: yes + ignore_errors: true when: env.hostname.infra is defined - name: Undefine remaining infrastructure nodes. Expect ignored errors if some VMs are already undefined. @@ -85,5 +85,5 @@ loop_control: extended: yes index_var: i - ignore_errors: yes + ignore_errors: true when: env.hostname.infra is defined \ No newline at end of file diff --git a/roles/wait_for_bootstrap/tasks/main.yaml b/roles/wait_for_bootstrap/tasks/main.yaml index ca775189..11ec6139 100644 --- a/roles/wait_for_bootstrap/tasks/main.yaml +++ b/roles/wait_for_bootstrap/tasks/main.yaml @@ -34,7 +34,7 @@ community.libvirt.virt: name: "{{ env.hostname.bootstrap }}" command: destroy - ignore_errors: yes + ignore_errors: true delegate_to: "{{ env.ip.kvm }}" - name: Undefine bootstrap. Expect ignored errors if bootstrap is already undefined. 
@@ -42,11 +42,12 @@ community.libvirt.virt: name: "{{ env.hostname.bootstrap }}" command: undefine - ignore_errors: yes + ignore_errors: true delegate_to: "{{ env.ip.kvm }}" - name: Remove qcow2 tags: wait_for_bootstrap + become: true file: path: /var/lib/libvirt/images/{{env.hostname.bootstrap}}-bootstrap.qcow2 state: absent diff --git a/roles/wait_for_cluster_operators/tasks/main.yaml b/roles/wait_for_cluster_operators/tasks/main.yaml index f9d3e1b3..357004ab 100644 --- a/roles/wait_for_cluster_operators/tasks/main.yaml +++ b/roles/wait_for_cluster_operators/tasks/main.yaml @@ -17,7 +17,7 @@ until: ("False" not in co_check.stdout) retries: 5 delay: 30 - ignore_errors: yes + ignore_errors: true - name: Second round of checking cluster operators tags: wait_for_cluster_operators @@ -36,7 +36,7 @@ until: ("False" not in co_check.stdout) retries: 5 delay: 30 - ignore_errors: yes + ignore_errors: true - name: Third round of checking cluster operators tags: wait_for_cluster_operators @@ -55,7 +55,7 @@ until: ("False" not in co_check.stdout) retries: 5 delay: 30 - ignore_errors: yes + ignore_errors: true - name: Fourth round of checking cluster operators tags: wait_for_cluster_operators @@ -74,7 +74,7 @@ until: ("False" not in co_check.stdout) retries: 5 delay: 30 - ignore_errors: yes + ignore_errors: true - name: Fifth round of checking cluster operators tags: wait_for_cluster_operators @@ -93,7 +93,7 @@ until: ("False" not in co_check.stdout) retries: 5 delay: 30 - ignore_errors: yes + ignore_errors: true - name: Sixth round of checking cluster operators tags: wait_for_cluster_operators @@ -112,7 +112,7 @@ until: ("False" not in co_check.stdout) retries: 5 delay: 30 - ignore_errors: yes + ignore_errors: true - name: Seventh round of checking cluster operators tags: wait_for_cluster_operators From 2fbbe2112b870ff879c5978856055113520ab2c7 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 10 Jan 2022 03:00:51 -0800 Subject: [PATCH 601/885] Added wheel to list of 
groups that bastion admin user gets added to on boot Signed-off-by: Jacob Emery --- roles/create_bastion/templates/cloud_init.cfg.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/create_bastion/templates/cloud_init.cfg.j2 b/roles/create_bastion/templates/cloud_init.cfg.j2 index fb9119f0..345b3dc1 100644 --- a/roles/create_bastion/templates/cloud_init.cfg.j2 +++ b/roles/create_bastion/templates/cloud_init.cfg.j2 @@ -5,7 +5,7 @@ manage_etc_hosts: true users: - name: {{ env.access.login.bastion.user }} sudo: ALL=(ALL) NOPASSWD:ALL - groups: adm,sys + groups: adm,sys,wheel home: /home/{{ env.access.login.bastion.user }} shell: /bin/bash lock_passwd: false From 07f7ff8d8b43868ebc18b057439b38ccdbbcd865 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 10 Jan 2022 14:04:33 -0800 Subject: [PATCH 602/885] Added conditional to infra nodes haproxy config tasks to run only when infra vars are defined in env.yaml Signed-off-by: Jacob Emery --- roles/haproxy/tasks/main.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index c26b2606..0b167461 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -61,6 +61,7 @@ loop_control: extended: yes index_var: i + when: env.hostname.infra is defined - name: Add compute node information to 80 section in haproxy config tags: haproxy @@ -81,6 +82,7 @@ loop_control: extended: yes index_var: i + when: env.hostname.infra is defined - name: Set haproxy boolean to enable connections tags: haproxy From ae71ee6403411b73ad6b5234194b3acf3b69488f Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 10 Jan 2022 14:05:39 -0800 Subject: [PATCH 603/885] Kept sudo password var in set_inventory but removed SSH username and password and switched back to using SSH keys Signed-off-by: Jacob Emery --- roles/set_inventory/tasks/main.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml index 76df342c..751d9687 100644 --- a/roles/set_inventory/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -13,10 +13,10 @@ marker_end: "end of" block: | [kvm_host] - {{env.ip.kvm}} ansible_connection=ssh ansible_ssh_user={{env.access.login.kvm.user}} ansible_ssh_pass={{env.access.login.kvm.pass}} ansible_become_password={{env.access.login.kvm.sudo_pass}} + {{env.ip.kvm}} ansible_become_password={{env.access.login.kvm.sudo_pass}} [bastion] - {{env.ip.bastion}} ansible_connection=ssh ansible_ssh_user={{env.access.login.bastion.user}} ansible_ssh_pass={{env.access.login.bastion.pass}} ansible_become_password={{env.access.login.bastion.sudo_pass}} + {{env.ip.bastion}} ansible_become_password={{env.access.login.bastion.sudo_pass}} [bootstrap] {{env.ip.bootstrap}} @@ -70,7 +70,7 @@ lineinfile: path: inventory insertafter: "app" - line: "{{ item }} ansible_connection=ssh ansible_ssh_user={{env.access.login.app.user}} ansible_ssh_pass={{env.access.login.app.pass}} ansible_become_password={{env.access.login.app.sudo_pass}}" + line: "{{ item }} ansible_become_password={{env.access.login.app.sudo_pass}}" loop: "{{env.ip.app}}" when: env.ip.app is defined From 8caa6610b649c946fdd74807d02d678fc601a66f Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 10 Jan 2022 14:07:15 -0800 Subject: [PATCH 604/885] Removed SSH password auth bullet point from CHANGELOG, switched back to SSH keys Signed-off-by: Jacob Emery --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a4dc664c..b9eea12c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,7 +35,6 @@ Released: 2022-01-06 * Task to httpd to allow port 4443 because SELinux is no longer set to permissive (see '[Removed](###Removed)' below). * ### Modified * Formatting of README file to be prettier and more useful. 
- * Ansible connection to SSH with password authentication since it was necessary for copying SSH keys anyway. Kept copying of SSH keys to Ansible-managed servers because it's still useful to have. * env.yaml to have two sections separated by a comment block: one for variables that need to be filled out, the other for pre-filled variables that can be modified if desired. * Ansible user from running as root to an admin user with sudo privileges. * ### Removed From df27e71a6de16080be0a3462a05114254ce77f6f Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 24 Jan 2022 11:38:20 -0600 Subject: [PATCH 605/885] Removed '--ask-become-pass' from README no longer necessary to include in the command --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index ba1a9045..60af24bf 100644 --- a/README.md +++ b/README.md @@ -120,11 +120,11 @@ If you encounter errors while running the main playbook, there are a few things 3) Google the specific error message. 3) Re-run the role indivually with [tags](#Tags) and the verbosity '-v' option to get more debugging information (more v's give more info). For example: ~~~ - ansible-playbook main.yaml --ask-become-pass --tags get_ocp -vvv + ansible-playbook main.yaml --tags get_ocp -vvv ~~~ 4) Teardown troublesome KVM guests with [teardown](#Teardown) scripts and start again with [tags](#Tags). To start from the beginning, run: ~~~ - ansible-playbook teardown.yaml --ask-become-pass --tags full + ansible-playbook teardown.yaml --tags full ~~~ 6) E-mail Jacob Emery at jacob.emery@ibm.com 7) If it's a problem with an OpenShift verification step, first re-reun the role with [tags](#Tags). If that doesn't work, SSH into the bastion as root ("ssh root@bastion-ip-address-here") and then run,"export KUBECONFIG=~/ocpinst/auth/kubeconfig" and then "oc whoami" and make sure it ouputs "system:admin". Then run the shell command from the role you would like to check on manually: i.e. 
'oc get nodes', 'oc get co', etc. @@ -208,4 +208,4 @@ If you encounter errors while running the main playbook, there are a few things * `verification`: All OpenShift cluster verification tasks * `wait_for_bootstrap`: Tasks for to wait_for_bootstrap role * `wait_for_cluster_operators`: Tasks for wait_for_cluster_operators - * `wait_for_install_complete`: Tasks for wait_for_install_complete role \ No newline at end of file + * `wait_for_install_complete`: Tasks for wait_for_install_complete role From 72332332ec1b20c3758cf615e573d19ec909ad2b Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Wed, 23 Feb 2022 16:01:29 -0800 Subject: [PATCH 606/885] Added ansible_user parameter In case the user does not use the same username as they do on their workstation. SSH key authentication is set up by the time Ansible needs to connect via SSH the first time. --- roles/set_inventory/tasks/main.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml index 751d9687..4f505c4f 100644 --- a/roles/set_inventory/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -13,10 +13,10 @@ marker_end: "end of" block: | [kvm_host] - {{env.ip.kvm}} ansible_become_password={{env.access.login.kvm.sudo_pass}} + {{env.ip.kvm}} ansible_user={{ env.access.login.kvm.user }} ansible_become_password={{env.access.login.kvm.sudo_pass}} [bastion] - {{env.ip.bastion}} ansible_become_password={{env.access.login.bastion.sudo_pass}} + {{env.ip.bastion}} ansible_user={{ env.access.login.bastion.user }} ansible_become_password={{env.access.login.bastion.sudo_pass}} [bootstrap] {{env.ip.bootstrap}} @@ -86,4 +86,4 @@ - name: Refresh inventory tags: setup - meta: refresh_inventory \ No newline at end of file + meta: refresh_inventory From a678281768639c734015f3207069a8bc23be898c Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Sun, 27 Mar 2022 01:40:23 -0700 Subject: [PATCH 607/885] Updated documentation for latest 
update Signed-off-by: Jacob Emery --- CHANGELOG.md | 22 ++++- README.md | 176 +++++------------------------------ installation_instructions.md | 89 ++++++++++++++++++ 3 files changed, 132 insertions(+), 155 deletions(-) create mode 100644 installation_instructions.md diff --git a/CHANGELOG.md b/CHANGELOG.md index b9eea12c..c4a95429 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ All notable changes to this project will be documented in this file. ## Table of Contents * [Roadmap](#Roadmap) +* [Automated KVM Host Provisioning](#Automated-KVM-Host-Provisioning) * [Infrastructure Nodes and Extra Apps](#Infrastructure-Nodes-and-Extra-Apps) * [Scaling](#Scaling) * [Automated OCP Verification](#Automated-OCP-Verification) @@ -11,14 +12,29 @@ All notable changes to this project will be documented in this file. * [Initial Commit](#Initial-Commit) ## Roadmap -* Mark infrastructure nodes for specific operators +* Add option to use a VPN to reduce # of IPs needed +* Add the ability to provision multiple LPARs for high availability +* Tag infrastructure nodes for specific operators * Add air-gapped (disconnected) install option -* Add option to have load balancer on bastion or not * Add option for OpenShift to use a proxy server * Add picture of finished infrastructure to README * Add README’s for each role * Make ssh-copy-id role idempotent -* Add an option to automte the creation of an LPAR and install RHEL on KVM host + +## #Automated KVM Host Provisioning +Version 1.4.0 \ +Released: 2022-03-26 +* ### Summary + * Now able to provision the KVM host via Ansible. + * Changed the structure of playbooks, variables, and inventories to use Ansible best practices. +* ### Added + * Support for using IBM's zHMC Ansible modules to automate the creation of a logical partition (LPAR) profile, connect storage group and network card, boot from an FTP server, and then kickstart the installation of RHEL to serve as the KVM hypervisor for the cluster. 
+ * Usage of Ansible vault to encrypt sensitive data. Playbooks must now be run with --ask-vault-pass, e.g. 'ansible-playbook playbooks/site.yaml --ask-vault-pass' +* ### Modified + * Bastion boot method from cloud-init to FTP and kickstart. + * The structure of playbooks. The setup.yaml playbook still must be run before anything else, but now there is a master playbook - site.yaml which imports all other playbooks. This was done to be more user-friendly and in-line with best practices. Previously, everything was all in one playbook and relied on tags to start back from a given point. Relying solely on tags proved tedious. + * The structure for inventories, which allows for more flexibility with deployments and is more in-line with best practices. Now you can have multiple inventories and specify which you would like to use for a given run in the ansible.cfg file. + * The structure of variables, to allow for the separation of the bastion node from the rest of the cluster. This opens up many more possibilities for more complex deployments where, for example, the bastion node is already created. ## Infrastructure Nodes, Extra Apps, Security Version: 1.3.0 \ diff --git a/README.md b/README.md index 60af24bf..7bba2c72 100644 --- a/README.md +++ b/README.md @@ -3,34 +3,36 @@ ## Table of Contents * [Scope](#Scope) * [Supported Operating Systems](#Supported-Operating-Systems) +* [Installation Instructions](#Installation-Instructions) * [Pre-Requisites](#Pre-Requisites) -* [Instructions](#Installation-Instructions) - * [Setup](#Setup) - * [Provisioning](#Provisioning) - * [Post-Install Complete](#Post-Install-Complete) * [Troubleshooting](#Troubleshooting) * [Teardown](#Teardown) * [Tags](#Tags) +* [Vault](#Vault) ## Scope -* The goal of this playbook is to automate the setup and deployment of a User Provisioned Infrastructure (UPI) OpenShift cluster on an IBM Z or LinuxONE mainframe utilizing Kernel Virtual Machine (KVM) as the hypervisor. 
-* This README file gives extremely detailed step-by-step instructions for you to use as a reference. It assumes basic understanding of the command-line, but near-zero experience with Ansible itself. +* The goal of this playbook is to automate the setup and deployment of a Red Hat OpenShift Container Platform (RHOCP) cluster on IBM Z / LinuxONE with Kernel Virtual Machine (KVM) as the hypervisor. This is a user-provisioned infrastructure (UPI) installation of RHOCP. +* These playbooks assume a basic understanding of the command-line. Using them requires near-zero experience with Ansible, unless you want to customize them. ## Supported Operating Systems (for local workstation running Ansible) * Linux (RedHat and Debian) * MacOS X +## Installation Instructions +A step-by-step guide can be found [here](#installation_instructions.md) in the installation_instructions.md file. + ## Pre-Requisites * A Red Hat account ([Sign Up](https://www.redhat.com/wapps/ugc/register.html?_flowId=register-flow&_flowExecutionKey=e1s1)) * A [license](https://access.redhat.com/products/red-hat-openshift-container-platform/) or [free trial](https://www.redhat.com/en/technologies/cloud-computing/openshift/try-it) of Red Hat OpenShift Container Platform for IBM Z systems - s390x architecture (OCP license comes with licenses for RHEL and CoreOS) -* Access to a logical partition (LPAR) on an IBM Z or LinuxONE mainframe, with at least: - * 6 Integrated Facilities for Linux (IFLs) with SMT2 enabled - * 85 GB of RAM - * 1 TB of disk space mounted to /var/lib/libvirt/images - * Red Hat Enterprise Linux (RHEL) 8.4 installed with networking configured and a user with sudo privileges created. - * Access to 8 (for a minimum installation) pre-allocated IPv4 addresses - * Note on DNS: The [main playbook](main.yaml) will create a DNS server on the bastion by default. 
If you plan to use a existing DNS server instead, when filling out the variables in [env.yaml](env.yaml) in Step 3, please make sure to mark `env.networking.dns.setup_on_bastion` to `false`. Either way, the playbook will double-check the DNS configuration before continuing. +* Hardware Management Console (HMC) access on IBM Z or LinuxONE (390x) +* Must be Dynamic Partition Manager (DPM) enabled +* An FTP server with RHEL iso mounted +* For a minimum installation, at least: + * 6 Integrated Facilities for Linux (IFLs) with SMT2 enabled + * 85 GB of RAM + * A storage group created with 1 TB of disk space + * 8 IPv4 addresses * If you are using MacOS for your workstation running Ansible, you also need to have: * [Homebrew](https://brew.sh/) package manager installed: ~~~ @@ -66,146 +68,16 @@ sudo yum install ansible ~~~ -## Installation Instructions - -### Setup -* **Step 1: Get This Repository** - * In your terminal, navigate to a folder where you would like to store this project, copy/paste the following and hit Enter: - ~~~ - git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git - ~~~ -* **Step 2: Get Red Hat Info** - * In a web browser, navigate to Red Hat's [customer portal](https://access.redhat.com/products/red-hat-enterprise-linux/), click on the 'Download Latest' button, use the drop-down to select Red Hat Enterprise Linux for IBM z Systems, select your desired version, make sure 'Architcture' is 's390x', and then scroll down to 'Red Hat Enterprise Linux X.X Update KVM Guest Image' and click on 'Download Now'. - * See where it downloads, copy the path and paste it into [env.yaml](env.yaml) as the variable `env.redhat.path_to_qcow2`. - * In a web browser, navigate to the Red Hat [console](https://console.redhat.com/openshift/install/ibmz/user-provisioned) and copy the OpenShift pull secret and paste it into [env.yaml](env.yaml) as the variable `env.redhat.pull_secret`. 
-* **Step 3: Set Variables** - * In a text editor of your choice, open [env.yaml](env.yaml) - * Fill out variables marked with `X` to match your specific installation. - * There are two sections of this file, separated by a comment block, which distinguishes variables that need to be filled in and variables that are pre-filled with defaults but can be altered if desired. - * This is the most important step in the process. Take the time to make sure everything here is correct. -* **Step 4: Setup Script** - * Navigate to the folder where you cloned the Git Repository in your terminal. - * Run this shell command: - ~~~ - ansible-playbook setup.yaml - ~~~ - * If you'd like to make any last changes to the [variables file](env.yaml), the [inventory](inventory) or the Ansible [configuration file](ansible.cfg), do so now. - -### Provisioning -* **Step 5: Running the Main Playbook** - * Navigate to the folder where you cloned the Git repository in your terminal. - * Start the main playbook by running this shell command: - ~~~ - ansible-playbook main.yaml - ~~~ - * Watch Ansible as it completes the installation, correcting errors if they arise. - * To look at what is running in detail, open roles/'task-you-want-to-inspect'/tasks/main.yaml - * If the process fails in error: - * Go through the steps in the [troubleshooting](#Troubleshooting) section. - * Use [tags](#Tags) to selectively start from a certain point. See the [main playbook](main.yaml) to determine what part you would like to run and use those tags when running the [main playbook](main.yaml), for example: - ~~~ - ansible-playbook main.yaml --tags 'get_ocp,create_nodes' - ~~~ - -### Post-Install Complete -* **Step 6: First-Time Login** - * The last step of the main playbook will print a URL, username and temporary password for first-time login. - * Use a web-browser to type in the URL, which should take you to a sign-in page. Use the provided credentials to sign in. - * Congratulations! 
Your OpenShift cluster installation is now complete. - -## Troubleshooting -If you encounter errors while running the main playbook, there are a few things you can do: -1) Double check your variables in [env.yaml](env.yaml). -2) Inspect the part that failed by opening `roles/role_name_to_inspect/tasks/main.yaml` -3) Google the specific error message. -3) Re-run the role indivually with [tags](#Tags) and the verbosity '-v' option to get more debugging information (more v's give more info). For example: - ~~~ - ansible-playbook main.yaml --tags get_ocp -vvv - ~~~ -4) Teardown troublesome KVM guests with [teardown](#Teardown) scripts and start again with [tags](#Tags). To start from the beginning, run: - ~~~ - ansible-playbook teardown.yaml --tags full - ~~~ -6) E-mail Jacob Emery at jacob.emery@ibm.com -7) If it's a problem with an OpenShift verification step, first re-reun the role with [tags](#Tags). If that doesn't work, SSH into the bastion as root ("ssh root@bastion-ip-address-here") and then run,"export KUBECONFIG=~/ocpinst/auth/kubeconfig" and then "oc whoami" and make sure it ouputs "system:admin". Then run the shell command from the role you would like to check on manually: i.e. 'oc get nodes', 'oc get co', etc. - -## Teardown: -* If you would like to teardown your VMs, first determine whether you would like to do a `full`, `partial`, or `app` teardown, specified below. 
-* `full`: - * To teardown all the OpenShift KVM guest virtual machines (will not teardown KVM host or extra RHEL app VMs) run: - ~~~ - ansible-playbook teardown.yaml --tags full - ~~~ - * Start back again from the beginning by running - ~~~ - ansible-playbook main.yaml - ~~~ -* `partial`: - * To teardown all OpenShift KVM guest virtual machines except the bastion (will also not teardown KVM host or extra RHEL app VMs) run: - ~~~ - ansible-playbook teardown.yaml --tags partial - ~~~ - * To start the main.yaml playbook back from that point, run: - ~~~ - ansible-playbook main.yaml --tags 'get_ocp,create_nodes,verification' - ~~~ -* `app`: - * To teardown only the extra RHEL VMs for non-cluster applications, run: - ~~~ - ansible-playbook teardown.yaml --tags partial - ~~~ - * To re-create those VMs, run: - ~~~ - ansible-playbook main.yaml --tags app - ~~~ - ## Tags * To be more selective with what parts of playbooks run, use tags. +* To determine what part of a playbook you would like to run, open the playbook you'd like to run and find the roles parameter. Each [role](roles) has a corresponding tag. * This is especially helpful for troubleshooting. -* To determine what part of a playbook you would like to run, check the list below. Each [role](roles) has a corresponding tag. There are also some tags like "bastion" that cover multiple roles. To see these tags, see the [main playbook](main.yaml). Here's how to use the tags: - * with one tag: - ~~~ - ansible-playbook main.yaml --tags get_ocp - ~~~ - * with multiple tags (enclose tags with single or double quotes, separate with commas): - ~~~ - ansible-playbook main.yaml --tags 'bastion,get_ocp' - ~~~ -* List of Tags: - * `approve_certs`: Tasks for approve_certs role - * `app_setup`: Tasks related to setting up the extra RHEL VMs - * `attach_subscription`: Auto-attach Red Hat subscription role - * `bastion`: All bastion tasks - * `bastion_setup`: Configuration of the bastion node, not including verification steps. 
- * `check_nodes`: Tasks for check_nodes role - * `check_dns`: Check DNS resolution - * `check_ssh`: Check SSH role - * `compute`: Creation of the compute nodes - * `control`: Creation of the control nodes - * `create_bastion`: Creation of bastion KVM guest - * `create_bootstrap`: Creation of boostrap KVM guest - * `create_nodes`: Second set of KVM host's plays - * `dns`: Configuration of DNS server on bastion - * `full`: Use with teardown.yaml to bring down all KVM guests - * `get_ocp`: Prepare bastion for installing OpenShift - * `haproxy`: Configuration of load balancer on bastion - * `httpd`: Configuration of Apache server on bastion - * `install_packages`: Install and update packages - * `kvm_host`: All KVM host tasks - * `kvm_prep`: First set of KVM host's tasks - * `workstation`: Tasks that apply to the local machine running Ansible - * `prep_kvm_guest`: Get Red Hat CoreOS kernel and initramfs on host - * `partial`: Use with teardown.yaml to bring down all VMs except bastion - * `set_selinux_permissive`: Tasks related to SELinux settings - * `set_firewall`: Configuration of firewall - * `setup`: First set of setup tasks on the workstation - * `ssh`: All SSH tasks - * `ssh_agent`: Setting up SSH agent - * `ssh_copy_id`: Copying SSH key to target - * `ssh_key_gen`: Ansible SSH keypair creation - * `ssh_ocp_key_gen`: Generate SSH key pair for OpenShift on bastion - * `verification`: All OpenShift cluster verification tasks - * `wait_for_bootstrap`: Tasks for to wait_for_bootstrap role - * `wait_for_cluster_operators`: Tasks for wait_for_cluster_operators - * `wait_for_install_complete`: Tasks for wait_for_install_complete role +## Vault +* The setup.yaml playbook encrypts passwords entered into the [master variables file](inventories/default/group_vars/all.yaml) for security. +* The sensitive data is transferred to the [vault](vault.yaml) and the variables are redacted from the original variables file. 
+* To encrypt/decrypt the vault to view its contents, run either of the following commnds: + ~~~ + ansible-playbook playbooks/vault.yaml --tags decrypt --ask-vault-pass + ansible-playbook playbooks/vault.yaml --tags encrypt --ask-vault-pass + ~~~ \ No newline at end of file diff --git a/installation_instructions.md b/installation_instructions.md new file mode 100644 index 00000000..35116750 --- /dev/null +++ b/installation_instructions.md @@ -0,0 +1,89 @@ + +## Installation Instructions + +### Setup +* **Step 1: Get This Repository** + * In your terminal, navigate to a folder where you would like to store this project, copy/paste the following and hit Enter: + ~~~ + git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git + ~~~ +* **Step 2: Get Red Hat Info** + * In a web browser, navigate to the Red Hat [console](https://console.redhat.com/openshift/install/ibmz/user-provisioned) and copy the OpenShift pull secret and save it for the next step. +* **Step 3: Set Variables** + * In a text editor of your choice, open [inventories/group_vars/all.yaml](env.yaml). + * Fill out variables marked with `X` to match your specific installation. + * This is the most important step in the process. Take the time to make sure everything here is correct. +* **Step 4: Setup Script** + * Navigate to the folder where you cloned the Git Repository in your terminal. + * Run this shell command: + ~~~ + ansible-playbook playbooks/setup.yaml --ask-vault-pass + ~~~ + +### Provisioning +* **Step 5: Running the Main Playbook** + * Navigate to the folder where you cloned the Git repository in your terminal. + * Start the master playbook by running this shell command: + ~~~ + ansible-playbook playbooks/site.yaml --ask-vault-pass + ~~~ + * Alternatively, move more carefully by running one playbook at a time. 
Here's the list of playbooks to be run in order, also found in [playbooks/site.yaml](playbooks/site.yaml): + * create_kvm_host.yaml + * setup_kvm_host.yaml + * create_bastion.yaml + * setup_bastion.yaml + * create_nodes.yaml + * ocp_verification.yaml + * Watch Ansible as it completes the installation, correcting errors if they arise. + * To look at what is running in detail, open the playbook and/or roles/role-name/tasks/main.yaml + * If the process fails in error: + * Go through the steps in the [troubleshooting](#Troubleshooting) section. + * Use [tags](#Tags) to selectively start from a certain point in the playbook. Each role has a corresponding tag for convenience. For example, to run the httpd and get_ocp roles of the setup_bastion playbook: + ~~~ + ansible-playbook playbooks/setup_bastion.yaml --tags 'httpd,get_ocp' --ask-vault-pass + ~~~ + +### Post-Install Complete +* **Step 6: First-Time Login** + * The last step of the main playbook will print a URL, username and temporary password for first-time login. + * Use a web-browser to type in the URL, which should take you to a sign-in page. Use the provided credentials to sign in. You will have to bypass a warning screen. + * Congratulations! Your OpenShift cluster installation is now complete. + +## Troubleshooting +If you encounter errors while running the main playbook, there are a few things you can do: +1) Double check your variables. +2) Inspect the part that failed by opening the playbook and/or roles at `roles/role_to_inspect/tasks/main.yaml` +3) Google the specific error message. +3) Re-run the role indivually with [tags](#Tags) or with the verbosity '-v' option to get more debugging information (more v's give more info). For example: + ~~~ + ansible-playbook main.yaml --tags get_ocp -vvv --ask-vault-pass + ~~~ +4) Teardown the KVM host with the delete_partition.yaml playbook or teardown troublesome KVM guests with the [teardown](#Teardown) playbooks. 
+6) E-mail Jacob Emery at jacob.emery@ibm.com +7) If it's a problem with an OpenShift verification step: + * Open the cockpit to monitor the VMs. + * In a web browser, go to https://kvm-host-IP-here:9090 + * Sign-in with your credentials set in the variables file + * Enable administrative access in the top right. + * Open the 'Virtual Machines' tab from the left side toolbar. + * Sometimes it just takes a while, especially if it's lacking resources. Give it some time and then re-reun the playbook/role with [tags](#Tags). + * If that doesn't work, SSH into the bastion as root ("ssh root@bastion-ip-address-here") and then run, "export KUBECONFIG=~/ocpinst/auth/kubeconfig" and then "oc whoami" and make sure it ouputs "system:admin". Then run the shell command from the role you would like to check on manually: i.e. 'oc get nodes', 'oc get co', etc. + * Open the .openshift_install.log file for information on what happened and try to debug the issue. + +## Teardown: +* If you would like to teardown your VMs, first determine whether you would like to do a `full`, `partial`, or `app` teardown, specified below. 
+ * `full`: + * To teardown all the OpenShift KVM guest virtual machines (will not teardown KVM host) run: + ~~~ + ansible-playbook playbooks/teardown.yaml --tags full --ask-vault-pass + ~~~ + * `partial`: + * To teardown all OpenShift KVM guest virtual machines except the bastion (will also not teardown KVM host or extra RHEL app VMs) run: + ~~~ + ansible-playbook teardown.yaml --tags partial + ~~~ + * To start the main.yaml playbook back from that point, run: + ~~~ + ansible-playbook main.yaml --tags 'get_ocp,create_nodes,verification' + ~~~ +* To teardown the KVM host, use the [delete_partition playbook](playbooks/delete_partition.yaml) From 922eaeb996a6aa74cd50e0584c0b3475e1aa68e6 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Sun, 27 Mar 2022 01:42:12 -0700 Subject: [PATCH 608/885] Updated structure to align with best practices Signed-off-by: Jacob Emery --- env.yaml | 162 -------------------- inventories/default/group_vars/all.yaml | 193 ++++++++++++++++++++++++ inventories/default/hosts | 17 +++ inventory | 7 - main.yaml | 107 ------------- playbooks/create_bastion.yaml | 7 + playbooks/create_kvm_host.yaml | 29 ++++ playbooks/create_nodes.yaml | 13 ++ playbooks/delete_partition.yaml | 23 +++ playbooks/ocp_verification.yaml | 16 ++ playbooks/setup.yaml | 45 ++++++ playbooks/setup_bastion.yaml | 31 ++++ playbooks/setup_kvm_host.yaml | 40 +++++ playbooks/site.yaml | 9 ++ playbooks/teardown.yaml | 85 +++++++++++ playbooks/test.yaml | 39 +++++ playbooks/vault.yaml | 18 +++ setup.yaml | 10 -- teardown.yaml | 158 ------------------- 19 files changed, 565 insertions(+), 444 deletions(-) delete mode 100644 env.yaml create mode 100644 inventories/default/group_vars/all.yaml create mode 100755 inventories/default/hosts delete mode 100755 inventory delete mode 100644 main.yaml create mode 100644 playbooks/create_bastion.yaml create mode 100644 playbooks/create_kvm_host.yaml create mode 100644 playbooks/create_nodes.yaml create mode 100644 
playbooks/delete_partition.yaml create mode 100644 playbooks/ocp_verification.yaml create mode 100644 playbooks/setup.yaml create mode 100644 playbooks/setup_bastion.yaml create mode 100644 playbooks/setup_kvm_host.yaml create mode 100644 playbooks/site.yaml create mode 100644 playbooks/teardown.yaml create mode 100644 playbooks/test.yaml create mode 100644 playbooks/vault.yaml delete mode 100644 setup.yaml delete mode 100644 teardown.yaml diff --git a/env.yaml b/env.yaml deleted file mode 100644 index 90991fb8..00000000 --- a/env.yaml +++ /dev/null @@ -1,162 +0,0 @@ -# This is the home for all your variables. The single source of truth for your specific installation. -# Variables with a X need to be filled in. There's a point below which marks that nothing after it needs to be modified for a default installation. -# This is the most important step in the process, please take your time to make sure these are set correctly. -# A note on YAML syntax: only the lowest level variables in each hierarchy need to be filled out. -# For example, below, don't put anything after "env" or "redhat", but do delete the '#X' and fill in "username" - -env: - redhat: - username: X #Providing your Red Hat login credentials here will auto attach your RHEL subscription. - password: X #If you do not provide it, you will have to do so manually before packages can be installed. - path_to_qcow2: X #Absolute path to RHEL qcow2 file on workstation running Ansible, e.g. /Users/username/Downloads/rhel-8.5-s390x-kvm.qcow2 (If unclear, see README step 2) - pull_secret: 'X' #paste OpenShift pull secret into these single quotes. If unclear, see README step 2. - -# IP addresses for the nodes that Ansible will be run against. -# Feel free to add as many nodes as needed. -# This will automatically fill out the inventory file when setup.yaml is run. 
- ip: - kvm: X - bastion: X - bootstrap: X - control: - - X - - X - - X - compute: - - X - - X - #Un-comment and fill out list to create infrastructure nodes - #infra: - #- X - #Un-comment and fill out list to create extra RHEL KVM guests for non-cluster applications - #app: - #- X - -# Make sure the total number of each type of node matches up with number of IPs above. - hostname: - kvm: X - bastion: X - bootstrap: X - control: - - X - - X - - X - compute: - - X - - X - #Un-comment and fill out list to create infrastructure nodes - #infra: - #- X - #- X - #Un-comment and fill out list to create extra RHEL KVM guests for non-cluster applications - #app: - #- X - #- X - - networking: - metadata_name: X #e.g. ocpz - base_domain: X #e.g. pbm.ihost.com (Will be combined with metadata_name above to create fully qualified domain names) - interface_name: X #KVM host network interface name: e.g. enc1 or vlan21@enc1 - gateway: X - netmask: X - dns: - setup_on_bastion: true #Set to false if you do not want to setup a DNS server on the bastion because you already have a DNS server elsewhere. - nameserver: X #If above variable is true, then this variable should be the same as env.ip.bastion above. - forwarder: X #For cluster to reach external internet. Can use 8.8.8.8 as a default. 
- -#To create user on bastion, create and copy ssh keys - access: - login: - kvm: - user: X - pass: X - sudo_pass: X - bastion: - user: X - pass: X - sudo_pass: X - app: - user: X - pass: X - sudo_pass: X - ssh: - ansible_key_comment: "Ansible key" - ocp_key_comment: "OpenShift key" - - -####################################################################################### -# All variables below this point do not need to be changed for a default installation # -####################################################################################### - - -#Packages to be installed on the KVM host, bastion, and extra RHEL VMs for non-cluster applications (only used if app IP and hostnames are defined above). -#Feel free to add more as needed. - pkgs: - kvm: ['@server-product-environment','@hardware-monitoring','@network-file-system-client','@remote-system-management', - '@headless-management','@system-tools','libvirt-devel','libvirt-daemon-kvm','qemu-kvm','virt-manager','genisoimage', - 'libvirt-daemon-config-network','libvirt-client','qemu-img','virt-install','virt-viewer','libvirt-daemon-kvm','libvirt'] - bastion: ['haproxy','httpd','bind','bind-utils','expect','firewalld','mod_ssl'] - app: ['@server-product-environment','@system-tools','@remote-system-management'] - -#Pre-filled values are minimum resource requirements for nodes. 
- node_resources: - bastion: - disk_size: 30 #in GB - ram: 4096 #in MB - vcpu: 4 #number of virtual CPUs - os_variant: 8.5 #Red Hat Enterprise Linux version - bootstrap: - disk_size: 120 - ram: 16384 - vcpu: 4 - os_variant: 8.5 - control: - disk_size: 120 - ram: 16384 - vcpu: 4 #8 recommended - os_variant: 8.5 - compute: - disk_size: 120 - ram: 8192 - vcpu: 2 #6 recommended - os_variant: 8.5 - infra: #will only be used if you defined infrastructure node IPs and hostnames - disk_size: 120 - ram: 16384 - vcpu: 8 - os_variant: 8.5 - app: #will only be used if you defined extra RHEL VM app IPs and hostnames - disk_size: 80 - vcpu: 8 - ram: 8192 - os_variant: 8.5 - -# If you would like to download the latest stable version of OpenShift, leave as is. -# Otherwise, replace these links with preferred versions. - openshift: - client: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz - installer: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz - -# If you would like to use a version of CoreOS that has been tested with these playbooks, leave as is. -# Otherwise, replace these links with preferred versions. - coreos: - kernel: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-kernel-s390x - initramfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-initramfs.s390x.img - rootfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-rootfs.s390x.img - -# If you would like to keep the defaults for the OpenShift install-config file, leave as is. 
- install_config: - api_version: v1 - compute: - architecture: s390x - hyperthreading: Enabled - control: - architecture: s390x - hyperthreading: Enabled - cluster_network: - cidr: 10.128.0.0/14 - host_prefix: 23 - type: OpenShiftSDN - service_network: 172.30.0.0/16 - fips: "false" #include quotes Note: FIPS is not yet supported for OpenShift on Z. - #OpenShift SSH key is generated via Ansible in ssh_ocp_key_gen role \ No newline at end of file diff --git a/inventories/default/group_vars/all.yaml b/inventories/default/group_vars/all.yaml new file mode 100644 index 00000000..124ca41c --- /dev/null +++ b/inventories/default/group_vars/all.yaml @@ -0,0 +1,193 @@ +# Variables with a X need to be filled in. Some are filled in with defaults. +# A note on YAML syntax: only the lowest level variables in each hierarchicy can be filled out. +# For example below, don't put anything after "env" or "z", but do delete the #X and fill in "cpc_name" + +env: + z: + cpc_name: #X + hmc: + host: #X + auth: + user: #X + pass: #X + lpar: + name: #X + description: #X + access: #will be used in kickstart file to create an admin user and set the root password + user: #X + pass: #X + root_pass: #X + ifl: + count: #X #e.g. 6 + initial_memory: #X #in MB, e.g. 66000 + max_memory: #X #in MB, e.g. 96000 + initial_weight: #X #e.g. 200 + min_weight: #X #e.g. 50 + max_weight: #X #e.g. 500 + networking: + hostname: #X + ip: #X + subnetmask: #X #e.g. 255.255.254 + subnet: #X #e.g. 23 + gateway: #X + nameserver: #X + device1: #X + #device2: #uncomment if you have two network devices + nic: + card1: + name: #X + adapter: #X + port: #X + dev_num: #X #hex, e.g. 0x0600 + #card2: #uncomment this section if you have two NICs + #name: + #adapter: + #port: + #dev_num: + storage_group: + name: #X + type: fcp #only 'fcp' supported right now + pool_path: #X #e.g. /var/lib/libvirt/images + storage_wwpn: + - #X #e.g. 500708680235c3f0 + - #X + dev_num: + - #'X' #in single quotes, e.g. 
'0001' + - #'X' + vg: + name: #X #e.g. kvm-vg + lv: + name: #X #e.g. kvm-lv + fs: + name: #X #e.g. kvm-fs + type: xfs + lun_name: + - #X #used for boot, e.g. mpatha + - #X #e.g. mpathb + - #X + + ftp: + ip: #X + user: #X + pass: #X + iso_mount_dir: #X + cfgs_dir: #X + + redhat: + username: #X + password: #X + pull_secret: #X #OpenShift pull secret + + bastion: + create: true + name: #X + resources: + disk_size: 30 + ram: 4096 + swap: 4096 + vcpu: 4 + os_variant: 8.5 + networking: + ip: #X + hostname: #X + subnetmask: #X e.g. 255.255.254.0 + gateway: #X + nameserver: #X #if DNS on bastion, should be bastion's IP + interface: #X #e.g. enc1 + base_domain: #X #e.g. pbm.ihost.com + access: + user: #X + pass: #X + root_pass: #X + ocp_ssh_key_comment: #X + options: # True or false, would like to have the bastion host these services? + loadbalancer: True + dns: True + + cluster: + networking: + metadata_name: #X #e.g. ocpkvm + base_domain: #X #e.g. pbm.ihost.com + nameserver: #X + forwarder: #X #can use 1.1.1.1 + nodes: + bootstrap: + disk_size: 120 + ram: 16384 + vcpu: 4 + os_variant: 8.5 + ip: #X + hostname: #X + control: + disk_size: 120 + ram: 16384 + vcpu: 4 + os_variant: 8.5 + ip: + - #X + - #X + - #X + hostname: + - #X + - #X + - #X + compute: + disk_size: 120 + ram: 8192 + vcpu: 2 + os_variant: 8.5 + ip: + - #X + - #X + hostname: + - #X + - #X + infra: + disk_size: 120 + ram: 16384 + vcpu: 2 + os_variant: 8.5 + ip: + - #X + - #X + hostname: + - #X + - #X + +####################################################################################### +# All variables below this point do not need to be changed for a default installation # +####################################################################################### + + language: en_US.UTF-8 + timezone: America/New_York + root_access: false + + pkgs: + galaxy: [ibm.ibm_zhmc, community.general, community.crypto, ansible.posix, community.libvirt] + workstation: [openssh, expect] + kvm: ['@virt', cockpit-machines, 
libvirt-devel, virt-top, qemu-kvm, 'python3-lxml,cockpit',] + bastion: [haproxy, httpd, bind, bind-utils, expect, firewalld, mod_ssl, python3-policycoreutils] + + openshift: + client: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-client-linux.tar.gz + installer: https://mirror.openshift.com/pub/openshift-v4/s390x/clients/ocp/stable/openshift-install-linux.tar.gz + + coreos: + kernel: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-kernel-s390x + initramfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-initramfs.s390x.img + rootfs: https://mirror.openshift.com/pub/openshift-v4/s390x/dependencies/rhcos/4.9/latest/rhcos-4.9.0-s390x-live-rootfs.s390x.img + + install_config: + api_version: v1 + compute: + architecture: s390x + hyperthreading: Enabled + control: + architecture: s390x + hyperthreading: Enabled + cluster_network: + cidr: 10.128.0.0/14 + host_prefix: 23 + type: OpenShiftSDN + service_network: 172.30.0.0/16 + fips: 'false' diff --git a/inventories/default/hosts b/inventories/default/hosts new file mode 100755 index 00000000..2bdd3cf4 --- /dev/null +++ b/inventories/default/hosts @@ -0,0 +1,17 @@ +# will populate from ansible_setup playbook + +[workstation] +127.0.0.1 ansible_connection=local + +[workstation:vars] +ansible_python_interpreter=/usr/local/bin/python3 +#start of ansible managed block from set_inventory role +[ftp] +9.60.86.17 ansible_user=zemery + +[kvm_host] +bnsf1test ansible_host=9.60.87.132 ansible_user=admin ansible_become_password=ibmzrocks + +[bastion] +bastion ansible_host=9.60.87.139 ansible_user=admin ansible_become_password=ibmzrocks +#end of ansible managed block from set_inventory role diff --git a/inventory b/inventory deleted file mode 100755 index beb05627..00000000 --- a/inventory +++ /dev/null @@ -1,7 +0,0 @@ -# will populate from ansible_setup playbook - -[workstation] -127.0.0.1 
ansible_connection=local - -[workstation:vars] -ansible_python_interpreter=/usr/bin/python3 diff --git a/main.yaml b/main.yaml deleted file mode 100644 index 45e2140e..00000000 --- a/main.yaml +++ /dev/null @@ -1,107 +0,0 @@ ---- - -- hosts: workstation - tags: workstation,kvm_host - connection: local - become: false - gather_facts: no - vars_files: - - env.yaml - vars: - - ssh_target: ["{{ env.ip.kvm }}","{{ env.access.login.kvm.user }}","{{ env.access.login.kvm.pass }}"] - roles: - - ssh_key_gen - - ssh_copy_id - - ssh_agent - -- hosts: kvm_host - tags: kvm_host,kvm_prep - become: true - vars_files: - - env.yaml - vars: - - packages: "{{ env.pkgs.kvm}}" - roles: - - check_ssh - - {role: attach_subscription, when: env.redhat.username is defined and env.redhat.password is defined} - - install_packages - - macvtap - - create_bastion - -- hosts: workstation - tags: workstation,bastion - connection: local - become: false - gather_facts: no - vars_files: - - env.yaml - vars: - - ssh_target: ["{{ env.ip.bastion }}","{{ env.access.login.bastion.user }}","{{ env.access.login.bastion.pass }}"] - roles: - - ssh_copy_id - -- hosts: bastion - tags: bastion_setup,bastion - become: true - vars_files: - - env.yaml - vars: - - packages: "{{env.pkgs.bastion}}" - roles: - - check_ssh - - {role: attach_subscription, when: env.redhat.username is defined and env.redhat.password is defined} - - install_packages - - ssh_ocp_key_gen - - set_firewall - - {role: dns, when: env.networking.dns.setup_on_bastion } - - check_dns - - haproxy - - httpd - - {role: get_ocp, become: false } - -- hosts: kvm_host - tags: kvm_host,create_nodes - become: true - gather_facts: no - vars_files: - - env.yaml - roles: - - prep_kvm_guests - - create_bootstrap - - create_control_nodes - - create_compute_nodes - -- hosts: kvm_host - tags: app - become: true - gather_facts: no - vars_files: - - env.yaml - roles: - - {role: create_extra_rhel, when: env.ip.app is defined } - -- hosts: app - tags: app_setup,app - 
become: true - vars_files: - - env.yaml - vars: - - packages: "{{ env.pkgs.app}}" - roles: - - {role: attach_subscription, when: env.ip.app is defined and env.redhat.username is defined and env.redhat.password is defined} - - {role: install_packages , when: env.ip.app is defined } - -- hosts: bastion - tags: verification,bastion - become: false - environment: - KUBECONFIG: "/home/{{ env.access.login.bastion.user }}/ocpinst/auth/kubeconfig" - gather_facts: yes - vars_files: - - env.yaml - roles: - - wait_for_bootstrap - - approve_certs - - check_nodes - - wait_for_cluster_operators - - wait_for_install_complete diff --git a/playbooks/create_bastion.yaml b/playbooks/create_bastion.yaml new file mode 100644 index 00000000..b45d1ad2 --- /dev/null +++ b/playbooks/create_bastion.yaml @@ -0,0 +1,7 @@ +#Prepare KVM host and create bastion node +- hosts: kvm_host + become: true + vars_files: + - ../vault.yaml + roles: + - { role: create_bastion, when: env.bastion.create == True } \ No newline at end of file diff --git a/playbooks/create_kvm_host.yaml b/playbooks/create_kvm_host.yaml new file mode 100644 index 00000000..50d0a1cf --- /dev/null +++ b/playbooks/create_kvm_host.yaml @@ -0,0 +1,29 @@ +--- + +#Create logical partition +- hosts: workstation + tags: create_lpar + connection: local + vars_files: + - ../vault.yaml + roles: + - create_lpar + +#Template and update RHEL configuration files for KVM host to boot from +- hosts: ftp + tags: update + become: false + gather_facts: false + vars_files: + - ../vault.yaml + roles: + - update_cfgs + +#Boot KVM host on LPAR +- hosts: workstation + tags: create + connection: local + vars_files: + - ../vault.yaml + roles: + - create_kvm_host \ No newline at end of file diff --git a/playbooks/create_nodes.yaml b/playbooks/create_nodes.yaml new file mode 100644 index 00000000..520c7fc3 --- /dev/null +++ b/playbooks/create_nodes.yaml @@ -0,0 +1,13 @@ +--- + +#Create KVM guests +- hosts: kvm_host + become: true + gather_facts: false + 
vars_files: + - ../vault.yaml + roles: + - prep_kvm_guests + - create_bootstrap + - create_control_nodes + - create_compute_nodes \ No newline at end of file diff --git a/playbooks/delete_partition.yaml b/playbooks/delete_partition.yaml new file mode 100644 index 00000000..64f64150 --- /dev/null +++ b/playbooks/delete_partition.yaml @@ -0,0 +1,23 @@ +--- +# Sample module-level playbook of the IBM Z HMC Collection + +- hosts: workstation + gather_facts: false + vars_files: + - ../vault.yaml + tasks: + - name: Ensure partition does not exist + ibm.ibm_zhmc.zhmc_partition: + hmc_host: "{{ vault.z.hmc.host }}" + hmc_auth: + userid: "{{ env.z.hmc.auth.user }}" + password: "{{ vault.z.hmc.auth.pass }}" + verify: false + cpc_name: "{{ vault.z.cpc_name }}" + name: "{{ env.z.lpar.name }}" + state: absent + register: lpar_delete + + - name: Print result + debug: + var: lpar_delete diff --git a/playbooks/ocp_verification.yaml b/playbooks/ocp_verification.yaml new file mode 100644 index 00000000..efe49218 --- /dev/null +++ b/playbooks/ocp_verification.yaml @@ -0,0 +1,16 @@ +--- + +#Complete OpenShift verification +- hosts: bastion + become: false + environment: + KUBECONFIG: "/home/{{ env.bastion.access.user }}/ocpinst/auth/kubeconfig" + gather_facts: true + vars_files: + - ../vault.yaml + roles: + - wait_for_bootstrap + - approve_certs + - check_nodes + - wait_for_cluster_operators + - wait_for_install_complete \ No newline at end of file diff --git a/playbooks/setup.yaml b/playbooks/setup.yaml new file mode 100644 index 00000000..56a4906d --- /dev/null +++ b/playbooks/setup.yaml @@ -0,0 +1,45 @@ +--- + +- hosts: workstation + tags: workstation + connection: local + become: false + gather_facts: true + pre_tasks: + + - name: Install required Ansible Galaxy collections. 
+ command: ansible-galaxy collection install {{ item }} + loop: "{{ env.pkgs.galaxy }}" + + - name: Install YAML edit custom module + command: ansible-galaxy install kwoodson.yedit + + - name: Get ibm_zhmc collection install location + shell: ansible-galaxy collection list ibm.ibm_zhmc | grep -i ansible | cut -c 3- + register: zhmc_path + + - name: Install zhmcclient requirements + pip: + requirements: "{{zhmc_path.stdout}}/ibm/ibm_zhmc/requirements.txt" + executable: pip3 + extra_args: --upgrade + + roles: + - kwoodson.yedit + - encrypt_vars + +- hosts: workstation + connection: local + become: false + gather_facts: false + vars_files: + ../vault.yaml + vars: + - packages: "{{ env.pkgs.workstation }}" + - ssh_target: ["{{ vault.ftp.ip }}","{{ env.ftp.user }}","{{ vault.ftp.pass }}"] + roles: + - install_packages + - set_inventory + - ssh_key_gen + - ssh_agent + - ssh_copy_id #to FTP server \ No newline at end of file diff --git a/playbooks/setup_bastion.yaml b/playbooks/setup_bastion.yaml new file mode 100644 index 00000000..b01807e6 --- /dev/null +++ b/playbooks/setup_bastion.yaml @@ -0,0 +1,31 @@ +--- + +#Copy SSH key to access bastion +- hosts: workstation + connection: local + become: false + gather_facts: false + vars_files: + - ../vault.yaml + vars: + ssh_target: ["{{ vault.bastion.networking.ip }}","{{ env.bastion.access.user }}","{{ vault.bastion.access.pass }}"] + roles: + - ssh_copy_id + +#Configure bastion node with essential services +- hosts: bastion + become: true + vars: + packages: "{{ env.pkgs.bastion }}" + vars_files: + - ../vault.yaml + roles: + - {role: attach_subscription, when: env.redhat.username is defined and vault.redhat.password is defined} + - install_packages + - ssh_ocp_key_gen + - set_firewall + - {role: dns, when: env.bastion.options.dns } + - check_dns + - { role: haproxy, when: env.bastion.options.loadbalancer } + - httpd + - {role: get_ocp, become: false } \ No newline at end of file diff --git a/playbooks/setup_kvm_host.yaml 
b/playbooks/setup_kvm_host.yaml new file mode 100644 index 00000000..35dfa668 --- /dev/null +++ b/playbooks/setup_kvm_host.yaml @@ -0,0 +1,40 @@ +--- + +#Copy SSH key to access KVM host +- hosts: workstation + connection: local + become: false + gather_facts: true + vars: + ssh_target: ["{{ vault.z.lpar.networking.ip }}","{{ env.z.lpar.access.user }}","{{ vault.z.lpar.access.pass }}"] + vars_files: + - ../vault.yaml + roles: + - ssh_copy_id + +#Prepare KVM host and create bastion node +- hosts: kvm_host + become: true + vars: + packages: "{{ env.pkgs.kvm}}" + vars_files: + - ../vault.yaml + roles: + - {role: attach_subscription, when: env.redhat.username is defined and vault.redhat.password is defined} + - install_packages + post_tasks: + - name: Enable cockpit console + command: systemctl enable --now cockpit.socket + - name: Start and enable libvirt + service: + name: libvirtd + enabled: yes + state: started + +- hosts: kvm_host + become: true + vars_files: + - ../vault.yaml + roles: + - configure_storage + - macvtap \ No newline at end of file diff --git a/playbooks/site.yaml b/playbooks/site.yaml new file mode 100644 index 00000000..26237cc7 --- /dev/null +++ b/playbooks/site.yaml @@ -0,0 +1,9 @@ +# Master playbook. If you want to do everything all in one, use this. +--- + +- import_playbook: create_kvm_host.yaml +- import_playbook: setup_kvm_host.yaml +- import_playbook: create_bastion.yaml +- import_playbook: setup_bastion.yaml +- import_playbook: create_nodes.yaml +- import_playbook: ocp_verification.yaml \ No newline at end of file diff --git a/playbooks/teardown.yaml b/playbooks/teardown.yaml new file mode 100644 index 00000000..3c4b6f53 --- /dev/null +++ b/playbooks/teardown.yaml @@ -0,0 +1,85 @@ +--- + +# Use the "full" tag when running this playbook to reset your installation back to the point where no VMs were running on the KVM host. Extra RHEL apps will not be torn down. 
+# After you run this playbook, run the main playbook from the beginning with no tags ("ansible-playbook main.yaml") + +- hosts: kvm_host + tags: full + become: true + gather_facts: false + vars: + bastion_teardown: true + vars_files: + - ../vault.yaml + post_tasks: + - name: Capture files to delete + find: + paths: "{{ env.z.lpar.storage_group.pool_path }}" + file_type: file + excludes: + - "lost+found" + register: found_files + + - name: delete files in storage pool path except for lost+found + file: + path: "{{ item.path }}" + state: absent + with_items: "{{ found_files['files'] }}" + + - name: remove local workstation from KVM hosts' authorized_keys file + file: + path: "~/.ssh/authorized_keys" + state: absent + + roles: + - teardown_vms + +- hosts: workstation + tags: full + connection: local + become: false + gather_facts: false + vars: + files_to_reset: ['~/.ssh/ansible','~/.ssh/ansible.pub'] # feel free to add as needed + vars_files: + - ../vault.yaml + pre_tasks: + - name: remove bastion from workstation's known_hosts file + lineinfile: + path: "~/.ssh/known_hosts" + regexp: "{{ vault.bastion.networking.ip}}" + state: absent + + - name: remove KVM host from workstation's known_hosts file + lineinfile: + path: "~/.ssh/known_hosts" + regexp: "{{ vault.z.lpar.networking.ip}}" + state: absent + + roles: + - reset_files + +# Use the "partial" tag when running this playbook to reset your installation back to the point where just the bastion node was running on the KVM host. If extra RHEL VM apps were created, they will not be torn down. 
+# After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'get_ocp,create_nodes,verification'" + +- hosts: bastion + tags: partial + become: true + gather_facts: false + vars: + files_to_reset: ['/home/{{ env.bastion.access.user }}/ocpinst'] # feel free to add as needed + vars_files: + - ../vault.yaml + roles: + - reset_files + +- hosts: kvm_host + tags: partial + become: true + gather_facts: false + vars: + bastion_teardown: no + vars_files: + - ../vault.yaml + roles: + - teardown_vms \ No newline at end of file diff --git a/playbooks/test.yaml b/playbooks/test.yaml new file mode 100644 index 00000000..ea22db76 --- /dev/null +++ b/playbooks/test.yaml @@ -0,0 +1,39 @@ +--- +- hosts: web-servers + become: true + become_method: sudo + tasks: + - name: Ensure the PGP key is installed + apt_key: + id: AC40B2F7 + url: "http://keyserver.ubuntu.com/pks/lookup?op=get&fingerprint=on&search=0x561F9B9CAC40B2F7" + + - name: Ensure https support for apt is installed + package: + name: apt-transport-https + + - name: Ensure the passenger apt repository is added + apt_repository: + repo: 'deb https://oss-binaries.phusionpassenger.com/apt/passenger raring main' + + - name: Ensure nginx is installed + package: + name: nginx-full + + - name: Ensure passenger is installed + apt: + name: passenger + update_cache: yes + + - name: Ensure the nginx configuration file is set + copy: + src: /app/config/nginx.conf + dest: /etc/nginx/nginx.conf + + - name: Ensure nginx is running + service: + name: nginx + state: started + + + diff --git a/playbooks/vault.yaml b/playbooks/vault.yaml new file mode 100644 index 00000000..751f9c26 --- /dev/null +++ b/playbooks/vault.yaml @@ -0,0 +1,18 @@ +--- + +- hosts: workstation + connection: local + become: false + gather_facts: false + vars: + home_dir: "{{ ansible_config_file.split('/')[0:-1] }}" + vars_files: + - ../vault.yaml + tasks: + - name: Decrypt Vault + tags: decrypt + command: ansible-vault 
decrypt {{ home_dir | join('/') }}/vault.yaml + + - name: Encrypt Vault + tags: encrypt + command: ansible-vault encrypt {{ home_dir | join('/') }}/vault.yaml diff --git a/setup.yaml b/setup.yaml deleted file mode 100644 index 2133f0b4..00000000 --- a/setup.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- - -- hosts: workstation - tags: workstation, prep - connection: local - become: false - gather_facts: yes - roles: - - install_dependencies - - set_inventory \ No newline at end of file diff --git a/teardown.yaml b/teardown.yaml deleted file mode 100644 index c829e4be..00000000 --- a/teardown.yaml +++ /dev/null @@ -1,158 +0,0 @@ ---- - -# Use the "full" tag when running this playbook to reset your installation back to the point where no VMs were running on the KVM host. Extra RHEL apps will not be torn down. -# After you run this playbook, run the main playbook from the beginning with no tags ("ansible-playbook main.yaml --ask-become-pass") - -- hosts: kvm_host - tags: full - become: true - gather_facts: no - vars: - - bastion_teardown: yes - vars_files: - - env.yaml - post_tasks: - - name: Capture files to delete - find: - paths: /var/lib/libvirt/images - file_type: file - excludes: - - "lost+found" - register: found_files - - name: delete files in /var/lib/libvirt/images except for lost+found - file: - path: "{{ item.path }}" - state: absent - with_items: "{{ found_files['files'] }}" - - name: remove local workstation from KVM hosts' authorized_keys file - file: - path: "~/.ssh/authorized_keys" - state: absent - roles: - - teardown_vms - -- hosts: workstation - tags: full - connection: local - become: false - gather_facts: no - vars: - - files_to_reset: ['~/.ssh/ansible','~/.ssh/ansible.pub'] # feel free to add as needed - vars_files: - - env.yaml - pre_tasks: - - name: remove bastion from workstation's known_hosts file - lineinfile: - path: "~/.ssh/known_hosts" - regexp: "{{ env.ip.bastion}}" - state: absent - - name: remove KVM host from workstation's known_hosts file 
- lineinfile: - path: "~/.ssh/known_hosts" - regexp: "{{ env.ip.kvm}}" - state: absent - roles: - - reset_files - -# Use the "partial" tag when running this playbook to reset your installation back to the point where just the bastion node was running on the KVM host. If extra RHEL VM apps were created, they will not be torn down. -# After you run this playbook, to start back up from that point, run the main.yaml playbook with "--tags 'get_ocp,create_nodes,verification'" - -- hosts: bastion - tags: partial - become: true - gather_facts: no - vars: - - files_to_reset: ['/home/{{ env.access.login.bastion.user }}/ocpinst'] # feel free to add as needed - vars_files: - - env.yaml - roles: - - reset_files - -- hosts: kvm_host - tags: partial - become: true - gather_facts: no - vars: - - bastion_teardown: no - vars_files: - - env.yaml - roles: - - teardown_vms - -# Use the "app" tag with teardown.yaml to teardown all extra RHEL VM apps running on the KVM host. -# To recreate them, run the main playbook with "--tags app" - -- hosts: workstation - tags: app - connection: local - become: false - gather_facts: no - vars_files: - - env.yaml - tasks: - - name: remove apps from workstation's known_hosts file for idempotency if created - lineinfile: - path: "~/.ssh/known_hosts" - regexp: "{{ env.ip.app[i] }}" - state: absent - with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 - loop_control: - extended: yes - index_var: i - when: env.ip.app is defined - -- hosts: kvm_host - tags: app - become: true - gather_facts: no - vars_files: - - env.yaml - tasks: - - name: Destroy running app nodes. Expect ignored errors if some VMs are already destroyed. - community.libvirt.virt: - name: "{{ item }}" - command: destroy - loop: "{{ env.hostname.app }}" - ignore_errors: true - - - name: Undefine remaining app nodes. Expect ignored errors if some VMs are already undefined. 
- community.libvirt.virt: - name: "{{ item }}" - command: undefine - loop: "{{ env.hostname.app }}" - ignore_errors: true - - - name: Remove apps qcow2 files for idempotency - tags: create_extra_rhel - file: - path: /var/lib/libvirt/images/{{ item }}.qcow2 - state: absent - loop: "{{env.hostname.app}}" - - - name: Remove seeds for idempotency - tags: create_extra_rhel - file: - path: /var/lib/libvirt/images/{{ item }}-seed.img - state: absent - loop: "{{env.hostname.app}}" - - - name: Remove app meta data for idempotency - tags: create_extra_rhel - file: - path: /var/lib/libvirt/images/tmp/{{ item }}/meta-data - state: absent - loop: "{{ env.hostname.app }}" - - - name: Remove app user data for idempotency - tags: create_extra_rhel - file: - path: /var/lib/libvirt/images/tmp/{{ item }}/user-data - state: absent - loop: "{{ env.hostname.app }}" - - - name: Remove app network config data for idempotency - tags: create_extra_rhel - file: - path: /var/lib/libvirt/images/tmp/{{ item }}/network-config - state: absent - loop: "{{ env.hostname.app }}" \ No newline at end of file From 6f912a6a6aa40eab8ee0eb4f80251674b81f06ec Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Sun, 27 Mar 2022 01:43:20 -0700 Subject: [PATCH 609/885] Removed creation of extra RHEL VMs Signed-off-by: Jacob Emery --- roles/create_extra_rhel/tasks/main.yaml | 131 ------------------ .../templates/cloud_init.cfg.j2 | 54 -------- .../templates/network_config_static.cfg.j2 | 10 -- 3 files changed, 195 deletions(-) delete mode 100644 roles/create_extra_rhel/tasks/main.yaml delete mode 100644 roles/create_extra_rhel/templates/cloud_init.cfg.j2 delete mode 100644 roles/create_extra_rhel/templates/network_config_static.cfg.j2 diff --git a/roles/create_extra_rhel/tasks/main.yaml b/roles/create_extra_rhel/tasks/main.yaml deleted file mode 100644 index 77a7472c..00000000 --- a/roles/create_extra_rhel/tasks/main.yaml +++ /dev/null @@ -1,131 +0,0 @@ ---- - -- name: Load in variables from env.yaml - tags: 
create_extra_rhel,app - include_vars: env.yaml - -- name: Check to see if qcow2 file already exists on KVM host - tags: create_extra_rhel,app - stat: - path: /var/lib/libvirt/images/bastion_base.qcow2 - ignore_errors: true - register: qcow2_check - -- name: Copy RHEL qcow2 file to KVM host if it's not there already. This may take a while. - tags: create_extra_rhel,app - copy: - src: "{{ env.redhat.path_to_qcow2 }}" - dest: /var/lib/libvirt/images/bastion_base.qcow2 - mode: '600' - owner: qemu - group: qemu - when: qcow2_check.stat.exists == false - -- name: Create working directory - tags: create_extra_rhel,app - file: - path: /var/lib/libvirt/images/tmp/{{ item }} - state: directory - mode: '0755' - loop: "{{ env.hostname.app }}" - -- name: Create base image - tags: create_extra_rhel,app - command: "qemu-img create -b /var/lib/libvirt/images/bastion_base.qcow2 -f qcow2 /var/lib/libvirt/images/{{item}}.qcow2 {{env.node_resources.app.disk_size}}G" - register: qemu_create - loop: "{{env.hostname.app}}" - -- name: Set apps qcow2 permissions - tags: create_extra_rhel,app - command: chmod 600 /var/lib/libvirt/images/{{env.hostname.app[i]}}.qcow2 - with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 - loop_control: - extended: yes - index_var: i - -- name: Set apps qcow2 ownership to qemu - tags: create_extra_rhel,app - command: chown qemu:qemu /var/lib/libvirt/images/{{env.hostname.app[i]}}.qcow2 - with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 - loop_control: - extended: yes - index_var: i - -- name: Get info about qemu image creation - tags: create_extra_rhel,app - command: "qemu-img info /var/lib/libvirt/images/{{ item }}.qcow2" - register: qemu_info - loop: "{{env.hostname.app}}" - -- name: Create instance-id - tags: create_extra_rhel,app - shell: "echo \"instance-id: $(uuidgen || echo i-abcdefg)\" > /var/lib/libvirt/images/tmp/{{ item }}/meta-data" - register: uuidgen - loop: "{{ env.hostname.app }}" - -- name: Use 
cloud_init.cfg.j2 template to make user-data file - tags: create_extra_rhel,app - template: - src: roles/create_extra_rhel/templates/cloud_init.cfg.j2 - dest: /var/lib/libvirt/images/tmp/{{ item }}/user-data - loop: "{{ env.hostname.app }}" - -- name: Use network_config_static.cfg.j2 template to make network-config file - tags: create_extra_rhel,app - template: - src: roles/create_extra_rhel/templates/network_config_static.cfg.j2 - dest: /var/lib/libvirt/images/tmp/{{ env.hostname.app[i] }}/network-config - with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 - loop_control: - extended: yes - index_var: i - -- name: Generate iso file - tags: create_extra_rhel,app - command: genisoimage -output /var/lib/libvirt/images/{{ item }}-seed.img -volid cidata -joliet -rock /var/lib/libvirt/images/tmp/{{ item }}/meta-data /var/lib/libvirt/images/tmp/{{ item }}/network-config /var/lib/libvirt/images/tmp/{{ item }}/user-data - register: gen_iso - loop: "{{ env.hostname.app }}" - -- name: Set apps seed images permissions - tags: create_extra_rhel,app - command: chmod 600 /var/lib/libvirt/images/{{ env.hostname.app[i] }}-seed.img - with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 - loop_control: - extended: yes - index_var: i - -- name: Set apps seed images ownership to qemu - tags: create_extra_rhel,app - command: chown qemu:qemu /var/lib/libvirt/images/{{ env.hostname.app[i] }}-seed.img - with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 - loop_control: - extended: yes - index_var: i - -- name: Boot app - tags: create_extra_rhel,app - command: virt-install - --name {{ item }} \ - --virt-type kvm \ - --memory {{ env.node_resources.app.ram }} \ - --vcpus {{ env.node_resources.app.vcpu }} \ - --boot hd \ - --disk path=/var/lib/libvirt/images/{{ item }}-seed.img,device=cdrom \ - --disk path=/var/lib/libvirt/images/{{ item }}.qcow2,device=disk \ - --graphics none \ - --os-type Linux --os-variant 
rhel{{env.node_resources.app.os_variant}} \ - --network network=macvtap-net \ - --noautoconsole \ - --noreboot - loop: "{{ env.hostname.app }}" - -- name: Restart apps - tags: create_extra_rhel,app - command: virsh start {{ item }} - loop: "{{ env.hostname.app }}" - -- name: Waiting 3 minutes for automated apps installation and configuration to complete. To monitor, use a web browser to go to https://your-kvm-host-ip-address-here:9090 - tags: create_extra_rhel,app - pause: - minutes: 3 - diff --git a/roles/create_extra_rhel/templates/cloud_init.cfg.j2 b/roles/create_extra_rhel/templates/cloud_init.cfg.j2 deleted file mode 100644 index a273ac28..00000000 --- a/roles/create_extra_rhel/templates/cloud_init.cfg.j2 +++ /dev/null @@ -1,54 +0,0 @@ -#cloud-config -hostname: {{item}} -fqdn: {{item}}.{{ env.networking.base_domain }} -manage_etc_hosts: true -users: - - name: {{ env.access.login.app.user }} - sudo: ALL=(ALL) NOPASSWD:ALL - groups: adm,sys - home: /home/{{ env.access.login.app.user }} - shell: /bin/bash - lock_passwd: false -# allow both password auth and cert auth via ssh (console access can still login) -ssh_pwauth: true -disable_root: false -chpasswd: - list: | - root:{{ env.access.login.app.sudo_pass }} - {{ env.access.login.app.user }}:{{ env.access.login.app.pass }} - expire: False - -#growpart: -# mode: auto -# devices: ['/'] -#disk_setup: -# /dev/vdb: -# table_type: gpt -# layout: True -# overwrite: False -#fs_setup: -# - label: DATA_XFS -# filesystem: xfs -# device: '/dev/vdb' -# partition: auto -# #cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s -#mounts: -# # [ /dev/vdx, /mountpoint, fstype ] -# - [ LABEL=DATA_XFS, /dataxfs, xfs ] - -# 3rd col=fs type, 4th col=permissions, 5th=backup enabled, 6th=fsck order -mount_default_fields: [ None, None, "ext4", "defaults,noatime","0","2"] - -# every boot -bootcmd: - - [ sh, -c, 'echo ran cloud-init again at $(date) | sudo tee -a /root/bootcmd.log' ] - - [ sh, -c, 'echo $(date) instid=$INSTANCE_ID | sudo 
tee -a /root/bootcmd.log' ] - -# run once for network static IP fix -runcmd: - - [ sh, -c, 'sed -i s/BOOTPROTO=dhcp/BOOTPROTO=static/ /etc/sysconfig/network-scripts/ifcfg-eth0' ] - - [ sh, -c, 'ifdown eth0 && sleep 1 && ifup eth0 && sleep 1 && ip a' ] - - [ sh, -c, 'echo $(date) instid=$INSTANCE_ID | sudo tee -a /root/runcmd.log' ] - -# written to /var/log/cloud-init.log -final_message: "The system is finally up, after $UPTIME seconds" diff --git a/roles/create_extra_rhel/templates/network_config_static.cfg.j2 b/roles/create_extra_rhel/templates/network_config_static.cfg.j2 deleted file mode 100644 index e7571f68..00000000 --- a/roles/create_extra_rhel/templates/network_config_static.cfg.j2 +++ /dev/null @@ -1,10 +0,0 @@ -version: 2 -ethernets: - eth0: - dhcp4: false - # default libvirt network - addresses: [ {{ env.ip.app[i] }} ] - gateway4: {{ env.networking.gateway }} - nameservers: - search: [ {{ env.networking.base_domain }} ] - addresses: [ {{ env.networking.dns.nameserver }},{{ env.networking.dns.forwarder }} ] \ No newline at end of file From 6c3bd19a0ff8adf0046690b18da5dc95977b2dc9 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Sun, 27 Mar 2022 01:44:04 -0700 Subject: [PATCH 610/885] Changed defaults to incorporate new directory structure. 
Signed-off-by: Jacob Emery --- ansible.cfg | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ansible.cfg b/ansible.cfg index d432c9f8..44dee283 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,6 +1,10 @@ [defaults] private_key_file=~/.ssh/ansible -inventory=inventory +inventory=./inventories/default/ +roles_path=./roles +ansible_python_interpreter=/usr/local/bin/python3 +host_key_checking=False +deprecation_warnings=False [inventory] cache=True \ No newline at end of file From ae6e817d2d7b11f97d082e34c387aaab134f51aa Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Sun, 27 Mar 2022 01:44:52 -0700 Subject: [PATCH 611/885] Removed some files from gitignore Signed-off-by: Jacob Emery --- .gitignore | 3 --- 1 file changed, 3 deletions(-) diff --git a/.gitignore b/.gitignore index c3c27d86..092dd667 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,3 @@ .DS_Store .iso .vscode -inventory -env.yaml -ansible.cfg \ No newline at end of file From e2f0b1d417824ee0d482bdae64b80fef5a12f743 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Sun, 27 Mar 2022 01:46:18 -0700 Subject: [PATCH 612/885] Switched to using FTP server to boot bastion Signed-off-by: Jacob Emery --- roles/create_bastion/tasks/main.yaml | 230 ++++++++---------- .../templates/bastion-ks.cfg.j2 | 61 +++++ .../templates/cloud_init.cfg.j2 | 54 ---- .../templates/network_config_static.cfg.j2 | 10 - 4 files changed, 159 insertions(+), 196 deletions(-) create mode 100644 roles/create_bastion/templates/bastion-ks.cfg.j2 delete mode 100644 roles/create_bastion/templates/cloud_init.cfg.j2 delete mode 100644 roles/create_bastion/templates/network_config_static.cfg.j2 diff --git a/roles/create_bastion/tasks/main.yaml b/roles/create_bastion/tasks/main.yaml index bdaa3c66..513ca0d7 100644 --- a/roles/create_bastion/tasks/main.yaml +++ b/roles/create_bastion/tasks/main.yaml @@ -1,149 +1,115 @@ --- -- name: Load in variables from env.yaml - tags: create_bastion,bastion - include_vars: env.yaml - -- 
name: Enable cockpit console - tags: create_bastion,bastion - command: systemctl enable --now cockpit.socket - -- name: Remove working directory for idempotency - tags: create_bastion,bastion +- name: Clean up old kickstart for idempotency + tags: create_bastion + become: false file: - path: /var/lib/libvirt/images/tmp + path: "{{ env.ftp.cfgs_dir }}/{{ env.bastion.networking.hostname }}/bastion-ks.cfg" state: absent + delegate_to: "{{vault.ftp.ip}}" -- name: Create working directory - tags: create_bastion,bastion +- name: Create a directory on the FTP server for bastion configuration files. + become: false + tags: create_bastion file: - path: /var/lib/libvirt/images/tmp + path: "{{ env.ftp.cfgs_dir }}/{{ env.bastion.networking.hostname }}" state: directory - mode: '0755' + delegate_to: "{{vault.ftp.ip}}" -- name: Check to see if qcow2 file already exists on KVM host - tags: create_bastion,bastion - stat: - path: /var/lib/libvirt/images/bastion_base.qcow2 - register: qcow2_check +- name: Template kickstart file to FTP server. + tags: create_bastion + become: false + template: + src: "bastion-ks.cfg.j2" + dest: "{{ env.ftp.cfgs_dir }}/{{ env.bastion.networking.hostname }}/bastion-ks.cfg" + delegate_to: "{{vault.ftp.ip}}" + +- name: Update bastion kickstart config file with network parameters + tags: create_bastion + become: false + lineinfile: + path: "{{ env.ftp.cfgs_dir }}/{{ env.bastion.networking.hostname }}/bastion-ks.cfg" + regexp: "Network information" + line: "network --bootproto=static --ip={{vault.bastion.networking.ip}} --gateway={{env.bastion.networking.gateway}} --netmask={{env.bastion.networking.subnetmask}} --noipv6 --nameserver={{env.bastion.networking.nameserver}} --activate --hostname={{env.bastion.networking.hostname}}" + delegate_to: "{{vault.ftp.ip}}" + +- name: Create hash from bastion's root password to input in kickstart file. 
+ tags: create_bastion + shell: echo "{{ vault.bastion.access.root_pass }}" | openssl passwd -6 -in - + register: root_pass_hash + +- name: Add hashed root password to bastion's RHEL kickstart config file. + tags: create_bastion + become: false + lineinfile: + path: "{{ env.ftp.cfgs_dir }}/{{ env.bastion.networking.hostname }}/bastion-ks.cfg" + insertafter: "Root password" + line: "rootpw --iscrypted {{ root_pass_hash.stdout }}" + delegate_to: "{{vault.ftp.ip}}" + +- name: Create hash from bastion user password to input in kickstart file. + tags: create_bastion + shell: echo "{{ vault.bastion.access.pass }}" | openssl passwd -6 -in - + register: user_pass_hash + +- name: Add hashed user password to bastion's RHEL kickstart config file. + tags: create_bastion + become: false + lineinfile: + path: "{{ env.ftp.cfgs_dir }}/{{ env.bastion.networking.hostname }}/bastion-ks.cfg" + insertafter: "Users and Groups Definitions" + line: "user --groups=wheel --name={{env.bastion.access.user}} --password={{user_pass_hash.stdout}} --iscrypted" + delegate_to: "{{vault.ftp.ip}}" + +- name: Create tmp folder + tags: create_bastion + file: + path: "{{ role_path }}/tmp" + state: directory -- name: Copy RHEL qcow2 file to KVM host. This may take a while. 
- tags: create_bastion,bastion +- name: Fetch completed kickstart file from FTP server + tags: create_bastion + become: false + fetch: + src: "{{ env.ftp.cfgs_dir }}/{{ env.bastion.networking.hostname }}/bastion-ks.cfg" + dest: "{{ role_path }}/tmp/bastion-ks.cfg" + flat: yes + delegate_to: "{{ vault.ftp.ip }}" + +- name: Copy kickstart to KVM host libvirt images directory + tags: create_bastion copy: - src: "{{ env.redhat.path_to_qcow2 }}" - dest: /var/lib/libvirt/images/bastion_base.qcow2 - mode: '600' - owner: qemu - group: qemu - when: qcow2_check.stat.exists == false - register: rhel_qcow2_download + src: "{{ role_path }}/tmp/bastion-ks.cfg" + dest: "{{ env.z.lpar.storage_group.pool_path }}/bastion-ks.cfg" + owner: "{{ env.z.lpar.access.user }}" + mode: '0755' -- name: Remove snapshot for idempotency - tags: create_bastion,bastion +- name: Remove tmp folder + tags: create_bastion, test23 file: - path: /var/lib/libvirt/images/{{env.hostname.bastion}}-snapshot-cloudimg.qcow2 + path: "{{ role_path }}/tmp" state: absent -- name: Create base image - tags: create_bastion,bastion - command: "qemu-img create -b /var/lib/libvirt/images/bastion_base.qcow2 -f qcow2 /var/lib/libvirt/images/{{env.hostname.bastion}}-snapshot-cloudimg.qcow2 {{env.node_resources.bastion.disk_size}}G" - register: qemu_create - -- name: Print result of creation of base image - tags: create_bastion,bastion - debug: - var: qemu_create - -- name: Set bastion qcow2 permissions - tags: create_bastion,bastion - command: chmod 600 /var/lib/libvirt/images/{{env.hostname.bastion}}-snapshot-cloudimg.qcow2 - -- name: Set bastion qcow2 ownership to qemu - tags: create_bastion,bastion - command: chown qemu:qemu /var/lib/libvirt/images/{{env.hostname.bastion}}-snapshot-cloudimg.qcow2 - -- name: Get info about qemu image creation - tags: create_bastion,bastion - command: "qemu-img info /var/lib/libvirt/images/{{env.hostname.bastion}}-snapshot-cloudimg.qcow2" - register: qemu_info - -- name: Print output from 
qemu image creation information - tags: create_bastion,bastion - debug: - var: qemu_info - -- name: Create instance-id - tags: create_bastion,bastion - shell: "echo \"instance-id: $(uuidgen || echo i-abcdefg)\" > /var/lib/libvirt/images/tmp/meta-data" - register: uuidgen - -- name: Print output from uuidgen command - tags: create_bastion,bastion - debug: - var: uuidgen - -- name: Use cloud_init.cfg.j2 template to make user-data file - tags: create_bastion,bastion - template: - src: cloud_init.cfg.j2 - dest: /var/lib/libvirt/images/tmp/user-data - -- name: Use network_config_static.cfg.j2 template to make network-config file - tags: create_bastion,bastion - template: - src: network_config_static.cfg.j2 - dest: /var/lib/libvirt/images/tmp/network-config - -- name: Generate iso file - tags: create_bastion,bastion - command: genisoimage -output /var/lib/libvirt/images/{{env.hostname.bastion}}-seed.img -volid cidata -joliet -rock /var/lib/libvirt/images/tmp/meta-data /var/lib/libvirt/images/tmp/network-config /var/lib/libvirt/images/tmp/user-data - register: gen_iso - -- name: Set bastion seed image permissions - tags: create_bastion,bastion - command: chmod 600 /var/lib/libvirt/images/{{env.hostname.bastion}}-seed.img - -- name: Set bastion seed image ownership to qemu - tags: create_bastion,bastion - command: chown qemu:qemu /var/lib/libvirt/images/{{env.hostname.bastion}}-seed.img - -- name: Print output from generating iso - tags: create_bastion,bastion - debug: - var: gen_iso - -- name: Check if bastion already exists. Expect an ignored error if it doesn't exist. 
- tags: create_bastion,bastion - community.libvirt.virt: - name: "{{ env.hostname.bastion }}" - command: status - register: bastion_check - ignore_errors: true - -- name: Boot bastion - tags: create_bastion,bastion - command: virt-install - --name {{ env.hostname.bastion }} \ - --virt-type kvm \ - --memory {{ env.node_resources.bastion.ram }} \ - --vcpus {{ env.node_resources.bastion.vcpu }} \ - --boot hd \ - --disk path=/var/lib/libvirt/images/{{ env.hostname.bastion }}-seed.img,device=cdrom \ - --disk path=/var/lib/libvirt/images/{{ env.hostname.bastion }}-snapshot-cloudimg.qcow2,device=disk \ - --graphics none \ - --os-type Linux --os-variant rhel{{env.node_resources.bastion.os_variant}} \ +- name: Split iso_mount_dir variable on / for use in virt-install location parameter + tags: create_bastion + set_fact: + ins_dir: "{{ env.ftp.iso_mount_dir.split('/') }}" + +- name: Boot and kickstart bastion + tags: create_bastion + shell: | + virt-install \ + --name {{ env.bastion.networking.hostname }} \ + --memory={{ env.bastion.resources.ram }} \ + --vcpus={{ env.bastion.resources.vcpu }} \ + --location ftp://{{ env.ftp.user }}:{{ vault.ftp.pass }}@{{ vault.ftp.ip }}/{{ ins_dir[-1] }} \ + --disk pool=default,size={{ env.bastion.resources.disk_size }} \ --network network=macvtap-net \ - --noautoconsole \ - --noreboot - when: bastion_check.failed == true - -- name: Restart bastion - tags: create_bastion,bastion - command: virsh start {{ env.hostname.bastion }} - when: bastion_check.failed == true + --graphics=none \ + --noautoconsole --wait=-1 \ + --initrd-inject {{ env.z.lpar.storage_group.pool_path }}/bastion-ks.cfg --extra-args "inst.ks=ftp://{{ env.ftp.user }}:{{ vault.ftp.pass }}@{{ vault.ftp.ip }}/{{ env.bastion.networking.hostname }}/bastion-ks.cfg ip={{ vault.bastion.networking.ip }}::{{ env.bastion.networking.gateway }}:{{ env.bastion.networking.subnetmask }}:{{ env.bastion.networking.hostname }}:enc1:none" -- name: Waiting 3 minutes for automated bastion 
installation and configuration to complete. To monitor, use a web browser to go to https://your-kvm-host-ip-address-here:9090, sign in as 'root' and use the password you set for env.access.login.kvm.sudo_pass in env.yaml, then go to the 'Virtual Machines' tab and click on the bastion's hostname. - tags: create_bastion,bastion +- name: Waiting 3 minutes for automated bastion installation and configuration to complete. To monitor, use a web browser to go to https://your-kvm-host-ip-address-here:9090, sign in, then go to the 'Virtual Machines' tab and click on the bastion's hostname. + tags: create_bastion pause: - minutes: 3 - when: bastion_check.failed == true \ No newline at end of file + minutes: 3 \ No newline at end of file diff --git a/roles/create_bastion/templates/bastion-ks.cfg.j2 b/roles/create_bastion/templates/bastion-ks.cfg.j2 new file mode 100644 index 00000000..3987adc8 --- /dev/null +++ b/roles/create_bastion/templates/bastion-ks.cfg.j2 @@ -0,0 +1,61 @@ +# Template for bastion kickstart configuration file. Some parts come from the create_bastion role. 
+ +#version=DEVEL +%pre --log=/root/pre.log +%end + +# Reboot after installation +reboot + +# Use text mode install +text + +# Run the Setup Agent on first boot +firstboot --enable + +# Keyboard layouts +keyboard --vckeymap=us --xlayouts='us' + +# System language +lang {{ env.language }} + +# Network information (will fill in during create_bastion role) +#network --bootproto=dhcp --activate --hostname= + +# Firewall and SELinux +firewall --enabled --http --ftp --smtp --ssh --port=443,9090 +selinux --enforcing + +# Root password + +# System timezone +timezone {{ env.timezone }} + +#Users and Groups Definitions (will fill in during create_bastion role) + +# The following is the partition information you requested +ignoredisk --only-use=vda + +# System bootloader configuration +bootloader --append="crashkernel=auto" --location=mbr --boot-drive=vda + +# Partition clearing information +clearpart --all --initlabel --drives=vda + +# Disk partitioning information +part /boot --fstype="xfs" --asprimary --ondisk=vda --size=1024 +part pv.01 --fstype="lvmpv" --grow --size=1 --ondisk=vda +volgroup vgsystem --pesize=4096 pv.01 +logvol swap --fstype=swap --name=swap --vgname=vgsystem --size={{env.bastion.resources.swap}} +logvol / --fstype=xfs --name=root --vgname=vgsystem --size=1 --grow + +#packages selection +%packages --multilib --ignoremissing +@^minimal +%end + +%addon com_redhat_kdump --disable +%end + +%post --log=/root/post.log +%end diff --git a/roles/create_bastion/templates/cloud_init.cfg.j2 b/roles/create_bastion/templates/cloud_init.cfg.j2 deleted file mode 100644 index 345b3dc1..00000000 --- a/roles/create_bastion/templates/cloud_init.cfg.j2 +++ /dev/null @@ -1,54 +0,0 @@ -#cloud-config -hostname: {{env.hostname.bastion}} -fqdn: {{env.hostname.bastion}}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }} -manage_etc_hosts: true -users: - - name: {{ env.access.login.bastion.user }} - sudo: ALL=(ALL) NOPASSWD:ALL - groups: adm,sys,wheel - home: 
/home/{{ env.access.login.bastion.user }} - shell: /bin/bash - lock_passwd: false -# allow both password auth and cert auth via ssh (console access can still login) -ssh_pwauth: true -disable_root: false -chpasswd: - list: | - root:{{ env.access.login.bastion.sudo_pass }} - {{ env.access.login.bastion.user }}:{{ env.access.login.bastion.pass }} - expire: False - -#growpart: -# mode: auto -# devices: ['/'] -#disk_setup: -# /dev/vdb: -# table_type: gpt -# layout: True -# overwrite: False -#fs_setup: -# - label: DATA_XFS -# filesystem: xfs -# device: '/dev/vdb' -# partition: auto -# #cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s -#mounts: -# # [ /dev/vdx, /mountpoint, fstype ] -# - [ LABEL=DATA_XFS, /dataxfs, xfs ] - -# 3rd col=fs type, 4th col=permissions, 5th=backup enabled, 6th=fsck order -mount_default_fields: [ None, None, "ext4", "defaults,noatime","0","2"] - -# every boot -bootcmd: - - [ sh, -c, 'echo ran cloud-init again at $(date) | sudo tee -a /root/bootcmd.log' ] - - [ sh, -c, 'echo $(date) instid=$INSTANCE_ID | sudo tee -a /root/bootcmd.log' ] - -# run once for network static IP fix -runcmd: - - [ sh, -c, 'sed -i s/BOOTPROTO=dhcp/BOOTPROTO=static/ /etc/sysconfig/network-scripts/ifcfg-eth0' ] - - [ sh, -c, 'ifdown eth0 && sleep 1 && ifup eth0 && sleep 1 && ip a' ] - - [ sh, -c, 'echo $(date) instid=$INSTANCE_ID | sudo tee -a /root/runcmd.log' ] - -# written to /var/log/cloud-init.log -final_message: "The system is finally up, after $UPTIME seconds" diff --git a/roles/create_bastion/templates/network_config_static.cfg.j2 b/roles/create_bastion/templates/network_config_static.cfg.j2 deleted file mode 100644 index 0846767a..00000000 --- a/roles/create_bastion/templates/network_config_static.cfg.j2 +++ /dev/null @@ -1,10 +0,0 @@ -version: 2 -ethernets: - eth0: - dhcp4: false - # default libvirt network - addresses: [ {{ env.ip.bastion }} ] - gateway4: {{ env.networking.gateway }} - nameservers: - search: [ {{ env.networking.base_domain }} ] - addresses: [ 
{{ env.networking.dns.nameserver }},{{ env.networking.dns.forwarder }} ] \ No newline at end of file From 63f955d00c158520c295ecc4c35a091a936445cb Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Sun, 27 Mar 2022 01:47:26 -0700 Subject: [PATCH 613/885] Reworked setup playbook Signed-off-by: Jacob Emery --- roles/install_dependencies/tasks/main.yaml | 27 ---------------------- roles/install_packages/tasks/main.yaml | 18 ++++++++++++--- 2 files changed, 15 insertions(+), 30 deletions(-) delete mode 100644 roles/install_dependencies/tasks/main.yaml diff --git a/roles/install_dependencies/tasks/main.yaml b/roles/install_dependencies/tasks/main.yaml deleted file mode 100644 index f3cc77b7..00000000 --- a/roles/install_dependencies/tasks/main.yaml +++ /dev/null @@ -1,27 +0,0 @@ ---- - -- name: Gather facts to get OS family to see which task to run - ansible.builtin.gather_facts: - -- name: Check for latest versions then install dependencies and packages for Mac workstations, skip if not Mac. - shell: "{{ item }}" - loop: - - ansible-galaxy collection install community.general - - ansible-galaxy collection install community.crypto - - ansible-galaxy collection install ansible.posix - - ansible-galaxy collection install community.libvirt - - brew install openssh - - brew install expect - when: ansible_facts['os_family'] == "Darwin" - -- name: Check for latest versions then install dependencies and packages for Debian workstations, skip if not Debian. 
- shell: "{{ item }}" - loop: - - ansible-galaxy collection install community.general - - ansible-galaxy collection install community.crypto - - ansible-galaxy collection install ansible.posix - - ansible-galaxy collection install community.libvirt - - sudo yum install openssh -y - - sudo yum install expect -y - when: ansible_facts['os_family'] == "RedHat" or ansible_facts['os_family'] == "Debian" - \ No newline at end of file diff --git a/roles/install_packages/tasks/main.yaml b/roles/install_packages/tasks/main.yaml index 3e09dbba..16f54999 100644 --- a/roles/install_packages/tasks/main.yaml +++ b/roles/install_packages/tasks/main.yaml @@ -1,14 +1,26 @@ --- -- name: print the list of packages to be installed and updated +- name: Print the list of packages to be installed and updated. tags: install_packages debug: var: packages -- name: installing required packages. This may take a while, depending on the number of packages to be installed. +- name: Installing required packages for Linux machines. tags: install_packages + become: true ansible.builtin.package: name: "{{ item }}" state: latest update_cache: yes - loop: "{{ packages }}" \ No newline at end of file + loop: "{{ packages }}" + when: ansible_os_family != 'Darwin' + +- name: Installing required packages for Mac machines. 
+ tags: install_packages + become: false + community.general.homebrew: + name: "{{ item }}" + state: latest + loop: "{{ packages }}" + when: ansible_os_family == 'Darwin' + \ No newline at end of file From a08f6153b5a0a843e7a406411627b905edb1a6a4 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Sun, 27 Mar 2022 01:48:32 -0700 Subject: [PATCH 614/885] Added provisioning of KVM host Signed-off-by: Jacob Emery --- roles/configure_storage/tasks/main.yaml | 58 +++++++ .../configure_storage/templates/vdisk.xml.j2 | 12 ++ roles/create_kvm_host/files/hmccreds.yaml | 19 +++ roles/create_kvm_host/files/os_messages.py | 158 ++++++++++++++++++ roles/create_kvm_host/tasks/main.yaml | 67 ++++++++ .../templates/hmccreds.yaml.j2 | 19 +++ roles/update_cfgs/tasks/main.yaml | 96 +++++++++++ roles/update_cfgs/templates/kvm_host.cfg.j2 | 66 ++++++++ roles/update_cfgs/templates/kvm_host.ins.j2 | 4 + roles/update_cfgs/templates/kvm_host.prm.j2 | 6 + 10 files changed, 505 insertions(+) create mode 100644 roles/configure_storage/tasks/main.yaml create mode 100644 roles/configure_storage/templates/vdisk.xml.j2 create mode 100644 roles/create_kvm_host/files/hmccreds.yaml create mode 100755 roles/create_kvm_host/files/os_messages.py create mode 100644 roles/create_kvm_host/tasks/main.yaml create mode 100644 roles/create_kvm_host/templates/hmccreds.yaml.j2 create mode 100644 roles/update_cfgs/tasks/main.yaml create mode 100644 roles/update_cfgs/templates/kvm_host.cfg.j2 create mode 100644 roles/update_cfgs/templates/kvm_host.ins.j2 create mode 100644 roles/update_cfgs/templates/kvm_host.prm.j2 diff --git a/roles/configure_storage/tasks/main.yaml b/roles/configure_storage/tasks/main.yaml new file mode 100644 index 00000000..472a7040 --- /dev/null +++ b/roles/configure_storage/tasks/main.yaml @@ -0,0 +1,58 @@ +--- + +- name: create empty list for lun names + tags: configure_storage + set_fact: + lun_list: [] + +- name: Fill list of lun names + tags: configure_storage + set_fact: + lun_list: 
"{{lun_list}} + [ '/dev/mapper/{{ item }}' ]" + cacheable: yes + loop: "{{ env.z.lpar.storage_group.lun_name[1:] }}" + +- name: Create a volume group. + tags: configure_storage + community.general.lvg: + pvs: "{{ lun_list }}" + state: present + vg: "{{ env.z.lpar.storage_group.vg.name }}" + +- name: Create a logical volume. + tags: configure_storage + community.general.lvol: + vg: "{{ env.z.lpar.storage_group.vg.name }}" + lv: "{{ env.z.lpar.storage_group.lv.name }}" + size: 100%VG + +#- name: Create directory for filesystem, if does not exist. +# tags: configure_storage +# file: +# path: "{{ env.z.lpar.storage_group.pool_path }}" +# state: directory +# mode: '0755' + +- name: Format the filesystem. + tags: configure_storage + community.general.filesystem: + fstype: "{{env.z.lpar.storage_group.fs.type}}" + dev: /dev/{{ env.z.lpar.storage_group.vg.name }}/{{ env.z.lpar.storage_group.lv.name }} + +- name: Mount the logical volume on the filesystem. + tags: configure_storage + mount: + path: "{{ env.z.lpar.storage_group.pool_path }}" + src: /dev/{{ env.z.lpar.storage_group.vg.name }}/{{ env.z.lpar.storage_group.lv.name }} + fstype: "{{ env.z.lpar.storage_group.fs.type }}" + state: mounted + +- name: Template storage pool from XML file + tags: configure_storage + template: + src: vdisk.xml.j2 + dest: "~/{{ env.z.lpar.storage_group.fs.name }}-vdisk.xml" + +- name: Create virsh storage pool + tags: configure_storage + command: virsh pool-define {{ env.z.lpar.storage_group.fs.name }}-vdisk.xml \ No newline at end of file diff --git a/roles/configure_storage/templates/vdisk.xml.j2 b/roles/configure_storage/templates/vdisk.xml.j2 new file mode 100644 index 00000000..f6de4786 --- /dev/null +++ b/roles/configure_storage/templates/vdisk.xml.j2 @@ -0,0 +1,12 @@ + + default + 2da14d33-c8a8-4554-98f9-37fc6d9b0f90 + 0 + 0 + 0 + + + + {{env.z.lpar.storage_group.pool_path}} + + \ No newline at end of file diff --git a/roles/create_kvm_host/files/hmccreds.yaml 
b/roles/create_kvm_host/files/hmccreds.yaml new file mode 100644 index 00000000..6fca3669 --- /dev/null +++ b/roles/create_kvm_host/files/hmccreds.yaml @@ -0,0 +1,19 @@ +examples: + api_version: + hmc: 9.60.86.110 + verify_cert: false + show_os_messages: + hmc: 9.60.86.110 + cpcname: P0007DE8 + partname: Distrib-KVM01 + verify_cert: false +"9.60.86.110": + userid: jacob.emery@ibm.com + password: vpb-ubp_CGZ9cnz8ctk + verify_cert: false +cpcs: + P0007DE8: + hmc_host: 9.60.86.110 + hmc_userid: jacob.emery@ibm.com + hmc_password: vpb-ubp_CGZ9cnz8ctk + verify_cert: false diff --git a/roles/create_kvm_host/files/os_messages.py b/roles/create_kvm_host/files/os_messages.py new file mode 100755 index 00000000..aa053a13 --- /dev/null +++ b/roles/create_kvm_host/files/os_messages.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python +# Copyright 2017-2021 IBM Corp. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +#Example that shows the OS messages of the OS in a Partition or LPAR. 
+""" + +import sys +import logging +import yaml +import requests +import zhmcclient + +# Print metadata for each OS message, before each message +PRINT_METADATA = False + +requests.packages.urllib3.disable_warnings() + +if len(sys.argv) != 2: + print("Usage: %s hmccreds.yaml" % sys.argv[0]) + sys.exit(2) +hmccreds_file = sys.argv[1] + +with open(hmccreds_file, 'r') as fp: + hmccreds = yaml.safe_load(fp) + +examples = hmccreds.get("examples", None) +if examples is None: + print("examples not found in credentials file %s" % \ + (hmccreds_file)) + sys.exit(1) + +show_os_messages = examples.get("show_os_messages", None) +if show_os_messages is None: + print("show_os_messages not found in credentials file %s" % \ + (hmccreds_file)) + sys.exit(1) + +loglevel = show_os_messages.get("loglevel", None) +if loglevel is not None: + level = getattr(logging, loglevel.upper(), None) + if level is None: + print("Invalid value for loglevel in credentials file %s: %s" % \ + (hmccreds_file, loglevel)) + sys.exit(1) + logmodule = show_os_messages.get("logmodule", None) + if logmodule is None: + logmodule = '' # root logger + print("Logging for module %s with level %s" % (logmodule, loglevel)) + handler = logging.StreamHandler() + format_string = '%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s' + handler.setFormatter(logging.Formatter(format_string)) + logger = logging.getLogger(logmodule) + logger.addHandler(handler) + logger.setLevel(level) + +hmc = show_os_messages["hmc"] +cpcname = show_os_messages["cpcname"] +partname = show_os_messages["partname"] + +cred = hmccreds.get(hmc, None) +if cred is None: + print("Credentials for HMC %s not found in credentials file %s" % \ + (hmc, hmccreds_file)) + sys.exit(1) + +userid = cred['userid'] +password = cred['password'] + +print(__doc__) + +print("Using HMC %s with userid %s ..." 
% (hmc, userid)) +session = zhmcclient.Session(hmc, userid, password, verify_cert=False) +cl = zhmcclient.Client(session) + +timestats = show_os_messages.get("timestats", False) +if timestats: + session.time_stats_keeper.enable() + +try: + cpc = cl.cpcs.find(name=cpcname) +except zhmcclient.NotFound: + print("Could not find CPC %s on HMC %s" % (cpcname, hmc)) + sys.exit(1) + +try: + if cpc.dpm_enabled: + partkind = "partition" + partition = cpc.partitions.find(name=partname) + else: + partkind = "LPAR" + partition = cpc.lpars.find(name=partname) +except zhmcclient.NotFound: + print("Could not find %s %s on CPC %s" % (partkind, partname, cpcname)) + sys.exit(1) + +#break_id = show_os_messages.get("breakid", None) +#if break_id: +# print("Breaking upon receipt of message with ID %s ..." % break_id) + +print("Opening OS message channel for %s %s on CPC %s ..." % + (partkind, partname, cpcname)) +topic = partition.open_os_message_channel(include_refresh_messages=True) +print("OS message channel topic: %s" % topic) + +receiver = zhmcclient.NotificationReceiver(topic, hmc, userid, password) +print("Showing OS messages (including refresh messages) ...") +sys.stdout.flush() + +try: + for headers, message in receiver.notifications(): + # print("# HMC notification #%s:" % headers['session-sequence-nr']) + # sys.stdout.flush() + os_msg_list = message['os-messages'] + for os_msg in os_msg_list: + if PRINT_METADATA: + msg_id = os_msg['message-id'] + held = os_msg['is-held'] + priority = os_msg['is-priority'] + prompt = os_msg.get('prompt-text', None) + print("# OS message %s (held: %s, priority: %s, prompt: %r):" % + (msg_id, held, priority, prompt)) + msg_txt = os_msg['message-text'].strip('\n') + print(msg_txt) + sys.stdout.flush() +# if msg_id == break_id: +# raise NameError +#except KeyboardInterrupt: +# print("Keyboard interrupt - leaving receiver loop") +# sys.stdout.flush() +#except NameError: +# print("Message with ID %s occurred - leaving receiver loop" % break_id) +# 
sys.stdout.flush() +finally: + print("Closing receiver...") + sys.stdout.flush() + receiver.close() + +print("Logging off...") +sys.stdout.flush() +session.logoff() + +if timestats: + print(session.time_stats_keeper) + +print("Done.") diff --git a/roles/create_kvm_host/tasks/main.yaml b/roles/create_kvm_host/tasks/main.yaml new file mode 100644 index 00000000..cdc04ce8 --- /dev/null +++ b/roles/create_kvm_host/tasks/main.yaml @@ -0,0 +1,67 @@ +--- + +- name: Start LPAR + tags: create_kvm_host + ibm.ibm_zhmc.zhmc_partition: + hmc_host: "{{ vault.z.hmc.host }}" + hmc_auth: + userid: "{{ env.z.hmc.auth.user }}" + password: "{{ vault.z.hmc.auth.pass }}" + verify: false + cpc_name: "{{ vault.z.cpc_name }}" + name: "{{ env.z.lpar.name }}" + properties: + boot_ftp_host: "{{ vault.ftp.ip }}" + boot_ftp_username: "{{ env.ftp.user }}" + boot_ftp_password: "{{ vault.ftp.pass }}" + boot_ftp_insfile: "{{ env.z.lpar.name }}/kvm_host.ins" + boot_device: "ftp" + state: active + register: _create_instances + async: 600 + poll: 0 + +- name: Wait for creation to finish + tags: create_kvm_host + async_status: + jid: "{{ _create_instances.ansible_job_id }}" + register: _jobs + until: _jobs.finished + delay: 15 # Check every 15 seconds + retries: 40 # Retry up to 40 times + +#- name: Template hmccreds.yaml for use with os_messages.py +# tags: create_kvm_host, test2 +# template: +# src: hmccreds.yaml.j2 +# dest: "{{ role_path }}/files/hmccreds.yaml" + +#- name: Execute os_messages.py +# tags: create_kvm_host, test2 +# command: "{{ role_path }}/files/os_messages.py {{ role_path }}/files/hmcclient.yaml" +# register: os_messages_output + +#- name: Show LPAR OS messages from HMC. +# tags: create_kvm_host, test2 +# debug: +# msg: "{{ os_messages_output }}" +# until: "'login:' in os_messages_output" + +- name: Wait 7 minutes for automated KVM installation and configuration to complete. 
+ tags: create_kvm_host + pause: + minutes: 7 + +#- name: Change LPAR's boot source to storage adapter instead of FTP for future booting +# tags: create_kvm_host +# ibm.ibm_zhmc.zhmc_partition: +# hmc_host: "{{ vault.z.hmc.host }}" +# hmc_auth: +# userid: "{{ env.z.hmc.auth.user }}" +# password: "{{ vault.z.hmc.auth.pass }}" +# verify: false +# cpc_name: "{{ vault.z.cpc_name }}" +# name: "{{ env.z.lpar.name }}" +# properties: +# boot_device: "storage-adapter" +# state: active \ No newline at end of file diff --git a/roles/create_kvm_host/templates/hmccreds.yaml.j2 b/roles/create_kvm_host/templates/hmccreds.yaml.j2 new file mode 100644 index 00000000..a71e448c --- /dev/null +++ b/roles/create_kvm_host/templates/hmccreds.yaml.j2 @@ -0,0 +1,19 @@ +examples: + api_version: + hmc: {{ vault.z.lpar.networking.ip }} + verify_cert: false + show_os_messages: + hmc: {{ vault.z.lpar.networking.ip }} + cpcname: {{ vault.z.cpc_name }} + partname: {{ env.z.lpar.name }} + verify_cert: false +"{{ vault.z.lpar.networking.ip }}": + userid: {{ env.z.hmc.auth.user }} + password: {{ vault.z.hmc.auth.pass }} + verify_cert: false +cpcs: + {{ vault.z.cpc_name }}: + hmc_host: {{ vault.z.lpar.networking.ip }} + hmc_userid: {{ env.z.hmc.auth.user }} + hmc_password: {{ vault.z.hmc.auth.pass }} + verify_cert: false diff --git a/roles/update_cfgs/tasks/main.yaml b/roles/update_cfgs/tasks/main.yaml new file mode 100644 index 00000000..ed73d515 --- /dev/null +++ b/roles/update_cfgs/tasks/main.yaml @@ -0,0 +1,96 @@ +--- + +### Setup + +- name: Create directory for KVM host's RHEL configuration files for installation. + tags: update_cfgs + file: + path: '{{ env.ftp.cfgs_dir }}/{{ env.z.lpar.name }}' + state: directory + +- name: Clean-up old cfg files. 
+ tags: update_cfgs + shell: rm -rf {{ env.ftp.cfgs_dir }}/{{ env.z.lpar.name }}/kvm_host.* + +### Templating out RHEL configuration files for the KVM host to pull from the FTP server + +- name: Split iso_mount_dir variable on / for use in template + tags: update_cfgs + set_fact: + ins_dir: "{{ env.ftp.iso_mount_dir.split('/') }}" + +- name: Template RHEL configuration files out to FTP server. + tags: update_cfgs + template: + src: "{{ item }}.j2" + dest: "{{ env.ftp.cfgs_dir }}/{{ env.z.lpar.name }}/{{ item }}" + loop: + - kvm_host.prm + - kvm_host.ins + - kvm_host.cfg + +### Updating additional parameters in RHEL configuration files that are more variable. + +- name: Add FCP storage worldwide port numbers to KVM host's RHEL prm configuration file. + tags: update_cfgs + lineinfile: + path: '{{env.ftp.cfgs_dir}}/{{ env.z.lpar.name }}/kvm_host.prm' + insertafter: 'inst.repo' + line: rd.zfcp=0.0.{{ env.z.lpar.storage_group.dev_num[i] }},0x{{ env.z.lpar.storage_group.storage_wwpn[i] }},0x0000000000000000 + with_sequence: start=0 end={{(env.z.lpar.storage_group.dev_num | length) - 1}} stride=1 + loop_control: + extended: yes + index_var: i + +- name: Create list from nic1 device number, incremented twice for input in KVM host prm file + set_fact: + nic_child_list: + - "{{ '%04x' % ( env.z.lpar.networking.nic.card1.dev_num | int ) }}" + - "{{ '%04x' % ( env.z.lpar.networking.nic.card1.dev_num | int + 1 ) }}" + - "{{ '%04x' % ( env.z.lpar.networking.nic.card1.dev_num | int + 2 ) }}" + +- name: Add network device information to KVM host's RHEL prm file + lineinfile: + path: "{{env.ftp.cfgs_dir}}/{{ env.z.lpar.name }}/kvm_host.prm" + insertafter: "ro ramdisk_size" + line: "rd.znet=qeth,0.0.{{nic_child_list[0]}},0.0.{{nic_child_list[1]}},0.0.{{nic_child_list[2]}},layer2=1,portno={{ env.z.lpar.networking.nic.card1.port }}" + +- name: Create hash from KVM host root password to input in kickstart file + tags: update_cfgs + shell: echo "{{ vault.z.lpar.access.root_pass }}" | 
openssl passwd -6 -in - + register: root_pass_hash + +- name: Add hashed root password to KVM host's RHEL kickstart config file + tags: update_cfgs + lineinfile: + path: "{{ env.ftp.cfgs_dir }}/{{ env.z.lpar.name }}/kvm_host.cfg" + insertafter: "Root password" + line: "rootpw --iscrypted {{ root_pass_hash.stdout }}" + +- name: Create hash from KVM user password to input in kickstart file + tags: update_cfgs + shell: echo "{{ vault.z.lpar.access.pass }}" | openssl passwd -6 -in - + register: user_pass_hash + +- name: Add hashed user password to KVM host's RHEL kickstart config file + tags: update_cfgs + lineinfile: + path: "{{ env.ftp.cfgs_dir }}/{{ env.z.lpar.name }}/kvm_host.cfg" + insertafter: "Users and Groups Definitions" + line: "user --groups=wheel --name={{ env.z.lpar.access.user }} --password={{ user_pass_hash.stdout }} --iscrypted" + +- name: Add network information in KVM hosts's RHEL kickstart file when there is only one network card defined. + tags: update_cfgs + lineinfile: + path: "{{ env.ftp.cfgs_dir }}/{{ env.z.lpar.name }}/kvm_host.cfg" + insertafter: "Network information" + line: network --bootproto=static --device={{ env.z.lpar.networking.device1 }} --gateway={{ env.z.lpar.networking.gateway }} --ip={{ vault.z.lpar.networking.ip }} --nameserver={{ env.z.lpar.networking.nameserver }} --netmask={{ env.z.lpar.networking.subnetmask }} --noipv6 --activate --hostname={{ env.z.lpar.networking.hostname }} + when: env.z.lpar.networking.nic.card2 is not defined + +- name: Add network information in KVM hosts's RHEL kickstart file when there are two network cards defined. 
+ tags: update_cfgs + lineinfile: + path: "{{ env.ftp.cfgs_dir }}/{{ env.z.lpar.name }}/kvm_host.cfg" + insertafter: "Network information" + line: "network --bootproto=static --device={{ env.z.lpar.networking.device1 }} --bondslaves={{ env.z.lpar.networking.nic.card1.name }},{{ env.z.lpar.networking.nic.card2.name }} --bondopts=mode=active-backup;primary={{ env.z.lpar.networking.nic.card1.name }} --gateway={{ env.z.lpar.networking.gateway }} --ip={{ vault.z.lpar.networking.ip }} --nameserver={{ env.z.lpar.networking.nameserver }} --netmask={{ env.z.lpar.networking.subnetmask }} --noipv6 --activate --hostname={{ env.z.lpar.networking.hostname }}" + when: env.z.lpar.networking.nic.card2.name is defined \ No newline at end of file diff --git a/roles/update_cfgs/templates/kvm_host.cfg.j2 b/roles/update_cfgs/templates/kvm_host.cfg.j2 new file mode 100644 index 00000000..9baa45d8 --- /dev/null +++ b/roles/update_cfgs/templates/kvm_host.cfg.j2 @@ -0,0 +1,66 @@ +# Template for KVM host kickstart config file. Some parts come from the update_cfgs role. 
+ +%pre --log=/root/pre.log +dd if=/dev/zero of=/dev/mapper/mpatha bs=512 count=10 +dd if=/dev/zero of=/dev/mapper/mpathb bs=512 count=10 +dd if=/dev/zero of=/dev/mapper/mpathc bs=512 count=10 +dd if=/dev/zero of=/dev/mapper/mpathd bs=512 count=10 +dd if=/dev/zero of=/dev/mapper/mpathe bs=512 count=10 +dd if=/dev/zero of=/dev/mapper/mpathf bs=512 count=10 +dd if=/dev/zero of=/dev/mapper/mpathg bs=512 count=10 +dd if=/dev/zero of=/dev/mapper/mpathh bs=512 count=10 +%end + +# Reboot after installation +reboot + +# Use network installation +url --url=ftp://{{env.ftp.user}}:{{vault.ftp.pass}}@{{vault.ftp.ip}}/{{ ins_dir[-1] }} + +# Use text mode install +text + +# Run the Setup Agent on first boot +firstboot --enable + +# Keyboard layouts +keyboard --vckeymap=us --xlayouts='us' + +# System language +lang {{ env.language }} + +# Network information (will fill in during update_cfgs role) + +# Firewall and SELinux +firewall --enabled --http --ftp --smtp --ssh --port=443,9090,123 +selinux --enforcing + +# Root password (will fill in during update_cfgs role) + +# System timezone +timezone {{ env.timezone }} + +#Users and Groups Definitions (will fill in during update_cfgs role) + +# The following is the partition information you requested +ignoredisk --only-use={{ env.z.lpar.storage_group.lun_name[0] }} + +# System bootloader configuration +bootloader --append="crashkernel=auto" --location=mbr --boot-drive={{ env.z.lpar.storage_group.lun_name[0] }} + +# Partition clearing information +clearpart --all --initlabel --drives={{ env.z.lpar.storage_group.lun_name[0] }} + +# Disk partitioning information +autopart --type=lvm + +# packages selection +%packages --multilib --ignoremissing +@^minimal +%end + +%addon com_redhat_kdump --disable +%end + +%post --log=/root/post.log +%end \ No newline at end of file diff --git a/roles/update_cfgs/templates/kvm_host.ins.j2 b/roles/update_cfgs/templates/kvm_host.ins.j2 new file mode 100644 index 00000000..4c379a05 --- /dev/null +++ 
b/roles/update_cfgs/templates/kvm_host.ins.j2 @@ -0,0 +1,4 @@ +../{{ ins_dir[-1] }}/images/kernel.img 0x00000000 +../{{ ins_dir[-1] }}/images/initrd.img 0x02000000 +kvm_host.prm 0x00010480 +../{{ ins_dir[-1] }}/images/initrd.addrsize 0x00010408 \ No newline at end of file diff --git a/roles/update_cfgs/templates/kvm_host.prm.j2 b/roles/update_cfgs/templates/kvm_host.prm.j2 new file mode 100644 index 00000000..00917f36 --- /dev/null +++ b/roles/update_cfgs/templates/kvm_host.prm.j2 @@ -0,0 +1,6 @@ +ro ramdisk_size=40000 cio_ignore=all,!condev +ip={{vault.z.lpar.networking.ip}}::{{env.z.lpar.networking.gateway}}:{{env.z.lpar.networking.subnet}}:{{env.z.lpar.networking.hostname}}:{{env.z.lpar.networking.device1}}:none +nameserver={{ env.z.lpar.networking.nameserver }} +inst.repo=ftp://{{env.ftp.user}}:{{vault.ftp.pass}}@{{vault.ftp.ip}}/{{ ins_dir[-1] }} +inst.ks=ftp://{{env.ftp.user}}:{{vault.ftp.pass}}@{{vault.ftp.ip}}/{{ env.z.lpar.name }}/kvm_host.cfg +inst.cmdline \ No newline at end of file From 291598ee4891223a2ed9c44aef6b511744155e45 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Sun, 27 Mar 2022 01:49:24 -0700 Subject: [PATCH 615/885] Added ability to create LPAR profile, attach storage, NIC Signed-off-by: Jacob Emery --- roles/create_lpar/tasks/main.yaml | 87 +++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 roles/create_lpar/tasks/main.yaml diff --git a/roles/create_lpar/tasks/main.yaml b/roles/create_lpar/tasks/main.yaml new file mode 100644 index 00000000..35f9f161 --- /dev/null +++ b/roles/create_lpar/tasks/main.yaml @@ -0,0 +1,87 @@ +--- + +#Create LPAR Profile +- name: Create logical partition + ibm.ibm_zhmc.zhmc_partition: + hmc_host: "{{ vault.z.hmc.host }}" + hmc_auth: + userid: "{{ env.z.hmc.auth.user }}" + password: "{{ vault.z.hmc.auth.pass }}" + verify: false + cpc_name: "{{ vault.z.cpc_name }}" + name: "{{ env.z.lpar.name }}" + state: stopped + properties: + description: "{{ env.z.lpar.description }}" + 
ifl_processors: "{{ env.z.lpar.ifl.count }}" + initial_memory: "{{ env.z.lpar.ifl.initial_memory }}" + maximum_memory: "{{ env.z.lpar.ifl.max_memory }}" + minimum_ifl_processing_weight: "{{ env.z.lpar.ifl.min_weight }}" + maximum_ifl_processing_weight: "{{ env.z.lpar.ifl.max_weight }}" + initial_ifl_processing_weight: "{{ env.z.lpar.ifl.initial_weight }}" + register: create_lpar + +# Attach storge group +- name: Ensure storage group is attached to partition. + ibm.ibm_zhmc.zhmc_storage_group_attachment: + hmc_host: "{{ vault.z.hmc.host }}" + hmc_auth: + userid: "{{ env.z.hmc.auth.user }}" + password: "{{ vault.z.hmc.auth.pass }}" + verify: false + cpc_name: "{{ vault.z.cpc_name }}" + storage_group_name: "{{ env.z.lpar.storage_group.name }}" + partition_name: "{{ env.z.lpar.name }}" + state: attached + register: sglparattach + +- name: Print the result + debug: + var: sglparattach + +#Attach Network Adapter +- name: Ensure NIC1 exists in the partition + ibm.ibm_zhmc.zhmc_nic: + hmc_host: "{{ vault.z.hmc.host }}" + hmc_auth: + userid: "{{ env.z.hmc.auth.user }}" + password: "{{ vault.z.hmc.auth.pass }}" + verify: false + cpc_name: "{{ vault.z.cpc_name }}" + partition_name: "{{ env.z.lpar.name }}" + name: "{{ env.z.lpar.networking.nic.card1.name }}" + state: present + properties: + adapter_name: "{{ env.z.lpar.networking.nic.card1.adapter }}" + adapter_port: "{{ env.z.lpar.networking.nic.card1.port }}" + description: "The port1 to our data network" + device_number: "{{ '%04x' % env.z.lpar.networking.nic.card1.dev_num | int }}" + register: nic1 + +- name: Print the result + debug: + var: nic1 + +- name: Ensure NIC2 exists in the partition, if defined. 
+ ibm.ibm_zhmc.zhmc_nic: + hmc_host: "{{ vault.z.hmc.host }}" + hmc_auth: + userid: "{{ env.z.hmc.auth.user }}" + password: "{{ vault.z.hmc.auth.pass }}" + verify: false + cpc_name: "{{ vault.z.cpc_name }}" + partition_name: "{{ env.z.lpar.name }}" + name: "{{ env.z.lpar.networking.nic.card2.name }}" + state: present + properties: + adapter_name: "{{ env.z.lpar.networking.nic.card2.adapter }}" + adapter_port: "{{ env.z.lpar.networking.nic.card2.port }}" + description: "The port2 to our data network" + device_number: "{{ '%04x' % ( env.z.lpar.networking.nic.card2.dev_num | int ) }}" + register: nic2 + when: env.z.lpar.networking.nic.card2 is defined + +- name: Print the result + debug: + var: nic2 + when: env.z.lpar.networking.nic.card2 is defined From 6a669b72bf17ca99db8aeb933406aaabce5dd9d4 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Sun, 27 Mar 2022 01:50:05 -0700 Subject: [PATCH 616/885] Use Ansible vault to encrypt sensitive data Signed-off-by: Jacob Emery --- roles/encrypt_vars/tasks/main.yaml | 53 + roles/encrypt_vars/templates/vault.yaml.j2 | 25 + roles/kwoodson.yedit/.gitignore | 2 + roles/kwoodson.yedit/README.md | 74 ++ roles/kwoodson.yedit/defaults/main.yml | 1 + roles/kwoodson.yedit/library/.gitignore | 1 + roles/kwoodson.yedit/library/yedit.py | 1006 +++++++++++++++++ .../kwoodson.yedit/meta/.galaxy_install_info | 2 + roles/kwoodson.yedit/meta/main.yml | 18 + roles/kwoodson.yedit/tasks/main.yml | 1 + .../tests/integration/.gitignore | 3 + .../tests/integration/files/kube-manager.yaml | 39 + .../tests/integration/files/playbook.yml | 5 + .../tests/integration/yedit.yml | 347 ++++++ roles/kwoodson.yedit/tests/main.yml | 1 + roles/kwoodson.yedit/tests/test.py | 14 + roles/kwoodson.yedit/tests/unit/.gitignore | 1 + roles/kwoodson.yedit/tests/unit/test_yedit.py | 377 ++++++ roles/kwoodson.yedit/vars/main.yml | 1 + 19 files changed, 1971 insertions(+) create mode 100644 roles/encrypt_vars/tasks/main.yaml create mode 100644 
roles/encrypt_vars/templates/vault.yaml.j2 create mode 100644 roles/kwoodson.yedit/.gitignore create mode 100644 roles/kwoodson.yedit/README.md create mode 100644 roles/kwoodson.yedit/defaults/main.yml create mode 100644 roles/kwoodson.yedit/library/.gitignore create mode 100644 roles/kwoodson.yedit/library/yedit.py create mode 100644 roles/kwoodson.yedit/meta/.galaxy_install_info create mode 100644 roles/kwoodson.yedit/meta/main.yml create mode 100644 roles/kwoodson.yedit/tasks/main.yml create mode 100644 roles/kwoodson.yedit/tests/integration/.gitignore create mode 100644 roles/kwoodson.yedit/tests/integration/files/kube-manager.yaml create mode 100644 roles/kwoodson.yedit/tests/integration/files/playbook.yml create mode 100755 roles/kwoodson.yedit/tests/integration/yedit.yml create mode 100644 roles/kwoodson.yedit/tests/main.yml create mode 100755 roles/kwoodson.yedit/tests/test.py create mode 100644 roles/kwoodson.yedit/tests/unit/.gitignore create mode 100755 roles/kwoodson.yedit/tests/unit/test_yedit.py create mode 100644 roles/kwoodson.yedit/vars/main.yml diff --git a/roles/encrypt_vars/tasks/main.yaml b/roles/encrypt_vars/tasks/main.yaml new file mode 100644 index 00000000..59716ecb --- /dev/null +++ b/roles/encrypt_vars/tasks/main.yaml @@ -0,0 +1,53 @@ +--- + +- name: Find where the ansible.cfg file is to find project's home directory + set_fact: + home_dir: "{{ ansible_config_file.split('/')[0:-1] }}" + +- name: Check if Ansible vault already exists + tags: encrypt_vars + stat: + path: "{{ inventory_dir}}/vars/vault.yaml" + register: vault_status + +- name: Template Ansible Vault file from variables file + tags: encrypt_vars + template: + src: vault.yaml.j2 + dest: "{{home_dir | join('/') }}/vault.yaml" + when: vault_status.stat.exists is false + +- name: Find where the ansible.cfg file is to find project's home directory for use in next step + set_fact: + home_dir: "{{ ansible_config_file.split('/')[0:-1] }}" + when: vault_status.stat.exists is false + 
+- name: Encrypt sensitive data from variables file. + tags: encrypt_vars + command: "ansible-vault encrypt {{home_dir | join('/') }}/vault.yaml" + when: vault_status.stat.exists is false + +- name: Redact sensitive data in variables file. + tags: encrypt_vars + yedit: + src: "{{ inventory_dir }}/group_vars/all.yaml" + key: "{{ item }}" + value: REDACTED + loop: + - env.z.cpc_name + - env.z.hmc.host + - env.z.hmc.auth.pass + - env.z.lpar.access.pass + - env.z.lpar.access.root_pass + - env.z.lpar.networking.ip + - env.ftp.ip + - env.ftp.pass + - env.redhat.password + - env.redhat.pull_secret + - env.bastion.networking.ip + - env.bastion.access.pass + - env.bastion.access.root_pass + loop_control: + label: redacted + when: vault_status.stat.exists is false + \ No newline at end of file diff --git a/roles/encrypt_vars/templates/vault.yaml.j2 b/roles/encrypt_vars/templates/vault.yaml.j2 new file mode 100644 index 00000000..fa3178cf --- /dev/null +++ b/roles/encrypt_vars/templates/vault.yaml.j2 @@ -0,0 +1,25 @@ +vault: + z: + cpc_name: {{ env.z.cpc_name }} + hmc: + host: {{ env.z.hmc.host }} + auth: + pass: {{ env.z.hmc.auth.pass }} + lpar: + access: + pass: {{ env.z.lpar.access.pass }} + root_pass: {{ env.z.lpar.access.root_pass }} + networking: + ip: {{ env.z.lpar.networking.ip }} + ftp: + ip: {{ env.ftp.ip }} + pass: {{ env.ftp.pass }} + redhat: + password: {{ env.redhat.password }} + pull_secret: '{{ env.redhat.pull_secret }}' + bastion: + networking: + ip: {{ env.bastion.networking.ip }} + access: + pass: {{ env.bastion.access.pass }} + root_pass: {{ env.bastion.access.root_pass }} \ No newline at end of file diff --git a/roles/kwoodson.yedit/.gitignore b/roles/kwoodson.yedit/.gitignore new file mode 100644 index 00000000..565fcb21 --- /dev/null +++ b/roles/kwoodson.yedit/.gitignore @@ -0,0 +1,2 @@ +# VIM tmp files +*.swp diff --git a/roles/kwoodson.yedit/README.md b/roles/kwoodson.yedit/README.md new file mode 100644 index 00000000..4e110030 --- /dev/null +++ 
b/roles/kwoodson.yedit/README.md @@ -0,0 +1,74 @@ +// vim: ft=asciidoc + += yedit repository +:toc: macro +:toc-title: + +toc::[] + +== Ansible Role: Yedit + +This repository contains an ansible module for modifying yaml files. + +I didn't see a good method of editing yaml files and config managing them through ansible. This is my attempt. + +== Install + +You can install via Ansible Galaxy: + + $ ansible-galaxy install kwoodson.yedit + +If you do this, you should also add a `requirements.yml` so other users of your playbook know what dependencies to install: + +```yaml +--- +- src: kwoodson.yedit +``` + +You can then reference it in a play by importing it before use: + +```yaml + roles: + - kwoodson.yedit + - role-that-uses-yedit +``` + +== Examples + +Sometimes it is necesarry to config manage .yml files. +[source,yaml] +---- +- hosts: localhost + gather_facts: no + roles: + - kwoodson.yedit + tasks: + - name: manage yaml files + yedit: + src: /tmp/test.yaml + key: a.b.c + value: + d: + e: + f: + this is a test + + - name: get a specific value + yedit: + src: /tmp/test.yaml + state: list + key: a.b.c.d.e.f + register: yeditout + - debug: var=yeditout +---- + +== Development + +As this is a role, just copy it into any roles directory recognized by Ansible. For details, see http://docs.ansible.com/ansible/latest/index.html[Ansible documentation]: + +* http://docs.ansible.com/ansible/devel/playbooks_reuse_roles.html#embedding-modules-and-plugins-in-roles[Embedding Modules and Plugins In Roles] +* http://docs.ansible.com/ansible/latest/intro_configuration.html#module-utils[module_utils] + +== Documentation + +Full documentation is available inline https://github.com/kwoodson/ansible-role-yedit/blob/master/library/yedit.py#L15[here]. 
diff --git a/roles/kwoodson.yedit/defaults/main.yml b/roles/kwoodson.yedit/defaults/main.yml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/roles/kwoodson.yedit/defaults/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/kwoodson.yedit/library/.gitignore b/roles/kwoodson.yedit/library/.gitignore new file mode 100644 index 00000000..0d20b648 --- /dev/null +++ b/roles/kwoodson.yedit/library/.gitignore @@ -0,0 +1 @@ +*.pyc diff --git a/roles/kwoodson.yedit/library/yedit.py b/roles/kwoodson.yedit/library/yedit.py new file mode 100644 index 00000000..efd5fe13 --- /dev/null +++ b/roles/kwoodson.yedit/library/yedit.py @@ -0,0 +1,1006 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +# pylint: disable=wrong-import-order,wrong-import-position,unused-import + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +ANSIBLE_METADATA = {'metadata_version': '1.1', + 'status': ['preview'], + 'supported_by': 'core'} + +DOCUMENTATION = ''' +--- +module: yedit +version_added: "2.6" +short_description: Create, modify, and idempotently manage yaml files. +description: + - Modify yaml files programmatically. +options: + state: + description: + - State represents whether to create, modify, delete, or list yaml + required: true + default: present + choices: ["present", "absent", "list"] + aliases: [] + debug: + description: + - Turn on debug information. + required: false + default: false + type: bool + aliases: [] + src: + description: + - The file that is the target of the modifications. + required: false + aliases: [] + content: + description: + - Content represents the yaml content you desire to work with. This + - could be the file contents to write or the inmemory data to modify. + required: false + aliases: [] + content_type: + description: + - The python type of the content parameter. 
+ required: false + choices: ['yaml', 'json'] + default: yaml + aliases: [] + key: + description: + - The path to the value you wish to modify. Emtpy string means the top of + - the document. + required: false + default: '' + aliases: [] + value: + description: + - The incoming value of parameter 'key'. + required: false + default: + aliases: [] + edits: + description: + - A list of edits to perform. These follow the same format as a single edit + required: false + aliases: [] + value_type: + description: + - The python type of the incoming value. + required: false + default: '' + aliases: [] + update: + description: + - Whether the update should be performed on a dict/hash or list/array + - object. + required: false + default: false + aliases: [] + type: bool + append: + description: + - Whether to append to an array/list. When the key does not exist or is + - null, a new array is created. When the key is of a non-list type, + - nothing is done. + required: false + default: false + aliases: [] + type: bool + insert: + description: + - Whether to insert to an array/list. When the key does not exist or is + - null, a new array is created. When the key is of a non-list type, + - nothing is done. + required: false + default: false + aliases: [] + type: bool + index: + description: + - Used in conjunction with the update or insert parameter. This will update / insert to a + - specific index in an array/list. + required: false + aliases: [] + curr_value: + description: + - Used in conjunction with the update parameter. This is the current + - value of 'key' in the yaml file. + required: false + default: None + aliases: [] + curr_value_format: + description: + - Format of the incoming current value. + choices: ["yaml", "json", "str"] + required: false + default: yaml + aliases: [] + backup_ext: + description: + - The backup file's appended string. 
+ required: false + aliases: [] + backup: + description: + - Whether to make a backup copy of the current file when performing an + - edit. + required: false + default: false + aliases: [] + type: bool + separator: + description: + - The separator being used when parsing strings. + required: false + default: '.' + aliases: [] +author: +- "Kenny Woodson " +extends_documentation_fragment: [] +''' + +EXAMPLES = ''' +# Simple insert of key, value +- name: insert simple key, value + yedit: + src: somefile.yml + key: test + value: somevalue + state: present +# Results: +# test: somevalue + +# Multilevel insert of key, value +- name: insert simple key, value + yedit: + src: somefile.yml + key: a.b.c + value: d + state: present +# Results: +# a: +# b: +# c: d +# +# multiple edits at the same time +- name: perform multiple edits + yedit: + src: somefile.yml + edits: + - key: a.b.c + value: d + - key: a.b.c.d + value: e + state: present +# Results: +# a: +# b: +# c: +# d: e +''' + +import copy # noqa: F401 +import fcntl # noqa: F401 +import json # noqa: F401 +import os # noqa: F401 +import re # noqa: F401 +import shutil # noqa: F401 +import time # noqa: F401 + +try: + import ruamel.yaml as yaml # noqa: F401 +except ImportError: + import yaml # noqa: F401 + +from ansible.module_utils.basic import AnsibleModule + + +class YeditException(Exception): + ''' Exception class for Yedit ''' + pass + + +# pylint: disable=too-many-public-methods,too-many-instance-attributes +class Yedit(object): + ''' Class to modify yaml files ''' + re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" + re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)" + com_sep = set(['.', '#', '|', ':']) + + # pylint: disable=too-many-arguments + def __init__(self, + filename=None, + content=None, + content_type='yaml', + separator='.', + backup_ext=".{0}".format(time.strftime("%Y%m%dT%H%M%S")), + backup=False): + self.content = content + self._separator = separator + self.filename = filename + self.__yaml_dict = 
content + self.content_type = content_type + self.backup = backup + self.backup_ext = backup_ext + self.load(content_type=self.content_type) + if self.__yaml_dict is None: + self.__yaml_dict = {} + + @property + def separator(self): + ''' getter method for separator ''' + return self._separator + + @separator.setter + def separator(self, inc_sep): + ''' setter method for separator ''' + self._separator = inc_sep + + @property + def yaml_dict(self): + ''' getter method for yaml_dict ''' + return self.__yaml_dict + + @yaml_dict.setter + def yaml_dict(self, value): + ''' setter method for yaml_dict ''' + self.__yaml_dict = value + + @staticmethod + def parse_key(key, sep='.'): + '''parse the key allowing the appropriate separator''' + common_separators = list(Yedit.com_sep - set([sep])) + return re.findall(Yedit.re_key.format(''.join(common_separators)), key) + + @staticmethod + def valid_key(key, sep='.'): + '''validate the incoming key''' + common_separators = list(Yedit.com_sep - set([sep])) + if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key): + return False + + return True + + # pylint: disable=too-many-return-statements,too-many-branches + @staticmethod + def remove_entry(data, key, index=None, value=None, sep='.'): + ''' remove data at location key ''' + if key == '' and isinstance(data, dict): + if value is not None: + data.pop(value) + elif index is not None: + raise YeditException("remove_entry for a dictionary does not have an index {0}".format(index)) + else: + data.clear() + + return True + + elif key == '' and isinstance(data, list): + ind = None + if value is not None: + try: + ind = data.index(value) + except ValueError: + return False + elif index is not None: + ind = index + else: + del data[:] + + if ind is not None: + data.pop(ind) + + return True + + if not (key and Yedit.valid_key(key, sep)) and \ + isinstance(data, (list, dict)): + return None + + key_indexes = Yedit.parse_key(key, sep) + for arr_ind, dict_key in 
key_indexes[:-1]: + if dict_key and isinstance(data, dict): + data = data.get(dict_key) + elif (arr_ind and isinstance(data, list) and + int(arr_ind) <= len(data) - 1): + data = data[int(arr_ind)] + else: + return None + + # process last index for remove + # expected list entry + if key_indexes[-1][0]: + if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501 + del data[int(key_indexes[-1][0])] + return True + + # expected dict entry + elif key_indexes[-1][1]: + if isinstance(data, dict): + del data[key_indexes[-1][1]] + return True + + @staticmethod + def add_entry(data, key, item=None, sep='.'): + ''' Get an item from a dictionary with key notation a.b.c + d = {'a': {'b': 'c'}}} + key = a#b + return c + ''' + if key == '': + pass + elif (not (key and Yedit.valid_key(key, sep)) and + isinstance(data, (list, dict))): + return None + + key_indexes = Yedit.parse_key(key, sep) + for arr_ind, dict_key in key_indexes[:-1]: + if dict_key: + if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501 + data = data[dict_key] + continue + + elif data and not isinstance(data, dict): + raise YeditException("Unexpected item type found while going through key " + + "path: {0} (at key: {1})".format(key, dict_key)) + + data[dict_key] = {} + data = data[dict_key] + + elif (arr_ind and isinstance(data, list) and + int(arr_ind) <= len(data) - 1): + data = data[int(arr_ind)] + else: + raise YeditException("Unexpected item type found while going through key path: {0}".format(key)) + + if key == '': + data = item + + # process last index for add + # expected list entry + elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data): # noqa: E501 + # key is next element in array so append + if int(key_indexes[-1][0]) > len(data)-1: + data.append(item) + else: + data[int(key_indexes[-1][0])] = item + + # expected dict entry + elif key_indexes[-1][1] and isinstance(data, dict): + data[key_indexes[-1][1]] = item 
+ + # didn't add/update to an existing list, nor add/update key to a dict + # so we must have been provided some syntax like a.b.c[] = "data" for a + # non-existent array + else: + raise YeditException("Error adding to object at path: {0}".format(key)) + + return data + + @staticmethod + def get_entry(data, key, sep='.'): + ''' Get an item from a dictionary with key notation a.b.c + d = {'a': {'b': 'c'}}} + key = a.b + return c + ''' + if key == '': + pass + elif (not (key and Yedit.valid_key(key, sep)) and + isinstance(data, (list, dict))): + return None + + key_indexes = Yedit.parse_key(key, sep) + for arr_ind, dict_key in key_indexes: + if dict_key and isinstance(data, dict): + data = data.get(dict_key) + elif (arr_ind and isinstance(data, list) and + int(arr_ind) <= len(data) - 1): + data = data[int(arr_ind)] + else: + return None + + return data + + @staticmethod + def _write(filename, contents): + ''' Actually write the file contents to disk. This helps with mocking. ''' + + tmp_filename = filename + '.yedit' + + with open(tmp_filename, 'w') as yfd: + fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB) + yfd.write(contents) + yfd.flush() # flush internal buffers + os.fsync(yfd.fileno()) # ensure buffer content reached disk + fcntl.flock(yfd, fcntl.LOCK_UN) + + os.rename(tmp_filename, filename) + # While the rename is atomic, we also need to ensure, that the updated + # directory entry has reached the disk too. + # NOTE: this might fail on Windows systems. 
+ dfd = None + try: + dfd = os.open(os.path.join(os.path.realpath('.'), os.path.dirname(filename)), os.O_DIRECTORY) + os.fsync(dfd) + finally: + if dfd: + os.close(dfd) + + def write(self): + ''' write to file ''' + if not self.filename: + raise YeditException('Please specify a filename.') + + if self.backup and self.file_exists(): + shutil.copy(self.filename, '{0}{1}'.format(self.filename, self.backup_ext)) + + # Try to set format attributes if supported + try: + self.yaml_dict.fa.set_block_style() + except AttributeError: + pass + + # Try to use RoundTripDumper if supported. + if self.content_type == 'yaml': + try: + Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper)) + except AttributeError: + Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False)) + elif self.content_type == 'json': + Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True)) + else: + raise YeditException('Unsupported content_type: {0}.'.format(self.content_type) + + 'Please specify a content_type of yaml or json.') + + return (True, self.yaml_dict) + + def read(self): + ''' read from file ''' + # check if it exists + if self.filename is None or not self.file_exists(): + return None + + contents = None + with open(self.filename) as yfd: + contents = yfd.read() + + return contents + + def file_exists(self): + ''' return whether file exists ''' + if os.path.exists(self.filename): + return True + + return False + + def load(self, content_type='yaml'): + ''' return yaml file ''' + contents = self.read() + + if not contents and not self.content: + return None + + if self.content: + if isinstance(self.content, dict): + self.yaml_dict = self.content + return self.yaml_dict + elif isinstance(self.content, str): + contents = self.content + + # check if it is yaml + try: + if content_type == 'yaml' and contents: + # Try to set format attributes if supported + try: + self.yaml_dict.fa.set_block_style() + except 
AttributeError: + pass + + # Try to use RoundTripLoader if supported. + try: + self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader) + except AttributeError: + self.yaml_dict = yaml.safe_load(contents) + + # Try to set format attributes if supported + try: + self.yaml_dict.fa.set_block_style() + except AttributeError: + pass + + elif content_type == 'json' and contents: + self.yaml_dict = json.loads(contents) + except yaml.YAMLError as err: + # Error loading yaml or json + raise YeditException('Problem with loading yaml file. {0}'.format(err)) + + return self.yaml_dict + + def get(self, key): + ''' get a specified key''' + try: + entry = Yedit.get_entry(self.yaml_dict, key, self.separator) + except KeyError: + entry = None + + return entry + + def pop(self, path, key_or_item): + ''' remove a key, value pair from a dict or an item for a list''' + try: + entry = Yedit.get_entry(self.yaml_dict, path, self.separator) + except KeyError: + entry = None + + if entry is None: + return (False, self.yaml_dict) + + if isinstance(entry, dict): + # AUDIT:maybe-no-member makes sense due to fuzzy types + # pylint: disable=maybe-no-member + if key_or_item in entry: + entry.pop(key_or_item) + return (True, self.yaml_dict) + return (False, self.yaml_dict) + + elif isinstance(entry, list): + # AUDIT:maybe-no-member makes sense due to fuzzy types + # pylint: disable=maybe-no-member + ind = None + try: + ind = entry.index(key_or_item) + except ValueError: + return (False, self.yaml_dict) + + entry.pop(ind) + return (True, self.yaml_dict) + + return (False, self.yaml_dict) + + def delete(self, path, index=None, value=None): + ''' remove path from a dict''' + try: + entry = Yedit.get_entry(self.yaml_dict, path, self.separator) + except KeyError: + entry = None + + if entry is None: + return (False, self.yaml_dict) + + result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator) + if not result: + return (False, self.yaml_dict) + + return (True, self.yaml_dict) + + 
def exists(self, path, value): + ''' check if value exists at path''' + try: + entry = Yedit.get_entry(self.yaml_dict, path, self.separator) + except KeyError: + entry = None + + if isinstance(entry, list): + if value in entry: + return True + return False + + elif isinstance(entry, dict): + if isinstance(value, dict): + rval = False + for key, val in value.items(): + if entry[key] != val: + rval = False + break + else: + rval = True + return rval + + return value in entry + + return entry == value + + def append(self, path, value): + '''append value to a list''' + try: + entry = Yedit.get_entry(self.yaml_dict, path, self.separator) + except KeyError: + entry = None + + if entry is None: + self.put(path, []) + entry = Yedit.get_entry(self.yaml_dict, path, self.separator) + if not isinstance(entry, list): + return (False, self.yaml_dict) + + # AUDIT:maybe-no-member makes sense due to loading data from + # a serialized format. + # pylint: disable=maybe-no-member + entry.append(value) + return (True, self.yaml_dict) + + def insert(self, path, value, index=0): + '''insert value to a list''' + try: + entry = Yedit.get_entry(self.yaml_dict, path, self.separator) + except KeyError: + entry = None + + if entry is None: + self.put(path, []) + entry = Yedit.get_entry(self.yaml_dict, path, self.separator) + if not isinstance(entry, list): + return (False, self.yaml_dict) + + entry.insert(index, value) + return (True, self.yaml_dict) + + # pylint: disable=too-many-arguments + def update(self, path, value, index=None, curr_value=None): + ''' put path, value into a dict ''' + try: + entry = Yedit.get_entry(self.yaml_dict, path, self.separator) + except KeyError: + entry = None + + if isinstance(entry, dict): + # AUDIT:maybe-no-member makes sense due to fuzzy types + # pylint: disable=maybe-no-member + if not isinstance(value, dict): + raise YeditException('Cannot replace key, value entry in dict with non-dict type. 
' + + 'value=[{0}] type=[{1}]'.format(value, type(value))) + + entry.update(value) + return (True, self.yaml_dict) + + elif isinstance(entry, list): + # AUDIT:maybe-no-member makes sense due to fuzzy types + # pylint: disable=maybe-no-member + ind = None + if curr_value: + try: + ind = entry.index(curr_value) + except ValueError: + return (False, self.yaml_dict) + + elif index is not None: + ind = index + + if ind is not None and entry[ind] != value: + entry[ind] = value + return (True, self.yaml_dict) + + # see if it exists in the list + try: + ind = entry.index(value) + except ValueError: + # doesn't exist, append it + entry.append(value) + return (True, self.yaml_dict) + + # already exists, return + if ind is not None: + return (False, self.yaml_dict) + return (False, self.yaml_dict) + + def put(self, path, value): + ''' put path, value into a dict ''' + try: + entry = Yedit.get_entry(self.yaml_dict, path, self.separator) + except KeyError: + entry = None + + if entry == value: + return (False, self.yaml_dict) + + # deepcopy didn't work + # Try to use ruamel.yaml and fallback to pyyaml + try: + tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, + default_flow_style=False), + yaml.RoundTripLoader) + except AttributeError: + tmp_copy = copy.deepcopy(self.yaml_dict) + + # set the format attributes if available + try: + tmp_copy.fa.set_block_style() + except AttributeError: + pass + + result = Yedit.add_entry(tmp_copy, path, value, self.separator) + if result is None: + return (False, self.yaml_dict) + + # When path equals "" it is a special case. 
+ # "" refers to the root of the document + # Only update the root path (entire document) when its a list or dict + if path == '': + if isinstance(result, list) or isinstance(result, dict): + self.yaml_dict = result + return (True, self.yaml_dict) + + return (False, self.yaml_dict) + + self.yaml_dict = tmp_copy + + return (True, self.yaml_dict) + + def create(self, path, value): + ''' create a yaml file ''' + if not self.file_exists(): + # deepcopy didn't work + # Try to use ruamel.yaml and fallback to pyyaml + try: + tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, + default_flow_style=False), + yaml.RoundTripLoader) + except AttributeError: + tmp_copy = copy.deepcopy(self.yaml_dict) + + # set the format attributes if available + try: + tmp_copy.fa.set_block_style() + except AttributeError: + pass + + result = Yedit.add_entry(tmp_copy, path, value, self.separator) + if result is not None: + self.yaml_dict = tmp_copy + return (True, self.yaml_dict) + + return (False, self.yaml_dict) + + @staticmethod + def get_curr_value(invalue, val_type): + '''return the current value''' + if invalue is None: + return None + + curr_value = invalue + if val_type == 'yaml': + curr_value = yaml.safe_load(str(invalue)) + elif val_type == 'json': + curr_value = json.loads(invalue) + + return curr_value + + @staticmethod + def parse_value(inc_value, vtype=''): + '''determine value type passed''' + true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', + 'on', 'On', 'ON', ] + false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', + 'off', 'Off', 'OFF'] + + # It came in as a string but you didn't specify value_type as string + # we will convert to bool if it matches any of the above cases + if isinstance(inc_value, str) and 'bool' in vtype: + if inc_value not in true_bools and inc_value not in false_bools: + raise YeditException('Not a boolean type. 
str=[{0}] vtype=[{1}]'.format(inc_value, vtype)) + elif isinstance(inc_value, bool) and 'str' in vtype: + inc_value = str(inc_value) + + # There is a special case where '' will turn into None after yaml loading it so skip + if isinstance(inc_value, str) and inc_value == '': + pass + # If vtype is not str then go ahead and attempt to yaml load it. + elif isinstance(inc_value, str) and 'str' not in vtype: + try: + inc_value = yaml.safe_load(inc_value) + except Exception: + raise YeditException('Could not determine type of incoming value. ' + + 'value=[{0}] vtype=[{1}]'.format(type(inc_value), vtype)) + + return inc_value + + @staticmethod + def process_edits(edits, yamlfile): + '''run through a list of edits and process them one-by-one''' + results = [] + for edit in edits: + value = Yedit.parse_value(edit['value'], edit.get('value_type', '')) + if edit.get('action') == 'update': + # pylint: disable=line-too-long + curr_value = Yedit.get_curr_value( + Yedit.parse_value(edit.get('curr_value')), + edit.get('curr_value_format')) + + rval = yamlfile.update(edit['key'], + value, + edit.get('index'), + curr_value) + + elif edit.get('action') == 'append': + rval = yamlfile.append(edit['key'], value) + + elif edit.get('action') == 'insert': + rval = yamlfile.insert(edit['key'], value, edit['index']) + + else: + rval = yamlfile.put(edit['key'], value) + + if rval[0]: + results.append({'key': edit['key'], 'edit': rval[1]}) + + return {'changed': len(results) > 0, 'results': results} + + # pylint: disable=too-many-return-statements,too-many-branches + @staticmethod + def run_ansible(params): + '''perform the idempotent crud operations''' + yamlfile = Yedit(filename=params['src'], + backup=params['backup'], + content_type=params['content_type'], + backup_ext=params['backup_ext'], + separator=params['separator']) + + state = params['state'] + + if params['src']: + rval = yamlfile.load() + + if yamlfile.yaml_dict is None and state != 'present': + return {'failed': True, + 'msg': 
'Error opening file [{0}]. Verify that the '.format(params['src']) + + 'file exists, that it is has correct permissions, and is valid yaml.'} + + if state == 'list': + if params['content']: + content = Yedit.parse_value(params['content'], params['content_type']) + yamlfile.yaml_dict = content + + if params['key']: + rval = yamlfile.get(params['key']) + + return {'changed': False, 'result': rval, 'state': state} + + elif state == 'absent': + if params['content']: + content = Yedit.parse_value(params['content'], params['content_type']) + yamlfile.yaml_dict = content + + if params['update']: + rval = yamlfile.pop(params['key'], params['value']) + else: + rval = yamlfile.delete(params['key'], params['index'], params['value']) + + if rval[0] and params['src']: + yamlfile.write() + + return {'changed': rval[0], 'result': rval[1], 'state': state} + + elif state == 'present': + # check if content is different than what is in the file + if params['content']: + content = Yedit.parse_value(params['content'], params['content_type']) + + # We had no edits to make and the contents are the same + if yamlfile.yaml_dict == content and \ + params['value'] is None: + return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state} + + yamlfile.yaml_dict = content + + # If we were passed a key, value then + # we enapsulate it in a list and process it + # Key, Value passed to the module : Converted to Edits list # + edits = [] + _edit = {} + if params['value'] is not None: + _edit['value'] = params['value'] + _edit['value_type'] = params['value_type'] + _edit['key'] = params['key'] + + if params['update']: + _edit['action'] = 'update' + _edit['curr_value'] = params['curr_value'] + _edit['curr_value_format'] = params['curr_value_format'] + _edit['index'] = params['index'] + + elif params['append']: + _edit['action'] = 'append' + + elif params['insert']: + _edit['action'] = 'insert' + _edit['index'] = params['index'] + + edits.append(_edit) + + elif params['edits'] is not None: + 
edits = params['edits'] + + if edits: + results = Yedit.process_edits(edits, yamlfile) + + # if there were changes and a src provided to us we need to write + if results['changed'] and params['src']: + yamlfile.write() + + return {'changed': results['changed'], 'result': results['results'], 'state': state} + + # no edits to make + if params['src']: + rval = yamlfile.write() + return {'changed': rval[0], + 'result': rval[1], + 'state': state} + + # We were passed content but no src, key or value, or edits. Return contents in memory + return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state} + return {'failed': True, 'msg': 'Unkown state passed'} + +def json_roundtrip_clean(js): + ''' Clean-up any non-string keys from a Python object, to ensure it can be serialized as JSON ''' + cleaned_json = json.dumps(js, skipkeys=True) + return json.loads(cleaned_json) + +# pylint: disable=too-many-branches +def main(): + ''' ansible oc module for secrets ''' + + module = AnsibleModule( + argument_spec=dict( + state=dict(default='present', type='str', + choices=['present', 'absent', 'list']), + debug=dict(default=False, type='bool'), + src=dict(default=None, type='str'), + content=dict(default=None), + content_type=dict(default='yaml', choices=['yaml', 'json']), + key=dict(default='', type='str'), + value=dict(), + value_type=dict(default='', type='str'), + update=dict(default=False, type='bool'), + append=dict(default=False, type='bool'), + insert=dict(default=False, type='bool'), + index=dict(default=None, type='int'), + curr_value=dict(default=None, type='str'), + curr_value_format=dict(default='yaml', + choices=['yaml', 'json', 'str'], + type='str'), + backup=dict(default=False, type='bool'), + backup_ext=dict(default=".{0}".format(time.strftime("%Y%m%dT%H%M%S")), type='str'), + separator=dict(default='.', type='str'), + edits=dict(default=None, type='list'), + ), + mutually_exclusive=[["curr_value", "index"], ['update', "append"]], + 
required_one_of=[["content", "src"]], + ) + + # Verify we recieved either a valid key or edits with valid keys when receiving a src file. + # A valid key being not None or not ''. + if module.params['src'] is not None: + key_error = False + edit_error = False + + if module.params['key'] is None: + key_error = True + + if module.params['edits'] in [None, []]: + edit_error = True + + else: + for edit in module.params['edits']: + if edit.get('key') in [None, '']: + edit_error = True + break + + if key_error and edit_error: + return module.fail_json(failed=True, msg='Empty value for parameter key not allowed.') + + rval = json_roundtrip_clean(Yedit.run_ansible(module.params)) + if 'failed' in rval and rval['failed']: + return module.fail_json(**rval) + + return module.exit_json(**rval) + + +if __name__ == '__main__': + main() diff --git a/roles/kwoodson.yedit/meta/.galaxy_install_info b/roles/kwoodson.yedit/meta/.galaxy_install_info new file mode 100644 index 00000000..150df6ba --- /dev/null +++ b/roles/kwoodson.yedit/meta/.galaxy_install_info @@ -0,0 +1,2 @@ +install_date: Wed Mar 23 18:12:11 2022 +version: master diff --git a/roles/kwoodson.yedit/meta/main.yml b/roles/kwoodson.yedit/meta/main.yml new file mode 100644 index 00000000..3369467f --- /dev/null +++ b/roles/kwoodson.yedit/meta/main.yml @@ -0,0 +1,18 @@ +--- +galaxy_info: + author: Kenny Woodson + description: A yaml/json editing module for idempotent edits. + company: Red Hat, Inc. 
+ license: Apache License, Version 2.0 + min_ansible_version: 1.8 + platforms: + - name: EL + versions: + - 7 + galaxy_tags: + - json + - yaml + - edit + - replace + - sed +dependencies: [] diff --git a/roles/kwoodson.yedit/tasks/main.yml b/roles/kwoodson.yedit/tasks/main.yml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/roles/kwoodson.yedit/tasks/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/kwoodson.yedit/tests/integration/.gitignore b/roles/kwoodson.yedit/tests/integration/.gitignore new file mode 100644 index 00000000..8bd2a935 --- /dev/null +++ b/roles/kwoodson.yedit/tests/integration/.gitignore @@ -0,0 +1,3 @@ +*.retry +*.orig +*.2\d\d\d* diff --git a/roles/kwoodson.yedit/tests/integration/files/kube-manager.yaml b/roles/kwoodson.yedit/tests/integration/files/kube-manager.yaml new file mode 100644 index 00000000..6f4b9e6d --- /dev/null +++ b/roles/kwoodson.yedit/tests/integration/files/kube-manager.yaml @@ -0,0 +1,39 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: kube-controller-manager + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: kube-controller-manager + image: openshift/kube:v1.0.0 + command: + - /hyperkube + - controller-manager + - --master=http://127.0.0.1:8080 + - --leader-elect=true + - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem + - --root-ca-file=/etc/kubernetes/ssl/ca.pem + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10252 + initialDelaySeconds: 15 + timeoutSeconds: 1 + volumeMounts: + - mountPath: /etc/kubernetes/ssl + name: ssl-certs-kubernetes + readOnly: true + - mountPath: /etc/ssl/certs + name: ssl-certs-host + readOnly: true + volumes: + - hostPath: + path: /etc/kubernetes/ssl + name: ssl-certs-kubernetes + - hostPath: + path: /usr/share/ca-certificates + name: ssl-certs-host diff --git a/roles/kwoodson.yedit/tests/integration/files/playbook.yml b/roles/kwoodson.yedit/tests/integration/files/playbook.yml new file mode 100644 index 
00000000..4d7d8692 --- /dev/null +++ b/roles/kwoodson.yedit/tests/integration/files/playbook.yml @@ -0,0 +1,5 @@ +- hosts: master + tasks: + - debug: var=foo + vars: + foo: bar diff --git a/roles/kwoodson.yedit/tests/integration/yedit.yml b/roles/kwoodson.yedit/tests/integration/yedit.yml new file mode 100755 index 00000000..50d7ad76 --- /dev/null +++ b/roles/kwoodson.yedit/tests/integration/yedit.yml @@ -0,0 +1,347 @@ +#!/usr/bin/ansible-playbook --module-path=../../../library/ +# +# Yedit test so that we can quickly determine if features are working +# Ensure that the kube-manager.yaml file exists +# +# ./yedit_test.yml +# +--- +- hosts: localhost + gather_facts: no + vars: + test_file: kube-manager-test.yaml + test: test +# strategy: debug + + post_tasks: + - name: copy the kube-manager.yaml file so that we have a pristine copy each time + copy: + src: kube-manager.yaml + dest: "./{{ test_file }}" + changed_when: False + + ####### add key to top level ##### + - name: add a key at the top level + yedit: + src: "{{ test_file }}" + key: yedittest + value: yedittest + + - name: retrieve the inserted key + yedit: + src: "{{ test_file }}" + state: list + key: yedittest + register: results + + - name: Assert that key is at top level + assert: + that: results.result == 'yedittest' + msg: 'Test: add a key to top level failed. 
yedittest != [{{ results.result }}]' + ###### end add key to top level ##### + + ###### modify multilevel key, value ##### + - name: modify multilevel key, value + yedit: + src: "{{ test_file }}" + key: metadata-namespace + value: openshift-is-awesome + separator: '-' + + - name: retrieve the inserted key + yedit: + src: "{{ test_file }}" + state: list + key: metadata-namespace + separator: '-' + register: results + + - name: Assert that key is as expected + assert: + that: results.result == 'openshift-is-awesome' + msg: 'Test: multilevel key, value modification: openshift-is-awesome != [{{ results.result }}]' + ###### end modify multilevel key, value ##### + + ###### test a string boolean ##### + - name: test a string boolean + yedit: + src: "{{ test_file }}" + key: spec.containers[0].volumeMounts[1].readOnly + value: 'true' + value_type: str + + - name: retrieve the inserted key + yedit: + src: "{{ test_file }}" + state: list + key: spec.containers[0].volumeMounts[1].readOnly + register: results + + - name: Assert that key is a string + assert: + that: results.result == "true" + msg: "Test: boolean str: 'true' != [{{ results.result }}]" + + - name: Assert that key is not bool + assert: + that: results.result != true + msg: "Test: boolean str: true != [{{ results.result }}]" + ###### end test boolean string ##### + + ###### test array append ##### + - name: test array append + yedit: + src: "{{ test_file }}" + key: spec.containers[0].command + value: --my-new-parameter=openshift + append: True + + - name: retrieve the array + yedit: + src: "{{ test_file }}" + state: list + key: spec.containers[0].command + register: results + + - name: Assert that the last element in array is our value + assert: + that: results.result[-1] == "--my-new-parameter=openshift" + msg: "Test: '--my-new-parameter=openshift' != [{{ results.result[-1] }}]" + ###### end test array append ##### + + ###### test non-existing array append ##### + - name: test array append to non-existing key + 
yedit: + src: "{{ test_file }}" + key: nonexistingkey + value: --my-new-parameter=openshift + append: True + + - name: retrieve the array + yedit: + src: "{{ test_file }}" + state: list + key: nonexistingkey + register: results + + - name: Assert that the last element in array is our value + assert: + that: results.result[-1] == "--my-new-parameter=openshift" + msg: "Test: '--my-new-parameter=openshift' != [{{ results.result[-1] }}]" + ###### end test non-existing array append ##### + + ###### test array update modify ##### + - name: test array update modify + yedit: + src: "{{ test_file }}" + key: spec.containers[0].command + value: --root-ca-file=/etc/k8s/ssl/my.pem + curr_value: --root-ca-file=/etc/kubernetes/ssl/ca.pem + curr_value_format: str + update: True + + - name: retrieve the array + yedit: + src: "{{ test_file }}" + state: list + key: spec.containers[0].command + register: results + + - name: Assert that the element in array is our value + assert: + that: results.result[5] == "--root-ca-file=/etc/k8s/ssl/my.pem" + msg: "Test: '--root-ca-file=/etc/k8s/ssl/my.pem' != [{{ results.result[5] }}]" + ###### end test array update modify##### + + ###### test dict create ##### + - name: test dict create + yedit: + src: "{{ test_file }}" + key: a.b.c + value: d + + - name: retrieve the key + yedit: + src: "{{ test_file }}" + state: list + key: a.b.c + register: results + + - name: Assert that the key was created + assert: + that: results.result == "d" + msg: "Test: 'd' != [{{ results.result }}]" + ###### end test dict create ##### + + ###### test create dict value ##### + - name: test create dict value + yedit: + src: "{{ test_file }}" + key: e.f.g + value: + h: + i: + j: k + + - name: retrieve the key + yedit: + src: "{{ test_file }}" + state: list + key: e.f.g.h.i.j + register: results + + - name: Assert that the key was created + assert: + that: results.result == "k" + msg: "Test: 'k' != [{{ results.result }}]" + ###### end test dict create ##### + + ###### 
test create list value ##### + - name: test create list value + yedit: + src: "{{ test_file }}" + key: z.x.y + value: + - 1 + - 2 + - 3 + + - name: retrieve the key + yedit: + src: "{{ test_file }}" + state: list + key: z#x#y + separator: '#' + register: results + - debug: var=results + + - name: Assert that the key was created + assert: + that: results.result == [1, 2, 3] + msg: "Test: '[1, 2, 3]' != [{{ results.result }}]" + ###### end test create list value ##### + + ###### test create multiple list value ##### + - name: test multiple edits + yedit: + src: "{{ test_file }}" + edits: + - key: z.x.y + value: + - 1 + - 2 + - 3 + - key: z.x.y + value: 4 + action: append + + - name: retrieve the key + yedit: + src: "{{ test_file }}" + state: list + key: z#x#y + separator: '#' + register: results + - debug: var=results + + - name: Assert that the key was created + assert: + that: results.result == [1, 2, 3, 4] + msg: "Test: '[1, 2, 3, 4]' != [{{ results.result }}]" + ###### end test create multiple list value ##### + + ###### test state absent on list item ##### + - name: test state absent on a list item + yedit: + state: absent + content: ['oranges', 'apples'] + value: apples + register: absentout + + - debug: var=absentout + - assert: + that: + - absentout.result == ['oranges'] + - absentout.result|length == 1 + + - name: test state absent on a list item + yedit: + state: absent + content: ['oranges', 'apples'] + index: 1 + register: absentout + + - debug: var=absentout + - assert: + that: + - absentout.result == ['oranges'] + - absentout.result|length == 1 + ###### end test state absent on list item #### + + + ###### test backup ext on filename ##### + - name: test backup ext on file + yedit: + state: present + src: "{{ test_file }}" + key: test + backup: true + backup_ext: .orig + value: "{{ lookup('pipe', 'date +%s') }}" + + - name: second call which should be noop + yedit: + state: present + src: "{{ test_file }}" + key: test + backup: true + value: "{{ 
lookup('pipe', 'date +%s') }}" + + - name: stat file + stat: + path: "{{ test_file }}.orig" + register: statout + + - assert: + that: + - statout.stat.exists + + - name: set the date string for backup_ext + set_fact: + date_str: "{{ lookup('pipe', 'date +%s') }}" + + - yedit: + state: present + src: "{{ test_file }}" + key: test + backup: true + backup_ext: "{{ date_str }}" + value: "{{ lookup('pipe', 'date +%s') }}" + + - name: stat file + stat: + path: "{{ test_file }}{{ date_str }}" + register: statout + + - assert: + that: + - statout.stat.exists + ###### end test state absent on list item #### + + ###### test append to top level + - name: test append to top level + yedit: + src: files/playbook.yml + edits: + - key: "[1]" + value: + hosts: masters[1] + tasks: + - debug: msg=hello + register: addedpb + + - debug: var=addedpb + - assert: + that: + - addedpb.result[0].edit[1].hosts == "masters[1]" + ###### end test append to base list diff --git a/roles/kwoodson.yedit/tests/main.yml b/roles/kwoodson.yedit/tests/main.yml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/roles/kwoodson.yedit/tests/main.yml @@ -0,0 +1 @@ +--- diff --git a/roles/kwoodson.yedit/tests/test.py b/roles/kwoodson.yedit/tests/test.py new file mode 100755 index 00000000..716327b4 --- /dev/null +++ b/roles/kwoodson.yedit/tests/test.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python +import sys +import os + +# setup import path for yedit +sys.path.append(os.path.join(os.path.realpath('.'), '../library')) +import yedit + + +# perform simple test from README.md +yedit = yedit.Yedit('./pytest.yml') +results = yedit.put('a#b#c', {'d': {'e': {'f': "this is a test"}}}) +print results +#yedit.write() diff --git a/roles/kwoodson.yedit/tests/unit/.gitignore b/roles/kwoodson.yedit/tests/unit/.gitignore new file mode 100644 index 00000000..0d20b648 --- /dev/null +++ b/roles/kwoodson.yedit/tests/unit/.gitignore @@ -0,0 +1 @@ +*.pyc diff --git a/roles/kwoodson.yedit/tests/unit/test_yedit.py 
b/roles/kwoodson.yedit/tests/unit/test_yedit.py new file mode 100755 index 00000000..a567548a --- /dev/null +++ b/roles/kwoodson.yedit/tests/unit/test_yedit.py @@ -0,0 +1,377 @@ +''' + Unit tests for yedit +''' + +import os +import sys +import unittest +import mock + +# Removing invalid variable names for tests so that I can +# keep them brief +# pylint: disable=invalid-name,no-name-in-module +# Disable import-error b/c our libraries aren't loaded in jenkins +# pylint: disable=import-error +# place yedit in our path +yedit_path = os.path.join(os.path.realpath('.'), '../../library') # noqa: E501 +sys.path.insert(0, yedit_path) + +from yedit import Yedit, YeditException # noqa: E402 + +# pylint: disable=too-many-public-methods +# Silly pylint, moar tests! + + +class YeditTest(unittest.TestCase): + ''' + Test class for yedit + ''' + data = {'a': 'a', + 'b': {'c': {'d': [{'e': 'x'}, 'f', 'g']}}, + } # noqa: E124 + + filename = 'yedit_test.yml' + + def setUp(self): + ''' setup method will create a file and set to known configuration ''' + yed = Yedit(YeditTest.filename) + yed.yaml_dict = YeditTest.data + yed.write() + + def test_load(self): + ''' Testing a get ''' + yed = Yedit('yedit_test.yml') + self.assertEqual(yed.yaml_dict, self.data) + + def test_write(self): + ''' Testing a simple write ''' + yed = Yedit('yedit_test.yml') + yed.put('key1', 1) + yed.write() + self.assertTrue('key1' in yed.yaml_dict) + self.assertEqual(yed.yaml_dict['key1'], 1) + + def test_write_x_y_z(self): + '''Testing a write of multilayer key''' + yed = Yedit('yedit_test.yml') + yed.put('x.y.z', 'modified') + yed.write() + yed.load() + self.assertEqual(yed.get('x.y.z'), 'modified') + + def test_delete_a(self): + '''Testing a simple delete ''' + yed = Yedit('yedit_test.yml') + yed.delete('a') + yed.write() + yed.load() + self.assertTrue('a' not in yed.yaml_dict) + + def test_delete_b_c(self): + '''Testing delete of layered key ''' + yed = Yedit('yedit_test.yml', separator=':') + 
yed.delete('b:c') + yed.write() + yed.load() + self.assertTrue('b' in yed.yaml_dict) + self.assertFalse('c' in yed.yaml_dict['b']) + + def test_create(self): + '''Testing a create ''' + os.unlink(YeditTest.filename) + yed = Yedit('yedit_test.yml') + yed.create('foo', 'bar') + yed.write() + yed.load() + self.assertTrue('foo' in yed.yaml_dict) + self.assertTrue(yed.yaml_dict['foo'] == 'bar') + + def test_create_content(self): + '''Testing a create with content ''' + content = {"foo": "bar"} + yed = Yedit("yedit_test.yml", content) + yed.write() + yed.load() + self.assertTrue('foo' in yed.yaml_dict) + self.assertTrue(yed.yaml_dict['foo'], 'bar') + + def test_array_insert(self): + '''Testing a create with content ''' + yed = Yedit("yedit_test.yml", separator=':') + yed.put('b:c:d[0]', 'inject') + self.assertTrue(yed.get('b:c:d[0]') == 'inject') + + def test_array_insert_first_index(self): + '''Testing a create with content ''' + yed = Yedit("yedit_test.yml", separator=':') + yed.put('b:c:d[0]', 'inject') + self.assertTrue(yed.get('b:c:d[1]') == 'f') + + def test_array_insert_second_index(self): + '''Testing a create with content ''' + yed = Yedit("yedit_test.yml", separator=':') + yed.put('b:c:d[0]', 'inject') + self.assertTrue(yed.get('b:c:d[2]') == 'g') + + def test_dict_array_dict_access(self): + '''Testing a create with content''' + yed = Yedit("yedit_test.yml", separator=':') + yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}]) + self.assertTrue(yed.get('b:c:d[0]:[0]:x:y') == 'inject') + + def test_dict_array_dict_replace(self): + '''Testing multilevel delete''' + yed = Yedit("yedit_test.yml", separator=':') + yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}]) + yed.put('b:c:d[0]:[0]:x:y', 'testing') + self.assertTrue('b' in yed.yaml_dict) + self.assertTrue('c' in yed.yaml_dict['b']) + self.assertTrue('d' in yed.yaml_dict['b']['c']) + self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list)) + self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list)) + 
self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict)) + self.assertTrue('y' in yed.yaml_dict['b']['c']['d'][0][0]['x']) + self.assertTrue(yed.yaml_dict['b']['c']['d'][0][0]['x']['y'] == 'testing') # noqa: E501 + + def test_dict_array_dict_remove(self): + '''Testing multilevel delete''' + yed = Yedit("yedit_test.yml", separator=':') + yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}]) + yed.delete('b:c:d[0]:[0]:x:y') + self.assertTrue('b' in yed.yaml_dict) + self.assertTrue('c' in yed.yaml_dict['b']) + self.assertTrue('d' in yed.yaml_dict['b']['c']) + self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list)) + self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list)) + self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict)) + self.assertFalse('y' in yed.yaml_dict['b']['c']['d'][0][0]['x']) + + def test_key_exists_in_dict(self): + '''Testing exist in dict''' + yed = Yedit("yedit_test.yml", separator=':') + yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}]) + self.assertTrue(yed.exists('b:c', 'd')) + + def test_key_exists_in_list(self): + '''Testing exist in list''' + yed = Yedit("yedit_test.yml", separator=':') + yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}]) + self.assertTrue(yed.exists('b:c:d', [{'x': {'y': 'inject'}}])) + self.assertFalse(yed.exists('b:c:d', [{'x': {'y': 'test'}}])) + + def test_update_to_list_with_index(self): + '''Testing update to list with index''' + yed = Yedit("yedit_test.yml", separator=':') + yed.put('x:y:z', [1, 2, 3]) + yed.update('x:y:z', [5, 6], index=2) + self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]]) + self.assertTrue(yed.exists('x:y:z', [5, 6])) + self.assertFalse(yed.exists('x:y:z', 4)) + + def test_update_to_list_with_curr_value(self): + '''Testing update to list with index''' + yed = Yedit("yedit_test.yml", separator=':') + yed.put('x:y:z', [1, 2, 3]) + yed.update('x:y:z', [5, 6], curr_value=3) + self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]]) + self.assertTrue(yed.exists('x:y:z', [5, 
6])) + self.assertFalse(yed.exists('x:y:z', 4)) + + def test_update_to_list(self): + '''Testing update to list''' + yed = Yedit("yedit_test.yml", separator=':') + yed.put('x:y:z', [1, 2, 3]) + yed.update('x:y:z', [5, 6]) + self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6]]) + self.assertTrue(yed.exists('x:y:z', [5, 6])) + self.assertFalse(yed.exists('x:y:z', 4)) + + def test_append_twice_to_list(self): + '''Testing append to list''' + yed = Yedit("yedit_test.yml", separator=':') + yed.put('x:y:z', [1, 2, 3]) + yed.append('x:y:z', [5, 6]) + yed.append('x:y:z', [5, 6]) + self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6], [5, 6]]) + self.assertFalse(yed.exists('x:y:z', 4)) + + def test_add_item_to_dict(self): + '''Testing update to dict''' + yed = Yedit("yedit_test.yml", separator=':') + yed.put('x:y:z', {'a': 1, 'b': 2}) + yed.update('x:y:z', {'c': 3, 'd': 4}) + self.assertTrue(yed.get('x:y:z') == {'a': 1, 'b': 2, 'c': 3, 'd': 4}) + self.assertTrue(yed.exists('x:y:z', {'c': 3})) + + def test_first_level_dict_with_none_value(self): + '''test dict value with none value''' + yed = Yedit(content={'a': None}, separator=":") + yed.put('a:b:c', 'test') + self.assertTrue(yed.get('a:b:c') == 'test') + self.assertTrue(yed.get('a:b'), {'c': 'test'}) + + def test_adding_yaml_variable(self): + '''test dict value with none value''' + yed = Yedit("yedit_test.yml", separator=':') + yed.put('z:y', '{{test}}') + self.assertTrue(yed.get('z:y') == '{{test}}') + + def test_keys_with_underscore(self): + '''test dict value with none value''' + yed = Yedit("yedit_test.yml", separator=':') + yed.put('z_:y_y', {'test': '{{test}}'}) + self.assertTrue(yed.get('z_:y_y') == {'test': '{{test}}'}) + + def test_first_level_array_update(self): + '''test update on top level array''' + yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':') + yed.update('', {'c': 4}) + self.assertTrue({'c': 4} in yed.get('')) + + def test_first_level_array_delete(self): + '''test remove top level 
key''' + yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}]) + yed.delete('') + self.assertTrue({'b': 3} not in yed.get('')) + + def test_first_level_array_get(self): + '''test dict value with none value''' + yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}]) + yed.get('') + self.assertTrue([{'a': 1}, {'b': 2}, {'b': 3}] == yed.yaml_dict) + + def test_pop_list_item(self): + '''test dict value with none value''' + yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':') + yed.pop('', {'b': 2}) + self.assertTrue([{'a': 1}, {'b': 3}] == yed.yaml_dict) + + def test_pop_list_item_2(self): + '''test dict value with none value''' + z = list(range(10)) + yed = Yedit(content=z, separator=':') + yed.pop('', 5) + z.pop(5) + self.assertTrue(z == yed.yaml_dict) + + def test_pop_dict_key(self): + '''test dict value with none value''' + yed = Yedit(content={'a': {'b': {'c': 1, 'd': 2}}}, separator='#') + yed.pop('a#b', 'c') + self.assertTrue({'a': {'b': {'d': 2}}} == yed.yaml_dict) + + def test_accessing_path_with_unexpected_objects(self): + '''test providing source path objects that differ from current object state''' + yed = Yedit(content={'a': {'b': {'c': ['d', 'e']}}}) + with self.assertRaises(YeditException): + yed.put('a.b.c.d', 'x') + + def test_creating_new_objects_with_embedded_list(self): + '''test creating new objects with an embedded list in the creation path''' + yed = Yedit(content={'a': {'b': 12}}) + with self.assertRaises(YeditException): + yed.put('new.stuff[0].here', 'value') + + def test_creating_new_objects_with_trailing_list(self): + '''test creating new object(s) where the final piece is a list''' + yed = Yedit(content={'a': {'b': 12}}) + with self.assertRaises(YeditException): + yed.put('new.stuff.here[0]', 'item') + + def test_empty_key_with_int_value(self): + '''test editing top level with not list or dict''' + yed = Yedit(content={'a': {'b': 12}}) + result = yed.put('', 'b') + self.assertFalse(result[0]) + + def test_setting_separator(self): + 
'''test editing top level with not list or dict''' + yed = Yedit(content={'a': {'b': 12}}) + yed.separator = ':' + self.assertEqual(yed.separator, ':') + + def test_remove_all(self): + '''test removing all data''' + data = Yedit.remove_entry({'a': {'b': 12}}, '') + self.assertTrue(data) + + def test_remove_list_entry(self): + '''test removing list entry''' + data = {'a': {'b': [{'c': 3}]}} + results = Yedit.remove_entry(data, 'a.b[0]') + self.assertTrue(results) + self.assertTrue(data, {'a': {'b': []}}) + + def test_append_base_list(self): + '''test removing list entry''' + content = [{'a': {'b': [{'c': 3}]}}] + yed = Yedit(content=content) + yed.put("[1]", [{"next": "something"}]) + self.assertTrue(yed.yaml_dict, [{'a': {'b': [{'c': 3}]}}, {"next": "something"}]) + + def test_parse_value_string_true(self): + '''test parse_value''' + results = Yedit.parse_value('true', 'str') + self.assertEqual(results, 'true') + + def test_parse_value_bool_true(self): + '''test parse_value''' + results = Yedit.parse_value('true', 'bool') + self.assertTrue(results) + + def test_parse_value_bool_exception(self): + '''test parse_value''' + with self.assertRaises(YeditException): + Yedit.parse_value('TTT', 'bool') + + @mock.patch('yedit.Yedit.write') + def test_run_ansible_basic(self, mock_write): + '''test parse_value''' + params = { + 'src': None, + 'backup': False, + 'backup_ext': '', + 'separator': '.', + 'state': 'present', + 'edits': [], + 'value': None, + 'key': None, + 'content': {'a': {'b': {'c': 1}}}, + 'content_type': '', + } + + results = Yedit.run_ansible(params) + + mock_write.side_effect = [ + (True, params['content']), + ] + + self.assertFalse(results['changed']) + + @mock.patch('yedit.Yedit.write') + def test_run_ansible_and_write(self, mock_write): + '''test parse_value''' + params = { + 'src': '/tmp/test', + 'backup': False, + 'backup_ext': '', + 'separator': '.', + 'state': 'present', + 'edits': [], + 'value': None, + 'key': None, + 'content': {'a': {'b': {'c': 
1}}}, + 'content_type': '', + } + + results = Yedit.run_ansible(params) + + mock_write.side_effect = [ + (True, params['content']), + ] + + self.assertTrue(results['changed']) + + def tearDown(self): + '''TearDown method''' + os.unlink(YeditTest.filename) diff --git a/roles/kwoodson.yedit/vars/main.yml b/roles/kwoodson.yedit/vars/main.yml new file mode 100644 index 00000000..ed97d539 --- /dev/null +++ b/roles/kwoodson.yedit/vars/main.yml @@ -0,0 +1 @@ +--- From 920dcfb8857752112030ec72d6d4dbe3c2b695c9 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Sun, 27 Mar 2022 01:51:12 -0700 Subject: [PATCH 617/885] Reworked setup playbook Signed-off-by: Jacob Emery --- roles/set_inventory/tasks/main.yaml | 76 ++++------------------------- 1 file changed, 10 insertions(+), 66 deletions(-) diff --git a/roles/set_inventory/tasks/main.yaml b/roles/set_inventory/tasks/main.yaml index 751d9687..c94e93d4 100644 --- a/roles/set_inventory/tasks/main.yaml +++ b/roles/set_inventory/tasks/main.yaml @@ -1,89 +1,33 @@ --- -- name: Load in variables from env.yaml - tags: setup - include_vars: env.yaml - - name: Populate inventory with KVM host, bastion and bootstrap IP addresses - tags: setup + tags: set_inventory blockinfile: - path: inventory + path: "{{ inventory_file }}" marker: "#{mark} ansible managed block from set_inventory role" marker_begin: "start of" marker_end: "end of" block: | + [ftp] + {{ vault.ftp.ip }} ansible_user={{ env.ftp.user }} + [kvm_host] - {{env.ip.kvm}} ansible_become_password={{env.access.login.kvm.sudo_pass}} + {{ env.z.lpar.networking.hostname }} ansible_host={{vault.z.lpar.networking.ip}} ansible_user={{env.z.lpar.access.user}} ansible_become_password={{vault.z.lpar.access.pass}} [bastion] - {{env.ip.bastion}} ansible_become_password={{env.access.login.bastion.sudo_pass}} - - [bootstrap] - {{env.ip.bootstrap}} - - [control_nodes] - - [compute_nodes] + {{env.bastion.networking.hostname}} ansible_host={{vault.bastion.networking.ip}} 
ansible_user={{env.bastion.access.user}} ansible_become_password={{vault.bastion.access.pass}} state: present -- name: Add control nodes' IP addresses to inventory - tags: setup - lineinfile: - path: inventory - insertafter: "control_nodes" - line: "{{ item }}" - loop: "{{env.ip.control}}" - -- name: Add compute nodes' IP addresses to inventory - tags: setup - lineinfile: - path: inventory - insertafter: "compute_nodes" - line: "{{ item }}" - loop: "{{env.ip.compute}}" - -- name: Add infrastructure nodes group to inventory if set - tags: setup - lineinfile: - path: inventory - line: "[infra]" - when: env.ip.infra is defined - -- name: Add infrastructure nodes' IP addresses to inventory - tags: setup - lineinfile: - path: inventory - insertafter: "infra" - line: "{{ item }}" - loop: "{{env.ip.infra}}" - when: env.ip.infra is defined - -- name: Add extra RHEL VM apps group to inventory if set - tags: setup - lineinfile: - path: inventory - line: "[app]" - when: env.ip.app is defined - -- name: Add extra RHEL VM apps' IP addresses to inventory - tags: setup - lineinfile: - path: inventory - insertafter: "app" - line: "{{ item }} ansible_become_password={{env.access.login.app.sudo_pass}}" - loop: "{{env.ip.app}}" - when: env.ip.app is defined - - name: check inventory setup - tags: setup + tags: set_inventory command: ansible-inventory --list register: inv_check failed_when: inv_check.rc != 0 - name: Gather facts to re-read inventory after changes made to inventory - tags: setup + tags: set_inventory ansible.builtin.gather_facts: - name: Refresh inventory - tags: setup + tags: set_inventory meta: refresh_inventory \ No newline at end of file From b4de717473174386fb2520a11b8adc349de5a377 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Sun, 27 Mar 2022 01:52:25 -0700 Subject: [PATCH 618/885] Changed variable names to match new structure Signed-off-by: Jacob Emery --- roles/approve_certs/tasks/main.yaml | 22 ++--- roles/attach_subscription/tasks/main.yaml | 2 +- 
roles/check_dns/tasks/main.yaml | 63 ++++++-------- roles/check_ssh/tasks/main.yaml | 12 --- roles/create_bootstrap/tasks/main.yaml | 24 +++--- roles/create_compute_nodes/tasks/main.yaml | 38 ++++----- roles/create_control_nodes/tasks/main.yaml | 20 ++--- roles/dns/tasks/main.yaml | 85 +++++++------------ roles/dns/templates/dns-named.conf.j2 | 8 +- roles/dns/templates/dns.db.j2 | 17 ++-- roles/dns/templates/dns.rev.j2 | 14 +-- roles/get_ocp/tasks/main.yaml | 46 +++++----- .../get_ocp/templates/install-config.yaml.j2 | 10 +-- roles/haproxy/tasks/main.yaml | 34 ++++---- roles/haproxy/templates/haproxy.cfg.j2 | 8 +- roles/macvtap/tasks/main.yaml | 19 +++-- roles/macvtap/templates/macvtap.xml.j2 | 4 +- roles/prep_kvm_guests/tasks/main.yaml | 14 ++- roles/reset_files/tasks/main.yaml | 4 +- roles/set_firewall/tasks/main.yaml | 6 ++ roles/ssh_copy_id/tasks/main.yaml | 14 ++- roles/ssh_copy_id/vars/path_to_key_pair.yaml | 2 - roles/ssh_key_gen/tasks/main.yaml | 14 +-- roles/ssh_ocp_key_gen/tasks/main.yaml | 35 ++++---- roles/teardown_vms/tasks/main.yaml | 36 ++++---- roles/wait_for_bootstrap/tasks/main.yaml | 14 +-- .../wait_for_install_complete/tasks/main.yaml | 6 +- 27 files changed, 244 insertions(+), 327 deletions(-) delete mode 100644 roles/check_ssh/tasks/main.yaml delete mode 100644 roles/ssh_copy_id/vars/path_to_key_pair.yaml diff --git a/roles/approve_certs/tasks/main.yaml b/roles/approve_certs/tasks/main.yaml index 1c0b8b02..d8bc3f0c 100644 --- a/roles/approve_certs/tasks/main.yaml +++ b/roles/approve_certs/tasks/main.yaml @@ -3,7 +3,7 @@ - name: Approving all pending certificates tags: approve_certs shell: | - /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + /home/{{ env.bastion.access.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.bastion.access.user }}/ocpinst/oc adm certificate approve 
register: csr_approved ignore_errors: true @@ -20,7 +20,7 @@ - name: Approving all pending certificates tags: approve_certs shell: | - /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + /home/{{ env.bastion.access.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.bastion.access.user }}/ocpinst/oc adm certificate approve register: csr_approved ignore_errors: true @@ -37,7 +37,7 @@ - name: Approving all pending certificates tags: approve_certs shell: | - /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + /home/{{ env.bastion.access.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.bastion.access.user }}/ocpinst/oc adm certificate approve register: csr_approved ignore_errors: true @@ -54,7 +54,7 @@ - name: Approving all pending certificates tags: approve_certs shell: | - /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + /home/{{ env.bastion.access.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.bastion.access.user }}/ocpinst/oc adm certificate approve register: csr_approved ignore_errors: true @@ -71,7 +71,7 @@ - name: Approving all pending certificates tags: approve_certs shell: | - /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + /home/{{ env.bastion.access.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.bastion.access.user }}/ocpinst/oc adm certificate approve register: csr_approved ignore_errors: true @@ -88,7 +88,7 
@@ - name: Approving all pending certificates tags: approve_certs shell: | - /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + /home/{{ env.bastion.access.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.bastion.access.user }}/ocpinst/oc adm certificate approve register: csr_approved ignore_errors: true @@ -105,7 +105,7 @@ - name: Approving all pending certificates tags: approve_certs shell: | - /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + /home/{{ env.bastion.access.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.bastion.access.user }}/ocpinst/oc adm certificate approve register: csr_approved ignore_errors: true @@ -122,7 +122,7 @@ - name: Approving all pending certificates tags: approve_certs shell: | - /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + /home/{{ env.bastion.access.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.bastion.access.user }}/ocpinst/oc adm certificate approve register: csr_approved ignore_errors: true @@ -139,7 +139,7 @@ - name: Approving all pending certificates tags: approve_certs shell: | - /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + /home/{{ env.bastion.access.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.bastion.access.user }}/ocpinst/oc adm certificate approve register: csr_approved ignore_errors: true @@ -156,7 +156,7 @@ - name: Approving all pending certificates tags: 
approve_certs shell: | - /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + /home/{{ env.bastion.access.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.bastion.access.user }}/ocpinst/oc adm certificate approve register: csr_approved ignore_errors: true @@ -173,7 +173,7 @@ - name: Approving all pending certificates tags: approve_certs shell: | - /home/{{ env.access.login.bastion.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.access.login.bastion.user }}/ocpinst/oc adm certificate approve + /home/{{ env.bastion.access.user }}/ocpinst/oc get csr | awk '{print $1'} | grep -v NAME | xargs /home/{{ env.bastion.access.user }}/ocpinst/oc adm certificate approve register: csr_approved ignore_errors: true diff --git a/roles/attach_subscription/tasks/main.yaml b/roles/attach_subscription/tasks/main.yaml index 05946d12..f09aebd0 100644 --- a/roles/attach_subscription/tasks/main.yaml +++ b/roles/attach_subscription/tasks/main.yaml @@ -5,6 +5,6 @@ community.general.redhat_subscription: state: present username: "{{env.redhat.username}}" - password: "{{env.redhat.password}}" + password: "{{vault.redhat.password}}" auto_attach: yes force_register: yes \ No newline at end of file diff --git a/roles/check_dns/tasks/main.yaml b/roles/check_dns/tasks/main.yaml index 169c1bbc..cb583728 100644 --- a/roles/check_dns/tasks/main.yaml +++ b/roles/check_dns/tasks/main.yaml @@ -1,75 +1,60 @@ --- -- name: Load in variables from env.yaml - tags: check_dns,dns - include_vars: env.yaml - - name: Check internal cluster DNS resolution for bastion and its services - tags: check_dns,dns + tags: check_dns shell: "dig +short {{ item }} | tail -n1" loop: - - "{{ env.hostname.bastion }}.{{ env.networking.base_domain }}" - - "api.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}" - - 
"api-int.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}" - - "test.apps.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}" + - "{{ env.bastion.networking.hostname }}.{{ env.bastion.networking.base_domain }}" + - "api.{{ env.cluster.networking.metadata_name }}.{{ env.bastion.networking.base_domain }}" + - "api-int.{{ env.cluster.networking.metadata_name }}.{{ env.bastion.networking.base_domain }}" + - "test.apps.{{ env.cluster.networking.metadata_name }}.{{ env.bastion.networking.base_domain }}" register: bastion_lookup - failed_when: env.ip.bastion != bastion_lookup.stdout + failed_when: vault.bastion.networking.ip != bastion_lookup.stdout - name: Check internal cluster DNS resolution for bootstrap - tags: check_dns,dns - shell: "dig +short {{ env.hostname.bootstrap }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }} | tail -n1" + tags: check_dns + shell: "dig +short {{ env.cluster.nodes.bootstrap.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} | tail -n1" register: bootstrap_lookup - failed_when: env.ip.bootstrap != bootstrap_lookup.stdout + failed_when: env.cluster.nodes.bootstrap.ip != bootstrap_lookup.stdout - name: Print results from bootstrap lookup - tags: check_dns, dns + tags: check_dns debug: var: bootstrap_lookup.stdout - name: Check control nodes DNS resolution - tags: check_dns,dns - shell: "dig +short {{ env.hostname.control[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }} | tail -n1" + tags: check_dns + shell: "dig +short {{ env.cluster.nodes.control.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} | tail -n1" register: control_lookup - failed_when: env.ip.control[i] != control_lookup.stdout - with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 + failed_when: env.cluster.nodes.control.ip[i] != control_lookup.stdout + with_sequence: start=0 
end={{(env.cluster.nodes.control.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i - name: Check compute nodes DNS resolution - tags: check_dns,dns - shell: "dig +short {{ env.hostname.compute[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }} | tail -n1" + tags: check_dns + shell: "dig +short {{ env.cluster.nodes.compute.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} | tail -n1" register: compute_lookup - failed_when: env.ip.compute[i] != compute_lookup.stdout - with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 + failed_when: env.cluster.nodes.compute.ip[i] != compute_lookup.stdout + with_sequence: start=0 end={{(env.cluster.nodes.compute.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i - name: Check infrastructure nodes DNS resolution - tags: check_dns,dns - shell: "dig +short {{ env.hostname.infra[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }} | tail -n1" + tags: check_dns + shell: "dig +short {{ env.cluster.nodes.infra.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} | tail -n1" register: infra_lookup - failed_when: env.ip.infra[i] != infra_lookup.stdout - with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 - loop_control: - extended: yes - index_var: i - when: env.hostname.infra is defined - -- name: Check extra RHEL VM apps DNS resolution - tags: check_dns,dns - shell: "dig +short {{ env.hostname.app[i] }}.{{ env.networking.base_domain }} | tail -n1" - register: app_lookup - failed_when: env.ip.app[i] != app_lookup.stdout - with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 + failed_when: env.cluster.nodes.infra.ip[i] != infra_lookup.stdout + with_sequence: start=0 end={{(env.cluster.nodes.infra.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i - when: 
env.hostname.app is defined + when: env.cluster.nodes.infra.hostname is defined - name: Check external DNS resolution from forwarder - tags: check_dns,dns + tags: check_dns register: external_dns_check failed_when: '"server can" in external_dns_check.stdout' command: "nslookup {{ item }}" diff --git a/roles/check_ssh/tasks/main.yaml b/roles/check_ssh/tasks/main.yaml deleted file mode 100644 index ebb2d22f..00000000 --- a/roles/check_ssh/tasks/main.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- - -- name: Check SSH to remote hosts works - tags: check_ssh, ssh - shell: "hostname; id" - register: ssh_connection_test - failed_when: ssh_connection_test.rc != 0 - -- name: Print the connectivity test results - tags: check_ssh, ssh - debug: - var: ssh_connection_test.stdout_lines \ No newline at end of file diff --git a/roles/create_bootstrap/tasks/main.yaml b/roles/create_bootstrap/tasks/main.yaml index eea8543e..3e561d7d 100644 --- a/roles/create_bootstrap/tasks/main.yaml +++ b/roles/create_bootstrap/tasks/main.yaml @@ -1,13 +1,9 @@ --- -- name: Load in variables from env.yaml - tags: create_bootstrap - include_vars: env.yaml - - name: Check if bootstrap already exists tags: create_bootstrap community.libvirt.virt: - name: "{{ env.hostname.bootstrap }}" + name: "{{ env.cluster.nodes.bootstrap.hostname }}" command: status register: bootstrap_check ignore_errors: true @@ -21,16 +17,16 @@ tags: create_bootstrap command: | virt-install \ - --name {{env.hostname.bootstrap}} \ - --disk /var/lib/libvirt/images/{{env.hostname.bootstrap}}-bootstrap.qcow2,size={{ env.node_resources.bootstrap.disk_size }} \ - --ram {{ env.node_resources.bootstrap.ram }} \ + --name {{env.cluster.nodes.bootstrap.hostname}} \ + --disk pool=default,size={{ env.cluster.nodes.bootstrap.disk_size }} \ + --ram {{ env.cluster.nodes.bootstrap.ram }} \ --cpu host \ - --vcpus {{ env.node_resources.bootstrap.vcpu }} \ + --vcpus {{ env.cluster.nodes.bootstrap.vcpu }} \ --os-type linux \ - --os-variant rhel{{ 
env.node_resources.bootstrap.os_variant }} \ + --os-variant rhel{{ env.cluster.nodes.bootstrap.os_variant }} \ --network network=macvtap-net \ - --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.ip.bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env.ip.bootstrap}}::{{env.networking.gateway}}:{{env.networking.netmask}}:{{env.hostname.bootstrap}}::none:1500 nameserver={{env.networking.dns.nameserver}} coreos.inst.ignition_url=http://{{env.ip.bastion}}:8080/ignition/bootstrap.ign" \ + --location {{ env.z.lpar.storage_group.pool_path }},kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{vault.bastion.networking.ip}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env.cluster.nodes.bootstrap.ip}}::{{env.z.lpar.networking.gateway}}:{{env.z.lpar.networking.subnetmask}}:{{env.cluster.nodes.bootstrap.hostname}}::none:1500 nameserver={{env.cluster.networking.nameserver}} coreos.inst.ignition_url=http://{{vault.bastion.networking.ip}}:8080/ignition/bootstrap.ign" \ --graphics none \ --wait=-1 \ --noautoconsole @@ -39,9 +35,9 @@ - name: Set bootstrap qcow2 permissions become: true tags: create_bootstrap - command: chmod 600 /var/lib/libvirt/images/{{env.hostname.bootstrap}}-bootstrap.qcow2 + command: chmod 600 {{ env.z.lpar.storage_group.pool_path }}/{{env.cluster.nodes.bootstrap.hostname}}.qcow2 - name: Set bootstrap qcow2 ownership to qemu become: true tags: create_bootstrap - command: chown qemu:qemu /var/lib/libvirt/images/{{env.hostname.bootstrap}}-bootstrap.qcow2 \ No newline at end of file + command: chown qemu:qemu {{ env.z.lpar.storage_group.pool_path }}/{{env.cluster.nodes.bootstrap.hostname}}.qcow2 \ No newline at end of file diff --git a/roles/create_compute_nodes/tasks/main.yaml 
b/roles/create_compute_nodes/tasks/main.yaml index b87829d3..75a949d2 100644 --- a/roles/create_compute_nodes/tasks/main.yaml +++ b/roles/create_compute_nodes/tasks/main.yaml @@ -1,26 +1,22 @@ --- -- name: Load in variables from env.yaml - tags: create_compute_nodes - include_vars: env.yaml - - name: Install CoreOS on compute nodes tags: create_compute_nodes command: | virt-install \ - --name {{env.hostname.compute[i]}} \ - --disk size={{env.node_resources.compute.disk_size}} \ - --ram {{env.node_resources.compute.ram}} \ + --name {{env.cluster.nodes.compute.hostname[i]}} \ + --disk pool=default,size={{ env.cluster.nodes.compute.disk_size }} \ + --ram {{env.cluster.nodes.compute.ram}} \ --cpu host \ - --vcpus {{env.node_resources.compute.vcpu}} \ + --vcpus {{env.cluster.nodes.compute.vcpu}} \ --os-type linux \ - --os-variant rhel{{env.node_resources.compute.os_variant}} \ + --os-variant rhel{{env.cluster.nodes.compute.os_variant}} \ --network network=macvtap-net \ - --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.ip.bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env.ip.compute[i]}}::{{env.networking.gateway}}:{{env.networking.netmask}}:{{env.hostname.compute[i]}}::none:1500 nameserver={{env.networking.dns.nameserver}} coreos.inst.ignition_url=http://{{env.ip.bastion}}:8080/ignition/worker.ign" \ + --location {{ env.z.lpar.storage_group.pool_path }},kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{vault.bastion.networking.ip}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env.cluster.nodes.compute.ip[i]}}::{{env.z.lpar.networking.gateway}}:{{env.z.lpar.networking.subnetmask}}:{{env.cluster.nodes.compute.hostname[i]}}::none:1500 nameserver={{env.cluster.networking.nameserver}} 
coreos.inst.ignition_url=http://{{vault.bastion.networking.ip}}:8080/ignition/worker.ign" \ --wait=-1 \ --noautoconsole - with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 + with_sequence: start=0 end={{(env.cluster.nodes.compute.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -29,21 +25,21 @@ tags: create_compute_nodes command: | virt-install \ - --name {{env.hostname.infra[i]}} \ - --disk size={{env.node_resources.infra.disk_size}} \ - --ram {{env.node_resources.infra.ram}} \ + --name {{env.cluster.nodes.infra.hostname[i]}} \ + --disk pool=default,size={{ env.cluster.nodes.infra.disk_size }} \ + --ram {{env.cluster.nodes.infra.ram}} \ --cpu host \ - --vcpus {{env.node_resources.infra.vcpu}} \ + --vcpus {{env.cluster.nodes.infra.vcpu}} \ --os-type linux \ - --os-variant rhel{{env.node_resources.infra.os_variant}} \ + --os-variant rhel{{env.cluster.nodes.infra.os_variant}} \ --network network=macvtap-net \ - --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.ip.bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env.ip.infra[i]}}::{{env.networking.gateway}}:{{env.networking.netmask}}:{{env.hostname.infra[i]}}::none:1500 nameserver={{env.networking.dns.nameserver}} coreos.inst.ignition_url=http://{{env.ip.bastion}}:8080/ignition/worker.ign" \ + --location {{ env.z.lpar.storage_group.pool_path }},kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{vault.bastion.networking.ip}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env.cluster.nodes.infra.ip[i]}}::{{env.z.lpar.networking.gateway}}:{{env.z.lpar.networking.subnetmask}}:{{env.cluster.nodes.infra.hostname[i]}}::none:1500 nameserver={{env.cluster.networking.nameserver}} 
coreos.inst.ignition_url=http://{{vault.bastion.networking.ip}}:8080/ignition/worker.ign" \ --wait=-1 \ --noautoconsole - with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 + with_sequence: start=0 end={{(env.cluster.nodes.infra.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i - when: env.hostname.infra is defined + when: env.cluster.nodes.infra.hostname is defined diff --git a/roles/create_control_nodes/tasks/main.yaml b/roles/create_control_nodes/tasks/main.yaml index 49fe226a..22137681 100644 --- a/roles/create_control_nodes/tasks/main.yaml +++ b/roles/create_control_nodes/tasks/main.yaml @@ -1,27 +1,23 @@ --- -- name: Load in variables from env.yaml - tags: create_control_nodes - include_vars: env.yaml - - name: Install CoreOS on control nodes tags: create_control_nodes command: | virt-install \ - --name {{env.hostname.control[i]}} \ - --disk size={{env.node_resources.control.disk_size}} \ - --ram {{env.node_resources.control.ram}} \ + --name {{env.cluster.nodes.control.hostname[i]}} \ + --disk pool=default,size={{ env.cluster.nodes.control.disk_size }} \ + --ram {{env.cluster.nodes.control.ram}} \ --cpu host \ - --vcpus {{env.node_resources.control.vcpu}} \ + --vcpus {{env.cluster.nodes.control.vcpu}} \ --os-type linux \ - --os-variant rhel{{env.node_resources.control.os_variant}} \ + --os-variant rhel{{env.cluster.nodes.control.os_variant}} \ --network network=macvtap-net \ - --location /var/lib/libvirt/images,kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ - --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{env.ip.bastion}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env.ip.control[i]}}::{{env.networking.gateway}}:{{env.networking.netmask}}:{{env.hostname.control[i]}}::none:1500 nameserver={{env.networking.dns.nameserver}} coreos.inst.ignition_url=http://{{env.ip.bastion}}:8080/ignition/master.ign" \ + --location {{ 
env.z.lpar.storage_group.pool_path }},kernel=rhcos-live-kernel-s390x,initrd=rhcos-live-initramfs.s390x.img \ + --extra-args "rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url=http://{{vault.bastion.networking.ip}}:8080/bin/rhcos-live-rootfs.s390x.img ip={{env.cluster.nodes.control.ip[i]}}::{{env.z.lpar.networking.gateway}}:{{env.z.lpar.networking.subnetmask}}:{{env.cluster.nodes.control.hostname[i]}}::none:1500 nameserver={{env.cluster.networking.nameserver}} coreos.inst.ignition_url=http://{{vault.bastion.networking.ip}}:8080/ignition/master.ign" \ --graphics none \ --wait=-1 \ --noautoconsole - with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 + with_sequence: start=0 end={{(env.cluster.nodes.control.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i \ No newline at end of file diff --git a/roles/dns/tasks/main.yaml b/roles/dns/tasks/main.yaml index 0d22d90c..53f5a6d4 100644 --- a/roles/dns/tasks/main.yaml +++ b/roles/dns/tasks/main.yaml @@ -1,26 +1,22 @@ --- -- name: Load in variables from env.yaml - tags: dns - include_vars: env.yaml - - name: Enable named tags: dns ansible.builtin.systemd: - name: named - enabled: yes + name: named + enabled: yes - name: Start named tags: dns ansible.builtin.systemd: - name: named - state: started + name: named + state: started - name: Split IP addresses for use in templates tags: dns set_fact: - bastion_split_ip: "{{ env.ip.bastion.split('.') }}" - bootstrap_split_ip: "{{ env.ip.bootstrap.split('.') }}" + bastion_split_ip: "{{ vault.bastion.networking.ip.split('.') }}" + bootstrap_split_ip: "{{ env.cluster.nodes.bootstrap.ip.split('.') }}" - name: Template named.conf file to bastion tags: dns @@ -36,7 +32,7 @@ tags: dns template: src: dns.db.j2 - dest: /var/named/{{env.networking.metadata_name}}.db + dest: /var/named/{{env.cluster.networking.metadata_name}}.db owner: named group: named mode: '0755' @@ -45,10 +41,10 @@ - name: Add control nodes to 
DNS forwarding file on bastion tags: dns lineinfile: - path: /var/named/{{env.networking.metadata_name}}.db + path: /var/named/{{env.cluster.networking.metadata_name}}.db insertafter: "entries for the control nodes" - line: "{{ env.hostname.control[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. IN A {{ env.ip.control[i] }}" - with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 + line: "{{ env.cluster.nodes.control.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. IN A {{ env.cluster.nodes.control.ip[i] }}" + with_sequence: start=0 end={{(env.cluster.nodes.control.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -56,10 +52,10 @@ - name: Add compute nodes to DNS forwarding file on bastion tags: dns lineinfile: - path: /var/named/{{env.networking.metadata_name}}.db + path: /var/named/{{env.cluster.networking.metadata_name}}.db insertafter: "entries for the compute nodes" - line: "{{ env.hostname.compute[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. IN A {{ env.ip.compute[i] }}" - with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 + line: "{{ env.cluster.nodes.compute.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. IN A {{ env.cluster.nodes.compute.ip[i] }}" + with_sequence: start=0 end={{(env.cluster.nodes.compute.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -67,32 +63,20 @@ - name: Add infrastructure nodes to DNS forwarding file on bastion if requested tags: dns lineinfile: - path: /var/named/{{env.networking.metadata_name}}.db + path: /var/named/{{env.cluster.networking.metadata_name}}.db insertafter: "entries for extra RHEL VMs" - line: "{{ env.hostname.infra[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. 
IN A {{ env.ip.infra[i] }}" - with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 + line: "{{ env.cluster.nodes.infra.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. IN A {{ env.cluster.nodes.infra.ip[i] }}" + with_sequence: start=0 end={{(env.cluster.nodes.infra.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i - when: env.hostname.infra is defined - -- name: Add extra RHEL VM apps to DNS forwarding file on bastion if requested - tags: dns - lineinfile: - path: /var/named/{{env.networking.metadata_name}}.db - insertafter: "entries for extra RHEL VMs" - line: "{{ env.hostname.app[i] }}.{{ env.networking.base_domain }}. IN A {{ env.ip.app[i] }}" - with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 - loop_control: - extended: yes - index_var: i - when: env.hostname.app is defined + when: env.cluster.nodes.infra.hostname is defined - name: Template DNS reverse lookup file to bastion tags: dns template: src: dns.rev.j2 - dest: /var/named/{{env.networking.metadata_name}}.rev + dest: /var/named/{{env.cluster.networking.metadata_name}}.rev owner: named group: named mode: '0755' @@ -101,10 +85,10 @@ - name: Add control nodes to DNS reverse lookup file on bastion tags: dns lineinfile: - path: /var/named/{{env.networking.metadata_name}}.rev + path: /var/named/{{env.cluster.networking.metadata_name}}.rev insertafter: "PTR Record IP address to Hostname" - line: "{{ env.ip.control[i].split('.').3 }} IN PTR {{ env.hostname.control[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}." - with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 + line: "{{ env.cluster.nodes.control.ip[i].split('.').3 }} IN PTR {{ env.cluster.nodes.control.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}." 
+ with_sequence: start=0 end={{(env.cluster.nodes.control.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -112,10 +96,10 @@ - name: Add compute nodes to DNS reverse lookup file on bastion tags: dns lineinfile: - path: /var/named/{{env.networking.metadata_name}}.rev + path: /var/named/{{env.cluster.networking.metadata_name}}.rev insertafter: "PTR Record IP address to Hostname" - line: "{{ env.ip.compute[i].split('.').3 }} IN PTR {{ env.hostname.compute[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}." - with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 + line: "{{ env.cluster.nodes.compute.ip[i].split('.').3 }} IN PTR {{ env.cluster.nodes.compute.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}." + with_sequence: start=0 end={{(env.cluster.nodes.compute.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -123,26 +107,21 @@ - name: Add infrastructure nodes to DNS reverse lookup file on bastion tags: dns lineinfile: - path: /var/named/{{env.networking.metadata_name}}.rev + path: /var/named/{{env.cluster.networking.metadata_name}}.rev insertafter: "PTR Record IP address to Hostname" - line: "{{ env.ip.infra[i].split('.').3 }} IN PTR {{ env.hostname.infra[i] }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}." - with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 + line: "{{ env.cluster.nodes.infra.ip[i].split('.').3 }} IN PTR {{ env.cluster.nodes.infra.hostname[i] }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}." 
+ with_sequence: start=0 end={{(env.cluster.nodes.infra.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i - when: env.hostname.infra is defined + when: env.cluster.nodes.infra.hostname is defined -- name: Add extra RHEL VM apps to DNS reverse lookup file on bastion if requested +- name: Add cluster nameserver to bastion's resolv.conf file, replacing default tags: dns lineinfile: - path: /var/named/{{env.networking.metadata_name}}.rev - insertafter: "PTR Record IP address to Hostname" - line: "{{ env.ip.app[i].split('.').3 }} IN PTR {{ env.hostname.app[i] }}.{{ env.networking.base_domain }}." - with_sequence: start=0 end={{(env.hostname.app | length) - 1}} stride=1 - loop_control: - extended: yes - index_var: i - when: env.hostname.app is defined + path: /etc/resolv.conf + regexp: "nameserver" + line: "nameserver {{ env.cluster.networking.nameserver }}" - name: Restart named to update changes made to DNS tags: dns diff --git a/roles/dns/templates/dns-named.conf.j2 b/roles/dns/templates/dns-named.conf.j2 index 0f872308..946c55a5 100644 --- a/roles/dns/templates/dns-named.conf.j2 +++ b/roles/dns/templates/dns-named.conf.j2 @@ -18,7 +18,7 @@ options { secroots-file "/var/named/data/named.secroots"; recursing-file "/var/named/data/named.recursing"; allow-query { any; }; - forwarders { {{ env.networking.dns.forwarder }}; }; + forwarders { {{ env.cluster.networking.forwarder }}; }; /* - If you are building an AUTHORITATIVE DNS server, do NOT enable recursion. 
@@ -60,9 +60,9 @@ include "/etc/named.rfc1912.zones"; include "/etc/named.root.key"; //forward zone -zone "{{ env.networking.base_domain }}" IN { +zone "{{ env.cluster.networking.base_domain }}" IN { type master; - file "/var/named/{{ env.networking.metadata_name }}.db"; + file "/var/named/{{ env.cluster.networking.metadata_name }}.db"; allow-update { any; }; allow-query { any; }; }; @@ -70,7 +70,7 @@ zone "{{ env.networking.base_domain }}" IN { //backward zone zone "{{ bastion_split_ip.2 }}.{{ bastion_split_ip.1 }}.{{ bastion_split_ip.0 }}.in-addr.arpa" IN { type master; - file "/var/named/{{ env.networking.metadata_name }}.rev"; + file "/var/named/{{ env.cluster.networking.metadata_name }}.rev"; allow-update { any; }; allow-query { any; }; }; diff --git a/roles/dns/templates/dns.db.j2 b/roles/dns/templates/dns.db.j2 index 7ad7b6ea..18c78632 100644 --- a/roles/dns/templates/dns.db.j2 +++ b/roles/dns/templates/dns.db.j2 @@ -1,5 +1,5 @@ $TTL 86400 -@ IN SOA {{ env.hostname.bastion }}.{{ env.networking.base_domain }}. admin.{{ env.networking.base_domain }}.( +@ IN SOA {{ env.bastion.networking.hostname }}.{{ env.bastion.networking.base_domain }}. admin.{{ env.bastion.networking.base_domain }}.( 2020021821 ;Serial 3600 ;Refresh 1800 ;Retry @@ -8,25 +8,24 @@ $TTL 86400 ) ;Name Server / Bastion Information -@ IN NS {{ env.hostname.bastion }}.{{ env.networking.base_domain }}. +@ IN NS {{ env.bastion.networking.hostname }}.{{ env.bastion.networking.base_domain }}. ;IP Address for Name Server -{{ env.hostname.bastion }} IN A {{ env.ip.bastion }} +{{ env.bastion.networking.hostname }} IN A {{ vault.bastion.networking.ip }} ;entry for bootstrap host. -{{ env.hostname.bootstrap }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. IN A {{ env.ip.bootstrap }} +{{ env.cluster.nodes.bootstrap.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. 
IN A {{ env.cluster.nodes.bootstrap.ip }} ;entries for the control nodes ;entries for the compute nodes -;entries for extra RHEL VMs (if requested) - ;The api identifies the IP of your load balancer. -api.{{ env.networking.metadata_name }} IN CNAME {{ env.hostname.bastion }}.{{ env.networking.base_domain }}. -api-int.{{ env.networking.metadata_name }} IN CNAME {{ env.hostname.bastion }}.{{ env.networking.base_domain }}. +api.{{ env.cluster.networking.metadata_name }} IN CNAME {{ env.bastion.networking.hostname }}.{{ env.cluster.networking.base_domain }}. +api-int.{{ env.cluster.networking.metadata_name }} IN CNAME {{ env.bastion.networking.hostname }}.{{ env.cluster.networking.base_domain }}. ;The wildcard also identifies the load balancer. -*.apps.{{ env.networking.metadata_name }} IN CNAME {{ env.hostname.bastion }}.{{ env.networking.base_domain }}. +apps.{{ env.cluster.networking.metadata_name }} IN CNAME {{ env.bastion.networking.hostname }}.{{ env.cluster.networking.base_domain }}. +*.apps.{{ env.cluster.networking.metadata_name }} IN CNAME {{ env.bastion.networking.hostname }}.{{ env.cluster.networking.base_domain }}. ;EOF \ No newline at end of file diff --git a/roles/dns/templates/dns.rev.j2 b/roles/dns/templates/dns.rev.j2 index 42943bbb..eb3139b5 100644 --- a/roles/dns/templates/dns.rev.j2 +++ b/roles/dns/templates/dns.rev.j2 @@ -1,5 +1,5 @@ $TTL 86400 -@ IN SOA {{ env.hostname.bastion }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. admin.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }} ( +@ IN SOA {{ env.bastion.networking.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. 
admin.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }} ( 2020011800 ;Serial 3600 ;Refresh 1800 ;Retry @@ -7,13 +7,13 @@ $TTL 86400 86400 ;Minimum TTL ) ;Name Server Information -@ IN NS {{ env.hostname.bastion }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. -{{ env.hostname.bastion }} IN A {{ env.ip.bastion }} +@ IN NS {{ env.bastion.networking.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. +{{ env.bastion.networking.hostname }} IN A {{ vault.bastion.networking.ip }} ;Reverse lookup for Name Server -{{ bastion_split_ip.3 }} IN PTR {{ env.hostname.bastion }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. +{{ bastion_split_ip.3 }} IN PTR {{ env.bastion.networking.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. ;PTR Record IP address to Hostname -{{ bootstrap_split_ip.3 }} IN PTR {{ env.hostname.bootstrap }}.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. -{{ bastion_split_ip.3 }} IN PTR api-int.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. -{{ bastion_split_ip.3 }} IN PTR api.{{ env.networking.metadata_name }}.{{ env.networking.base_domain }}. \ No newline at end of file +{{ bootstrap_split_ip.3 }} IN PTR {{ env.cluster.nodes.bootstrap.hostname }}.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. +{{ bastion_split_ip.3 }} IN PTR api-int.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. +{{ bastion_split_ip.3 }} IN PTR api.{{ env.cluster.networking.metadata_name }}.{{ env.cluster.networking.base_domain }}. 
\ No newline at end of file diff --git a/roles/get_ocp/tasks/main.yaml b/roles/get_ocp/tasks/main.yaml index 8aed4be3..213c6ff1 100644 --- a/roles/get_ocp/tasks/main.yaml +++ b/roles/get_ocp/tasks/main.yaml @@ -1,9 +1,5 @@ --- -- name: Load in variables from env.yaml - tags: get_ocp - include_vars: env.yaml - - name: Create directory bin for mirrors tags: get_ocp become: true @@ -11,8 +7,8 @@ path: /var/www/html/bin state: directory mode: '0755' - owner: "{{ env.access.login.bastion.user }}" - group: "{{ env.access.login.bastion.user }}" + owner: "{{ env.bastion.access.user }}" + group: "{{ env.bastion.access.user }}" - name: Check to see if rootfs already exists on bastion tags: get_ocp @@ -33,20 +29,20 @@ tags: get_ocp become: true file: - path: /home/{{ env.access.login.bastion.user }}/ocpinst + path: /home/{{ env.bastion.access.user }}/ocpinst state: absent - name: Create OCP download directory tags: get_ocp file: - path: /home/{{ env.access.login.bastion.user }}/ocpinst + path: /home/{{ env.bastion.access.user }}/ocpinst state: directory - name: Unzip OCP client and installer tags: get_ocp ansible.builtin.unarchive: src: "{{ item }}" - dest: /home/{{ env.access.login.bastion.user }}/ocpinst/ + dest: /home/{{ env.bastion.access.user }}/ocpinst/ remote_src: yes loop: - "{{ env.openshift.client }}" @@ -56,7 +52,7 @@ tags: get_ocp become: true ansible.builtin.copy: - src: /home/{{ env.access.login.bastion.user }}/ocpinst/{{item}} + src: /home/{{ env.bastion.access.user }}/ocpinst/{{item}} dest: /usr/local/bin/{{item}} mode: '0755' remote_src: yes @@ -72,12 +68,12 @@ dest: "{{ item }}" force: yes loop: - - /home/{{ env.access.login.bastion.user }}/ocpinst/install-config.yaml - - /home/{{ env.access.login.bastion.user }}/ocpinst/install-config-backup.yaml + - /home/{{ env.bastion.access.user }}/ocpinst/install-config.yaml + - /home/{{ env.bastion.access.user }}/ocpinst/install-config-backup.yaml - name: Capture OCP public key tags: get_ocp - command: cat /home/{{ 
env.access.login.bastion.user }}/.ssh/id_rsa.pub + command: cat /home/{{ env.bastion.access.user }}/.ssh/id_rsa.pub register: ocp_pub_key - name: Place SSH key in install-config @@ -86,26 +82,26 @@ line: "sshKey: '{{ ocp_pub_key.stdout }}'" path: "{{ item }}" loop: - - /home/{{ env.access.login.bastion.user }}/ocpinst/install-config.yaml - - /home/{{ env.access.login.bastion.user }}/ocpinst/install-config-backup.yaml + - /home/{{ env.bastion.access.user }}/ocpinst/install-config.yaml + - /home/{{ env.bastion.access.user }}/ocpinst/install-config-backup.yaml - name: Create manifests tags: get_ocp - command: /home/{{ env.access.login.bastion.user }}/ocpinst/openshift-install create manifests --dir=/home/{{ env.access.login.bastion.user }}/ocpinst/ + command: /home/{{ env.bastion.access.user }}/ocpinst/openshift-install create manifests --dir=/home/{{ env.bastion.access.user }}/ocpinst/ become: true - name: Set masters schedulable parameter to false tags: get_ocp become: true replace: - path: /home/{{ env.access.login.bastion.user }}/ocpinst/manifests/cluster-scheduler-02-config.yml + path: /home/{{ env.bastion.access.user }}/ocpinst/manifests/cluster-scheduler-02-config.yml regexp: ': true' replace: ': false' - name: Set permissions for ocpinst directory contents to bastion admin user tags: get_ocp become: true - command: chmod 0755 /home/{{ env.access.login.bastion.user }}/ocpinst/{{item}} + command: chmod 0755 /home/{{ env.bastion.access.user }}/ocpinst/{{item}} loop: - manifests - openshift @@ -115,7 +111,7 @@ - name: Set ownership of ocpinst directory contents to bastion admin user tags: get_ocp become: true - command: chown {{ env.access.login.bastion.user }}:{{ env.access.login.bastion.user }} /home/{{ env.access.login.bastion.user }}/ocpinst/{{item}} + command: chown {{ env.bastion.access.user }}:{{ env.bastion.access.user }} /home/{{ env.bastion.access.user }}/ocpinst/{{item}} loop: - manifests - openshift @@ -125,12 +121,12 @@ - name: Create ignition files 
tags: get_ocp become: true - command: /home/{{ env.access.login.bastion.user }}/ocpinst/openshift-install create ignition-configs --dir=/home/{{ env.access.login.bastion.user }}/ocpinst/ + command: /home/{{ env.bastion.access.user }}/ocpinst/openshift-install create ignition-configs --dir=/home/{{ env.bastion.access.user }}/ocpinst/ - name: Set permissions of ignitions and related files to bastion admin user tags: get_ocp become: true - command: chmod 0755 /home/{{ env.access.login.bastion.user }}/ocpinst/{{item}} + command: chmod 0755 /home/{{ env.bastion.access.user }}/ocpinst/{{item}} loop: - bootstrap.ign - master.ign @@ -141,7 +137,7 @@ - name: Set ownership of ignitions and related files to bastion admin user tags: get_ocp become: true - command: chown {{ env.access.login.bastion.user }}:{{ env.access.login.bastion.user }} /home/{{ env.access.login.bastion.user }}/ocpinst/{{item}} + command: chown {{ env.bastion.access.user }}:{{ env.bastion.access.user }} /home/{{ env.bastion.access.user }}/ocpinst/{{item}} loop: - bootstrap.ign - master.ign @@ -162,12 +158,12 @@ tags: get_ocp become: true copy: - src: /home/{{ env.access.login.bastion.user }}/ocpinst/{{ item }}.ign + src: /home/{{ env.bastion.access.user }}/ocpinst/{{ item }}.ign dest: /var/www/html/ignition remote_src: yes mode: '775' - group: "{{ env.access.login.bastion.user }}" - owner: "{{ env.access.login.bastion.user }}" + group: "{{ env.bastion.access.user }}" + owner: "{{ env.bastion.access.user }}" loop: - bootstrap - master diff --git a/roles/get_ocp/templates/install-config.yaml.j2 b/roles/get_ocp/templates/install-config.yaml.j2 index 06067293..4b471a1b 100644 --- a/roles/get_ocp/templates/install-config.yaml.j2 +++ b/roles/get_ocp/templates/install-config.yaml.j2 @@ -1,17 +1,17 @@ apiVersion: {{ env.install_config.api_version }} -baseDomain: {{ env.networking.base_domain }} +baseDomain: {{ env.cluster.networking.base_domain }} compute: - hyperthreading: {{ 
env.install_config.compute.hyperthreading }} name: worker - replicas: {{(env.ip.compute | length)}} + replicas: {{(env.cluster.nodes.compute.ip | length)}} architecture: {{ env.install_config.compute.architecture }} controlPlane: hyperthreading: {{ env.install_config.control.hyperthreading }} name: master - replicas: {{(env.ip.control | length)}} + replicas: {{(env.cluster.nodes.control.ip | length)}} architecture: {{ env.install_config.control.architecture }} metadata: - name: {{ env.networking.metadata_name }} + name: {{ env.cluster.networking.metadata_name }} networking: clusterNetwork: - cidr: {{ env.install_config.cluster_network.cidr }} @@ -22,4 +22,4 @@ networking: platform: none: {} fips: {{ env.install_config.fips }} -pullSecret: '{{ env.redhat.pull_secret }}' \ No newline at end of file +pullSecret: '{{ vault.redhat.pull_secret }}' \ No newline at end of file diff --git a/roles/haproxy/tasks/main.yaml b/roles/haproxy/tasks/main.yaml index 0b167461..42dfde39 100644 --- a/roles/haproxy/tasks/main.yaml +++ b/roles/haproxy/tasks/main.yaml @@ -1,9 +1,5 @@ --- -- name: Load in variables from env.yaml - tags: haproxy - include_vars: env.yaml - - name: Change permissive domain for haproxy tags: selinux,haproxy selinux_permissive: @@ -21,10 +17,10 @@ - name: Add control node information to 6443 section in haproxy config tags: haproxy lineinfile: - line: " server {{ env.hostname.control[i] }} {{env.ip.control[i]}}:6443 check inter 1s" + line: " server {{ env.cluster.nodes.control.hostname[i] }} {{env.cluster.nodes.control.ip[i]}}:6443 check inter 1s" path: /etc/haproxy/haproxy.cfg insertafter: "6443 section" - with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 + with_sequence: start=0 end={{(env.cluster.nodes.control.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -32,10 +28,10 @@ - name: Add control node information to 22623 section in haproxy config tags: haproxy lineinfile: - line: " server {{ 
env.hostname.control[i] }} {{env.ip.control[i]}}:22623 check inter 1s" + line: " server {{ env.cluster.nodes.control.hostname[i] }} {{env.cluster.nodes.control.ip[i]}}:22623 check inter 1s" path: /etc/haproxy/haproxy.cfg insertafter: "22623 section" - with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 + with_sequence: start=0 end={{(env.cluster.nodes.control.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -43,10 +39,10 @@ - name: Add compute node information to 443 section in haproxy config tags: haproxy lineinfile: - line: " server {{ env.hostname.compute[i] }} {{ env.ip.compute[i] }}:443 check inter 1s" + line: " server {{ env.cluster.nodes.compute.hostname[i] }} {{ env.cluster.nodes.compute.ip[i] }}:443 check inter 1s" path: /etc/haproxy/haproxy.cfg insertafter: "443 section" - with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 + with_sequence: start=0 end={{(env.cluster.nodes.compute.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -54,21 +50,21 @@ - name: Add infrastructure node information to 443 section in haproxy config tags: haproxy lineinfile: - line: " server {{ env.hostname.infra[i] }} {{ env.ip.infra[i] }}:443 check inter 1s" + line: " server {{ env.cluster.nodes.infra.hostname[i] }} {{ env.cluster.nodes.infra.ip[i] }}:443 check inter 1s" path: /etc/haproxy/haproxy.cfg insertafter: "443 section" - with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 + with_sequence: start=0 end={{(env.cluster.nodes.infra.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i - when: env.hostname.infra is defined + when: env.cluster.nodes.infra.hostname is defined - name: Add compute node information to 80 section in haproxy config tags: haproxy lineinfile: - line: " server {{ env.hostname.compute[i] }} {{ env.ip.compute[i] }}:80 check inter 1s" + line: " server {{ env.cluster.nodes.compute.hostname[i] }} {{ 
env.cluster.nodes.compute.ip[i] }}:80 check inter 1s" path: /etc/haproxy/haproxy.cfg - with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 + with_sequence: start=0 end={{(env.cluster.nodes.compute.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -76,20 +72,20 @@ - name: Add infrastructure node information to 80 section in haproxy config tags: haproxy lineinfile: - line: " server {{ env.hostname.infra[i] }} {{ env.ip.infra[i] }}:80 check inter 1s" + line: " server {{ env.cluster.nodes.infra.hostname[i] }} {{ env.cluster.nodes.infra.ip[i] }}:80 check inter 1s" path: /etc/haproxy/haproxy.cfg - with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 + with_sequence: start=0 end={{(env.cluster.nodes.infra.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i - when: env.hostname.infra is defined + when: env.cluster.nodes.infra.hostname is defined - name: Set haproxy boolean to enable connections tags: haproxy command: setsebool -P haproxy_connect_any 1 - name: Enable haproxy - tags: dns + tags: haproxy ansible.builtin.systemd: name: haproxy enabled: yes diff --git a/roles/haproxy/templates/haproxy.cfg.j2 b/roles/haproxy/templates/haproxy.cfg.j2 index df2d8eb9..2f055b89 100644 --- a/roles/haproxy/templates/haproxy.cfg.j2 +++ b/roles/haproxy/templates/haproxy.cfg.j2 @@ -27,19 +27,19 @@ frontend stats stats hide-version stats refresh 30s stats show-node - stats show-desc Stats for {{env.networking.metadata_name}} cluster - stats auth admin:{{env.networking.metadata_name}} + stats show-desc Stats for {{env.cluster.networking.metadata_name}} cluster + stats auth admin:{{env.cluster.networking.metadata_name}} stats uri /stats listen api-server-6443 bind *:6443 mode tcp #6443 section - server {{ env.hostname.bootstrap }} {{env.ip.bootstrap}}:6443 check inter 1s backup + server {{ env.cluster.nodes.bootstrap.hostname }} {{env.cluster.nodes.bootstrap.ip}}:6443 check inter 1s backup listen 
machine-config-server-22623 bind *:22623 mode tcp #22623 section - server {{ env.hostname.bootstrap }} {{env.ip.bootstrap}}:22623 check inter 1s backup + server {{ env.cluster.nodes.bootstrap.hostname }} {{env.cluster.nodes.bootstrap.ip}}:22623 check inter 1s backup listen ingress-router-443 bind *:443 mode tcp diff --git a/roles/macvtap/tasks/main.yaml b/roles/macvtap/tasks/main.yaml index 6df00c66..6ff05cf6 100644 --- a/roles/macvtap/tasks/main.yaml +++ b/roles/macvtap/tasks/main.yaml @@ -1,22 +1,27 @@ --- -- name: Set up macvtap bridge +- name: Start and enable libvirt + tags: macvtap + service: + name: libvirtd + enabled: yes + state: started + +- name: Set up macvtap bridge configuration xml from template to KVM host tags: macvtap community.libvirt.virt_net: command: define name: macvtap-net - autostart: true xml: "{{ lookup ('template', 'macvtap.xml.j2') }}" -- name: Start macvtap-net +- name: Start macvtap-net bridge tags: macvtap community.libvirt.virt_net: - autostart: yes command: start name: macvtap-net - -- name: Set autostart for macvtap-net + +- name: Set macvtap-net bridge to autostart tags: macvtap community.libvirt.virt_net: autostart: yes - name: macvtap-net + name: macvtap-net \ No newline at end of file diff --git a/roles/macvtap/templates/macvtap.xml.j2 b/roles/macvtap/templates/macvtap.xml.j2 index 6d0ab6fb..9a5a746a 100644 --- a/roles/macvtap/templates/macvtap.xml.j2 +++ b/roles/macvtap/templates/macvtap.xml.j2 @@ -1,6 +1,6 @@ macvtap-net - - + + diff --git a/roles/prep_kvm_guests/tasks/main.yaml b/roles/prep_kvm_guests/tasks/main.yaml index 79b36169..6cd33c65 100644 --- a/roles/prep_kvm_guests/tasks/main.yaml +++ b/roles/prep_kvm_guests/tasks/main.yaml @@ -1,20 +1,16 @@ --- -- name: Load in variables from env.yaml - tags: prep_kvm_guests - include_vars: env.yaml - - name: Check to see if kernel already exists on KVM host tags: prep_kvm_guests stat: - path: /var/lib/libvirt/images/rhcos-live-kernel-s390x + path: "{{ 
env.z.lpar.storage_group.pool_path }}/rhcos-live-kernel-s390x" register: kernel_check - name: Get Red Hat CoreOS kernel tags: prep_kvm_guests get_url: url: "{{ env.coreos.kernel }}" - dest: /var/lib/libvirt/images/rhcos-live-kernel-s390x + dest: "{{ env.z.lpar.storage_group.pool_path }}/rhcos-live-kernel-s390x" mode: '0755' force: yes when: kernel_check.stat.exists == false @@ -22,14 +18,14 @@ - name: Check to see if initramfs already exists on KVM host tags: prep_kvm_guests stat: - path: /var/lib/libvirt/images/rhcos-live-initramfs.s390x.img + path: "{{ env.z.lpar.storage_group.pool_path }}/rhcos-live-initramfs.s390x.img" register: initramfs_check - name: Get Red Hat CoreOS initramfs tags: prep_kvm_guests get_url: url: "{{ env.coreos.initramfs }}" - dest: /var/lib/libvirt/images/rhcos-live-initramfs.s390x.img + dest: "{{ env.z.lpar.storage_group.pool_path }}/rhcos-live-initramfs.s390x.img" mode: '0755' force: yes when: initramfs_check.stat.exists == false @@ -37,7 +33,7 @@ - name: Add admin user to qemu and libvirt groups tags: prep_kvm_guests become: true - command: usermod -aG {{item}} {{ env.access.login.kvm.user }} + command: usermod -aG {{item}} {{ env.z.lpar.access.user }} loop: - libvirt - qemu \ No newline at end of file diff --git a/roles/reset_files/tasks/main.yaml b/roles/reset_files/tasks/main.yaml index e56825cc..f58bf7ca 100644 --- a/roles/reset_files/tasks/main.yaml +++ b/roles/reset_files/tasks/main.yaml @@ -1,6 +1,4 @@ - -- name: Load in variables from env.yaml - include_vars: env.yaml +--- - name: Delete files_to_reset from teardown.yaml file: diff --git a/roles/set_firewall/tasks/main.yaml b/roles/set_firewall/tasks/main.yaml index cbc3f2c0..0f7babf6 100644 --- a/roles/set_firewall/tasks/main.yaml +++ b/roles/set_firewall/tasks/main.yaml @@ -1,5 +1,11 @@ --- +- name: start firewalld service + service: + name: firewalld + state: started + enabled: true + - name: Add ports to firewall tags: set_firewall firewalld: diff --git 
a/roles/ssh_copy_id/tasks/main.yaml b/roles/ssh_copy_id/tasks/main.yaml index c978a313..57d45438 100644 --- a/roles/ssh_copy_id/tasks/main.yaml +++ b/roles/ssh_copy_id/tasks/main.yaml @@ -2,10 +2,7 @@ - name: Load in variables tags: ssh_copy_id, ssh - include_vars: "{{item}}" - with_items: - - env.yaml - - roles/ssh_copy_id/vars/path_to_key_pair.yaml + include_vars: "{{inventory_dir}}/group_vars/all.yaml" - name: Get ansible.pub key for check in next task tags: ssh_copy_id, ssh @@ -23,18 +20,17 @@ path: "~/.ssh/known_hosts" line: "{{ ssh_target[0] }}" state: absent - delegate_to: workstation - name: Use template file to create expect script tags: ssh_copy_id, ssh template: src: ssh-copy-id.exp.j2 - dest: roles/ssh_copy_id/files/ssh-copy-id-expect-pass.exp + dest: "{{ role_path }}/files/ssh-copy-id-expect-pass.exp" force: yes - name: Copy SSH ID to remote host with pre-provided password tags: ssh_copy_id, ssh - command: "expect roles/ssh_copy_id/files/ssh-copy-id-expect-pass.exp" + command: "expect {{ role_path }}/files/ssh-copy-id-expect-pass.exp" register: ssh_copy - name: Print results of copying ssh id to remote host. 
@@ -45,11 +41,11 @@ - name: Delete templated expect script tags: ssh_copy_id, ssh file: - path: roles/ssh_copy_id/files/ssh-copy-id-expect-pass.exp + path: "{{ role_path }}/files/ssh-copy-id-expect-pass.exp" state: absent - name: Re-create ssh-copy-id files folder tags: ssh_copy_id, ssh file: - path: roles/ssh_copy_id/files/ + path: "{{ role_path }}/files/" state: directory \ No newline at end of file diff --git a/roles/ssh_copy_id/vars/path_to_key_pair.yaml b/roles/ssh_copy_id/vars/path_to_key_pair.yaml deleted file mode 100644 index f78991d6..00000000 --- a/roles/ssh_copy_id/vars/path_to_key_pair.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# Will be filled in by ssh-key-gen role -path_to_key_pair: /Users/jacob/.ssh/ansible.pub diff --git a/roles/ssh_key_gen/tasks/main.yaml b/roles/ssh_key_gen/tasks/main.yaml index d321a745..f1e87b55 100644 --- a/roles/ssh_key_gen/tasks/main.yaml +++ b/roles/ssh_key_gen/tasks/main.yaml @@ -1,7 +1,4 @@ --- -- name: Load in variables from env.yaml - tags: ssh_key_gen, ssh - include_vars: env.yaml - name: Check to see if local SSH directory exists tags: ssh_key_gen, ssh @@ -23,7 +20,7 @@ community.crypto.openssh_keypair: path: ~/.ssh/ansible passphrase: "" - comment: "{{ env.access.ssh.ansible_key_comment }}" + comment: "Ansible-OpenShift-Provisioning SSH key" regenerate: always register: ssh_key_creation @@ -32,14 +29,9 @@ debug: var: ssh_key_creation -- name: Create a vars file for path to key - tags: ssh_key_gen, ssh - file: - state: touch - path: roles/ssh_copy_id/vars/path_to_key_pair.yaml - - name: Save path to key pair for use in ssh-copy-id role tags: ssh_key_gen, ssh lineinfile: line: "path_to_key_pair: {{ssh_key_creation.filename}}.pub" - path: roles/ssh_copy_id/vars/path_to_key_pair.yaml \ No newline at end of file + path: "{{inventory_dir}}/group_vars/all.yaml" + insertafter: EOF #end of file \ No newline at end of file diff --git a/roles/ssh_ocp_key_gen/tasks/main.yaml b/roles/ssh_ocp_key_gen/tasks/main.yaml index 
d2ed9265..b57f2b3a 100644 --- a/roles/ssh_ocp_key_gen/tasks/main.yaml +++ b/roles/ssh_ocp_key_gen/tasks/main.yaml @@ -1,52 +1,47 @@ --- -- name: Load in variables from env.yaml - tags: ssh_ocp_key_gen, ssh - include_vars: env.yaml - - name: Check to see if local SSH directory exists - tags: ssh_ocp_key_gen, ssh + tags: ssh_ocp_key_gen stat: - path: /home/{{ env.access.login.bastion.user }}/.ssh + path: /home/{{ env.bastion.access.user }}/.ssh register: ssh_directory_exists_check - name: Create SSH local directory if it doesn't already exist - tags: ssh_ocp_key_gen, ssh + tags: ssh_ocp_key_gen file: - path: /home/{{ env.access.login.bastion.user }}/.ssh + path: /home/{{ env.bastion.access.user }}/.ssh state: directory mode: "0700" register: ssh_directory_creation when: ssh_directory_exists_check.stat.exists == false -- name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key if it doesn't exist already - tags: ssh_ocp_key_gen, ssh +- name: Generate an OpenSSH keypair with the default values (4096 bits, rsa) and encrypted private key. 
+ tags: ssh_ocp_key_gen community.crypto.openssh_keypair: - path: /home/{{ env.access.login.bastion.user }}/.ssh/id_rsa + path: "/home/{{ env.bastion.access.user }}/.ssh/id_rsa" backend: opensshbin - owner: "{{ env.access.login.bastion.user }}" + owner: "{{ env.bastion.access.user }}" passphrase: "" - comment: "{{ env.access.ssh.ocp_key_comment }}" + comment: "{{ env.bastion.access.ocp_ssh_key_comment }}" regenerate: always register: ssh_ocp - name: Print results of SSH key generation - tags: ssh_ocp_key_gen, ssh + tags: ssh_ocp_key_gen debug: var: ssh_ocp.public_key when: ssh_ocp.changed == true - name: Set SSH key permissions - tags: ssh_ocp_key_gen, ssh - command: chmod 600 /home/{{ env.access.login.bastion.user }}/.ssh/{{item}} + tags: ssh_ocp_key_gen + command: chmod 600 /home/{{ env.bastion.access.user }}/.ssh/{{ item }} loop: - id_rsa - id_rsa.pub - name: Set SSH key ownership - tags: ssh_ocp_key_gen, ssh - command: chown {{ env.access.login.bastion.user }}:{{ env.access.login.bastion.user }} /home/{{ env.access.login.bastion.user }}/.ssh/{{item}} + tags: ssh_ocp_key_gen + command: chown {{ env.bastion.access.user }}:{{ env.bastion.access.user }} /home/{{ env.bastion.access.user }}/.ssh/{{item}} loop: - id_rsa - - id_rsa.pub - + - id_rsa.pub \ No newline at end of file diff --git a/roles/teardown_vms/tasks/main.yaml b/roles/teardown_vms/tasks/main.yaml index 92b8f36e..2a6e637d 100644 --- a/roles/teardown_vms/tasks/main.yaml +++ b/roles/teardown_vms/tasks/main.yaml @@ -2,35 +2,35 @@ - name: Destroy bastion for full, skip for partial teardown. Expect ignored errors if it is already destroyed. community.libvirt.virt: - name: "{{ env.hostname.bastion }}" + name: "{{ env.bastion.networking.hostname }}" command: destroy when: bastion_teardown ignore_errors: true - name: Undefine bastion for full, skip for partial teardown. Expect ignored errors if it is already undefined. 
community.libvirt.virt: - name: "{{ env.hostname.bastion }}" + name: "{{ env.bastion.networking.hostname }}" command: undefine when: bastion_teardown ignore_errors: true - name: Destroy bootstrap. Expect ignored errors if it is already destroyed. community.libvirt.virt: - name: "{{ env.hostname.bootstrap }}" + name: "{{ env.cluster.nodes.bootstrap.hostname }}" command: destroy ignore_errors: true - name: Undefine bootstrap. Expect ignored errors if it is already undefined. community.libvirt.virt: - name: "{{ env.hostname.bootstrap }}" + name: "{{ env.cluster.nodes.bootstrap.hostname }}" command: undefine ignore_errors: true - name: Destroy running control nodes. Expect ignored errors if some VMs are already destroyed. community.libvirt.virt: - name: "{{ env.hostname.control[i] }}" + name: "{{ env.cluster.nodes.control.hostname[i] }}" command: destroy - with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 + with_sequence: start=0 end={{(env.cluster.nodes.control.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -38,9 +38,9 @@ - name: Undefine remaining control nodes. Expect ignored errors if some VMs are already undefined. community.libvirt.virt: - name: "{{ env.hostname.control[i] }}" + name: "{{ env.cluster.nodes.control.hostname[i] }}" command: undefine - with_sequence: start=0 end={{(env.hostname.control | length) - 1}} stride=1 + with_sequence: start=0 end={{(env.cluster.nodes.control.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -48,9 +48,9 @@ - name: Destroy running compute nodes. Expect ignored errors if some VMs are already destroyed. 
community.libvirt.virt: - name: "{{ env.hostname.compute[i] }}" + name: "{{ env.cluster.nodes.compute.hostname[i] }}" command: destroy - with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 + with_sequence: start=0 end={{(env.cluster.nodes.compute.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -58,9 +58,9 @@ - name: Undefine remaining compute nodes. Expect ignored errors if some VMs are already undefined. community.libvirt.virt: - name: "{{ env.hostname.compute[i] }}" + name: "{{ env.cluster.nodes.compute.hostname[i] }}" command: undefine - with_sequence: start=0 end={{(env.hostname.compute | length) - 1}} stride=1 + with_sequence: start=0 end={{(env.cluster.nodes.compute.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i @@ -68,22 +68,22 @@ - name: Destroy running infrastructure nodes, if defined. Expect ignored errors if some VMs are already destroyed. community.libvirt.virt: - name: "{{ env.hostname.infra[i] }}" + name: "{{ env.cluster.nodes.infra.hostname[i] }}" command: destroy - with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 + with_sequence: start=0 end={{(env.cluster.nodes.infra.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i ignore_errors: true - when: env.hostname.infra is defined + when: env.cluster.nodes.infra.hostname is defined - name: Undefine remaining infrastructure nodes. Expect ignored errors if some VMs are already undefined. 
community.libvirt.virt: - name: "{{ env.hostname.infra[i] }}" + name: "{{ env.cluster.nodes.infra.hostname[i] }}" command: undefine - with_sequence: start=0 end={{(env.hostname.infra | length) - 1}} stride=1 + with_sequence: start=0 end={{(env.cluster.nodes.infra.hostname | length) - 1}} stride=1 loop_control: extended: yes index_var: i ignore_errors: true - when: env.hostname.infra is defined \ No newline at end of file + when: env.cluster.nodes.infra.hostname is defined \ No newline at end of file diff --git a/roles/wait_for_bootstrap/tasks/main.yaml b/roles/wait_for_bootstrap/tasks/main.yaml index 11ec6139..18bc9d04 100644 --- a/roles/wait_for_bootstrap/tasks/main.yaml +++ b/roles/wait_for_bootstrap/tasks/main.yaml @@ -15,7 +15,7 @@ - name: Watch wait-for bootstrap-complete process. tags: wait_for_bootstrap - shell: openshift-install wait-for bootstrap-complete --dir=/home/{{ env.access.login.bastion.user }}/ocpinst + shell: openshift-install wait-for bootstrap-complete --dir=/home/{{ env.access.bastion.user }}/ocpinst async: 3600 poll: 0 register: watch_bootstrap @@ -32,23 +32,23 @@ - name: Destroy bootstrap. Expect ignored errors if bootstrap is already destroyed. tags: wait_for_bootstrap community.libvirt.virt: - name: "{{ env.hostname.bootstrap }}" + name: "{{ env.cluster.nodes.bootstrap.hostname }}" command: destroy ignore_errors: true - delegate_to: "{{ env.ip.kvm }}" + delegate_to: "{{ vault.z.lpar.networking.ip }}" - name: Undefine bootstrap. Expect ignored errors if bootstrap is already undefined. 
tags: wait_for_bootstrap community.libvirt.virt: - name: "{{ env.hostname.bootstrap }}" + name: "{{ env.cluster.nodes.bootstrap.hostname }}" command: undefine ignore_errors: true - delegate_to: "{{ env.ip.kvm }}" + delegate_to: "{{ vault.z.lpar.networking.ip }}" - name: Remove qcow2 tags: wait_for_bootstrap become: true file: - path: /var/lib/libvirt/images/{{env.hostname.bootstrap}}-bootstrap.qcow2 + path: "{{ env.z.lpar.storage_group.pool_path }}/{{env.cluster.nodes.bootstrap.hostname}}-bootstrap.qcow2" state: absent - delegate_to: "{{ env.ip.kvm }}" \ No newline at end of file + delegate_to: "{{ vault.z.lpar.networking.ip }}" \ No newline at end of file diff --git a/roles/wait_for_install_complete/tasks/main.yaml b/roles/wait_for_install_complete/tasks/main.yaml index 6da2e81d..ddb5054b 100644 --- a/roles/wait_for_install_complete/tasks/main.yaml +++ b/roles/wait_for_install_complete/tasks/main.yaml @@ -2,7 +2,7 @@ - name: Wait for OpenShift install to complete tags: wait_for_install_complete - shell: /home/{{ env.access.login.bastion.user }}/ocpinst/openshift-install --dir=/home/{{ env.access.login.bastion.user }}/ocpinst wait-for install-complete + shell: /home/{{ env.bastion.access.user }}/ocpinst/openshift-install --dir=/home/{{ env.bastion.access.user }}/ocpinst wait-for install-complete register: wait_install_complete until: ("Install complete!" 
in wait_install_complete.stderr) retries: 120 @@ -11,11 +11,11 @@ - name: Set OCP URL tags: wait_for_install_complete set_fact: - ocp_url: https://console-openshift-console.apps.{{env.networking.metadata_name}}.{{env.networking.base_domain}} + ocp_url: https://console-openshift-console.apps.{{env.cluster.networking.metadata_name}}.{{env.cluster.networking.base_domain}} - name: Set OCP password tags: wait_for_install_complete - command: "cat /home/{{ env.access.login.bastion.user }}/ocpinst/auth/kubeadmin-password" + command: "cat /home/{{ env.bastion.access.user }}/ocpinst/auth/kubeadmin-password" register: ocp_passwd - name: Congratulations! OpenShift installation complete. Use the information below for first-time login. From 2f0c57c198c5b7737a71eb2b05f95201bca418c6 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Sun, 27 Mar 2022 02:04:19 -0700 Subject: [PATCH 619/885] Fixed link in README Signed-off-by: Jacob Emery --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7bba2c72..6e101b08 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ * MacOS X ## Installation Instructions -A step-by-step guide can be found [here](#installation_instructions.md) in the installation_instructions.md file. +A step-by-step guide can be found [here](installation_instructions.md) in the installation_instructions.md file. ## Pre-Requisites * A Red Hat account ([Sign Up](https://www.redhat.com/wapps/ugc/register.html?_flowId=register-flow&_flowExecutionKey=e1s1)) From 1e1c720f9711056c4d9b3d4a96a15f2690ddf2e4 Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Sun, 27 Mar 2022 12:33:44 -0700 Subject: [PATCH 620/885] Reset test playbook Use test playbook for miscellaneous testing, if needed. 
--- playbooks/test.yaml | 44 +++++++------------------------------------- 1 file changed, 7 insertions(+), 37 deletions(-) diff --git a/playbooks/test.yaml b/playbooks/test.yaml index ea22db76..62fa035c 100644 --- a/playbooks/test.yaml +++ b/playbooks/test.yaml @@ -1,39 +1,9 @@ --- -- hosts: web-servers - become: true - become_method: sudo +- hosts: workstation + gather_facts: False + vars: + test_var: test tasks: - - name: Ensure the PGP key is installed - apt_key: - id: AC40B2F7 - url: "http://keyserver.ubuntu.com/pks/lookup?op=get&fingerprint=on&search=0x561F9B9CAC40B2F7" - - - name: Ensure https support for apt is installed - package: - name: apt-transport-https - - - name: Ensure the passenger apt repository is added - apt_repository: - repo: 'deb https://oss-binaries.phusionpassenger.com/apt/passenger raring main' - - - name: Ensure nginx is installed - package: - name: nginx-full - - - name: Ensure passenger is installed - apt: - name: passenger - update_cache: yes - - - name: Ensure the nginx configuration file is set - copy: - src: /app/config/nginx.conf - dest: /etc/nginx/nginx.conf - - - name: Ensure nginx is running - service: - name: nginx - state: started - - - + - name: Test variables or anything here + debug: + msg: "{{ test_var }}" From 4bac7d1d6f22ab2346439a7e542d0a080c22726b Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 28 Mar 2022 02:55:46 -0700 Subject: [PATCH 621/885] Reset inventory --- inventories/default/hosts | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/inventories/default/hosts b/inventories/default/hosts index 2bdd3cf4..d289690d 100755 --- a/inventories/default/hosts +++ b/inventories/default/hosts @@ -5,13 +5,3 @@ [workstation:vars] ansible_python_interpreter=/usr/local/bin/python3 -#start of ansible managed block from set_inventory role -[ftp] -9.60.86.17 ansible_user=zemery - -[kvm_host] -bnsf1test ansible_host=9.60.87.132 ansible_user=admin ansible_become_password=ibmzrocks - -[bastion] -bastion 
ansible_host=9.60.87.139 ansible_user=admin ansible_become_password=ibmzrocks -#end of ansible managed block from set_inventory role From 1ca37580ee4293199caf69a38db89d6e5975359a Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Mon, 28 Mar 2022 02:59:03 -0700 Subject: [PATCH 622/885] add single quotes to pull_secret example --- inventories/default/group_vars/all.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inventories/default/group_vars/all.yaml b/inventories/default/group_vars/all.yaml index 124ca41c..47b736eb 100644 --- a/inventories/default/group_vars/all.yaml +++ b/inventories/default/group_vars/all.yaml @@ -76,7 +76,7 @@ env: redhat: username: #X password: #X - pull_secret: #X #OpenShift pull secret + pull_secret: #'X' #OpenShift pull secret, in single quotes bastion: create: true From d2f459ba16a83ff8d5e80845c0db7edb1536e20e Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Tue, 29 Mar 2022 07:58:27 -0700 Subject: [PATCH 623/885] Added FCP stipulation to storage group pre-reqs --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 6e101b08..7af8266f 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ A step-by-step guide can be found [here](installation_instructions.md) in the in * For a minimum installation, at least: * 6 Integrated Facilities for Linux (IFLs) with SMT2 enabled * 85 GB of RAM - * A storage group created with 1 TB of disk space + * An FCP storage group created with 1 TB of disk space * 8 IPv4 addresses * If you are using MacOS for your workstation running Ansible, you also need to have: * [Homebrew](https://brew.sh/) package manager installed: @@ -80,4 +80,4 @@ A step-by-step guide can be found [here](installation_instructions.md) in the in ~~~ ansible-playbook playbooks/vault.yaml --tags decrypt --ask-vault-pass ansible-playbook playbooks/vault.yaml --tags encrypt --ask-vault-pass - ~~~ \ No newline at end of file + ~~~ From 
56522947f72df6833e9209e30b00b0b27519124d Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Jun 2022 19:20:11 -0700 Subject: [PATCH 624/885] Add vault.yaml file Previously was created during playbook run. Creating it now so it's always there. --- vault.yaml | 1 + 1 file changed, 1 insertion(+) create mode 100644 vault.yaml diff --git a/vault.yaml b/vault.yaml new file mode 100644 index 00000000..ba61580d --- /dev/null +++ b/vault.yaml @@ -0,0 +1 @@ +#Will fill in during setup.yaml playbook's encrypt_vars role. From 888fe1070a0732f35b3df652fac3ed7df34f631f Mon Sep 17 00:00:00 2001 From: Jacob Emery Date: Fri, 3 Jun 2022 20:20:50 -0700 Subject: [PATCH 625/885] Added GitHub Pages for documentation. Signed-off-by: Jacob Emery --- CHANGELOG.md | 118 ----------------------------------- README.md | 85 +------------------------ docs/CHANGELOG.md | 113 +++++++++++++++++++++++++++++++++ docs/acknowledgements.md | 0 docs/before-you-begin.md | 30 +++++++++ docs/first-time-login.md | 4 ++ docs/get-pull-secret.md | 2 + docs/get-the-repository.md | 36 +++++++++++ docs/images/ansible-logo.png | Bin 0 -> 8020 bytes docs/images/overview.png | Bin 0 -> 246219 bytes docs/index.md | 8 +++ docs/prerequisites.md | 47 ++++++++++++++ docs/run-setup-playbook.md | 6 ++ docs/run-the-playbooks.md | 21 +++++++ docs/set-variables.md | 5 ++ docs/teardown.md | 13 ++++ docs/troubleshooting.md | 21 +++++++ docs/vault-and-tags.md | 23 +++++++ installation_instructions.md | 89 -------------------------- mkdocs.yaml | 45 +++++++++++++ 20 files changed, 376 insertions(+), 290 deletions(-) delete mode 100644 CHANGELOG.md create mode 100644 docs/CHANGELOG.md create mode 100644 docs/acknowledgements.md create mode 100644 docs/before-you-begin.md create mode 100644 docs/first-time-login.md create mode 100644 docs/get-pull-secret.md create mode 100644 docs/get-the-repository.md create mode 100644 docs/images/ansible-logo.png create mode 100644 docs/images/overview.png create mode 100644 docs/index.md 
create mode 100644 docs/prerequisites.md create mode 100644 docs/run-setup-playbook.md create mode 100644 docs/run-the-playbooks.md create mode 100644 docs/set-variables.md create mode 100644 docs/teardown.md create mode 100644 docs/troubleshooting.md create mode 100644 docs/vault-and-tags.md delete mode 100644 installation_instructions.md create mode 100644 mkdocs.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index c4a95429..00000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,118 +0,0 @@ -# Changelog -All notable changes to this project will be documented in this file. - -## Table of Contents -* [Roadmap](#Roadmap) -* [Automated KVM Host Provisioning](#Automated-KVM-Host-Provisioning) -* [Infrastructure Nodes and Extra Apps](#Infrastructure-Nodes-and-Extra-Apps) -* [Scaling](#Scaling) -* [Automated OCP Verification](#Automated-OCP-Verification) -* [Automated Bastion Install](#Automated-Bastion-Install) -* [First Working Build](#First-Working-Build) -* [Initial Commit](#Initial-Commit) - -## Roadmap -* Add option to use a VPN to reduce # of IPs needed -* Add the ability to provision multiple LPARs for high availability -* Tag infrastructure nodes for specific operators -* Add air-gapped (disconnected) install option -* Add option for OpenShift to use a proxy server -* Add picture of finished infrastructure to README -* Add README’s for each role -* Make ssh-copy-id role idempotent - -## #Automated KVM Host Provisioning -Version 1.4.0 \ -Released: 2022-03-26 -* ### Summary - * Now able to provision the KVM host via Ansible. - * Changed the structure of playbooks, variables, and inventories to use Ansible best practices. -* ### Added - * Support for using IBM's zHMC Ansible modules to automate the creation of a logical partition (LPAR) profile, connect storage group and network card, boot from an FTP server, and then kickstart the installation of RHEL to serve as the KVM hypervisor for the cluster. 
- * Usage of Ansible vault to encrypt sensitive data. Playbooks must now be run with --ask-vault-pass, e.g. 'ansible-playbook playbooks/site.yaml --ask-vault-pass' -* ### Modified - * Bastion boot method from cloud-init to FTP and kickstart. - * The structure of playbooks. The setup.yaml playbook still must be run before anything else, but now there is a master playbook - site.yaml which imports all other playbooks. This was done to be more user-friendly and in-line with best practices. Previously, everything was all in one playbook and relied on tags to start back from a given point. Relying solely on tags proved tedious. - * The structure for inventories, which allows for more flexibility with deployments and is more in-line with best practices. Now you can have multiple inventories and specify which you would like to use for a given run in the ansible.cfg file. - * The structure of variables, to allow for the separation of the bastion node from the rest of the cluster. This opens up many more possibilities for more complex deployments where, for example, the bastion node is already created. - -## Infrastructure Nodes, Extra Apps, Security -Version: 1.3.0 \ -Released: 2022-01-06 -* ### Summary - * Now able to designate compute nodes as infrastructure nodes, and create optional RHEL VMs for additional non-cluster applications running on the KVM host. - * Made changes to SSH and SELinux tasks to be more secure. -* ### Added - * Support for creating infrastructure nodes and extra apps. - * Added tcp port 53 to firewall. - * Setting of permissions and ownership of important configuration files to bastion admin user instead of root. - * Wheel to groups that bastion admin user is added to on boot. - * More rounds of checking cluster operators and CSRs in verification steps to ensure the playbook doesn't fail if it takes a long time for those steps to complete. 
- * Task to httpd to allow port 4443 because SELinux is no longer set to permissive (see '[Removed](###Removed)' below). -* ### Modified - * Formatting of README file to be prettier and more useful. - * env.yaml to have two sections separated by a comment block: one for variables that need to be filled out, the other for pre-filled variables that can be modified if desired. - * Ansible user from running as root to an admin user with sudo privileges. -* ### Removed - * The need to run anything as root user for security reasons. - * set_selinux_permissive mode role for security reasons. - -## Scaling -Version: 1.2.0 \ -Released: 2021-12-09 -* ### Summary - * Now supports any number of control and compute nodes to be provisioned in the cluster. - * This update heavily modifies the variable structure in env.yaml in order to make scaling work. -* ### Added - * Support for scaling of control and compute nodes. -* ### Modified - * Variable structure in env.yaml in order to support scaling. - * Tags to match their corresponding role. - * Every reference to a variable from env.yaml to match the new structure. - -## Automated OCP Verification -Version: 1.1.0 \ -Released: 2021-12-03 -* ### Summary - * Fully automated all OCP verification steps. Cutting the number of steps nearly in half. The main playbook can now run completely hands-off from kicking it off all the way to an operational cluster. The last step provides the first-time login credentials. -* ### Added - * 5 roles related to automating OCP verification steps: wait_for_bootstrap, approve_certs, check_nodes, wait_for_cluster_operators, and wait_for_install_complete. - * Role to check internal and external DNS configuration before continuing. Including checking to make sure the name resolves to the correct IP address. -* ### Modified - * The mirrors for CoreOS versions to update to 4.9 and tested them. 
- * The acquisition method of RHEL qcow2 from downloading via ephemeral link to having the user download the file to their local machine as a pre-req. This was changed to avoid having to re-copy the link every time it expires. - * teardown.yaml and reset_files role to be fully idempotent when running the main playbook from the point where each type of teardown sets the user back to. - * Lots of small tweaks. -* ### Removed - * Instructions in README for doing OCP verification steps manually - -## Automated Bastion Install -Version: 1.0.0 \ -Released: 2021-11-24 -* ### Summary - * Fully automated bastion installation and configuration using cloud-init -* ### Added - * Options in env.yaml for creating a DNS server on the bastion or not, and for automatically attaching Red Hat subscriptions - * Variables for bootstrap, bastion, control and compute nodes' specifications in env.yaml - * Node name variables in env.yaml - * Variable for network interface name in env.yaml - * Variable for DNS forwarder in env.yaml - * Templating of DNS configuration files so they don't have to be pre-provided - * Expect script to ssh_copy_id role so that the user doesn't have to type in ssh password when copying ssh key - * Templating of haproxy config file - * A boot_teardown tag in teardown.yaml to automate the teardown of bootstrap node -* ### Modified - * create_bastion role to use cloud-init to fully automate configuration and installation of the bastion node - * teardown.yaml script to decrease complexity and work faster. 
- * Some tags to match their corresponding role names - * Lots of small improvements and tweaks -* ### Removed - * Encryption of env.yaml as it was unnecessary and increased complexity - -## First Working Build -Version: 0.5.0 \ -Released: 2021-08-24 - -## Initial Commit -Version: 0.0.0 \ -Released: 2021-06-11 \ No newline at end of file diff --git a/README.md b/README.md index 7af8266f..f57e0065 100644 --- a/README.md +++ b/README.md @@ -1,83 +1,2 @@ -# Ansible-OpenShift-Provisioning - -## Table of Contents -* [Scope](#Scope) -* [Supported Operating Systems](#Supported-Operating-Systems) -* [Installation Instructions](#Installation-Instructions) -* [Pre-Requisites](#Pre-Requisites) -* [Troubleshooting](#Troubleshooting) -* [Teardown](#Teardown) -* [Tags](#Tags) -* [Vault](#Vault) - -## Scope -* The goal of this playbook is to automate the setup and deployment of a Red Hat OpenShift Container Platform (RHOCP) cluster on IBM Z / LinuxONE with Kernel Virtual Machine (KVM) as the hypervisor. This is a user-provisioned infrastructure (UPI) installation of RHOCP. -* These playbooks assume a basic understanding of the command-line. Using them requires near-zero experience with Ansible, unless you want to customize them. - -## Supported Operating Systems -(for local workstation running Ansible) -* Linux (RedHat and Debian) -* MacOS X - -## Installation Instructions -A step-by-step guide can be found [here](installation_instructions.md) in the installation_instructions.md file. 
- -## Pre-Requisites -* A Red Hat account ([Sign Up](https://www.redhat.com/wapps/ugc/register.html?_flowId=register-flow&_flowExecutionKey=e1s1)) -* A [license](https://access.redhat.com/products/red-hat-openshift-container-platform/) or [free trial](https://www.redhat.com/en/technologies/cloud-computing/openshift/try-it) of Red Hat OpenShift Container Platform for IBM Z systems - s390x architecture (OCP license comes with licenses for RHEL and CoreOS) -* Hardware Management Console (HMC) access on IBM Z or LinuxONE (390x) -* Must be Dynamic Partition Manager (DPM) enabled -* An FTP server with RHEL iso mounted -* For a minimum installation, at least: - * 6 Integrated Facilities for Linux (IFLs) with SMT2 enabled - * 85 GB of RAM - * An FCP storage group created with 1 TB of disk space - * 8 IPv4 addresses -* If you are using MacOS for your workstation running Ansible, you also need to have: - * [Homebrew](https://brew.sh/) package manager installed: - ~~~ - /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" - ~~~ - * Updated software for command line tools: - ~~~ - softwareupdate --all --install - ~~~ - ~~~ - xcode-select --install - ~~~ -* [Python3](https://realpython.com/installing-python/) and [Ansible](https://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html) intalled on your local workstation \ - * Mac: - ~~~ - brew install python3 - ~~~ - ~~~ - brew install ansible - ~~~ - * Linux: - ~~~ - sudo apt install python3 - ~~~ - ~~~ - sudo apt install ansible - ~~~ - * or (depending on your distribution), - ~~~ - sudo yum install python3 - ~~~ - ~~~ - sudo yum install ansible - ~~~ - -## Tags -* To be more selective with what parts of playbooks run, use tags. -* To determine what part of a playbook you would like to run, open the playbook you'd like to run and find the roles parameter. Each [role](roles) has a corresponding tag. -* This is especially helpful for troubleshooting. 
- -## Vault -* The setup.yaml playbook encrypts passwords entered into the [master variables file](inventories/default/group_vars/all.yaml) for security. -* The sensitive data is transferred to the [vault](vault.yaml) and the variables are redacted from the original variables file. -* To encrypt/decrypt the vault to view its contents, run either of the following commnds: - ~~~ - ansible-playbook playbooks/vault.yaml --tags decrypt --ask-vault-pass - ansible-playbook playbooks/vault.yaml --tags encrypt --ask-vault-pass - ~~~ +# Ansible-Automated OpenShift Provisioning on KVM on IBM zSystems / LinuxONE +The documentation for this project can be found at [here](https://ibm.github.io/Ansible-OpenShift-Provisioning/) \ No newline at end of file diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md new file mode 100644 index 00000000..e7e5ee8e --- /dev/null +++ b/docs/CHANGELOG.md @@ -0,0 +1,113 @@ +# Changelog +All notable changes to this project will be documented in this file. + +## Roadmap +* Add option to use a VPN to reduce # of IPs needed +* Add the ability to provision multiple LPARs for high availability +* Tag infrastructure nodes for specific operators +* Add air-gapped (disconnected) install option +* Add option for OpenShift to use a proxy server +* Add README’s for each role +* Make ssh-copy-id role idempotent + +## Documentation Overhaul +Version 0.3.1 +Released: 2022-06-03 +### Summary +* Moved documentation to GitHub Pages to be more reader-friendly. + +## Automated KVM Host Provisioning +Version 0.3.0 +Released: 2022-03-26 +### Summary +* Now able to provision the KVM host via Ansible. +* Changed the structure of playbooks, variables, and inventories to use Ansible best practices. +### Added +* Support for using IBM's zHMC Ansible modules to automate the creation of a logical partition (LPAR) profile, connect storage group and network card, boot from an FTP server, and then kickstart the installation of RHEL to serve as the KVM hypervisor for the cluster. 
+* Usage of Ansible vault to encrypt sensitive data. Playbooks must now be run with --ask-vault-pass, e.g. 'ansible-playbook playbooks/site.yaml --ask-vault-pass' +### Modified +* Bastion boot method from cloud-init to FTP and kickstart. +* The structure of playbooks. The setup.yaml playbook still must be run before anything else, but now there is a master playbook - site.yaml which imports all other playbooks. This was done to be more user-friendly and in-line with best practices. Previously, everything was all in one playbook and relied on tags to start back from a given point. Relying solely on tags proved tedious. +* The structure for inventories, which allows for more flexibility with deployments and is more in-line with best practices. Now you can have multiple inventories and specify which you would like to use for a given run in the ansible.cfg file. +* The structure of variables, to allow for the separation of the bastion node from the rest of the cluster. This opens up many more possibilities for more complex deployments where, for example, the bastion node is already created. + +## Infrastructure Nodes, Extra Apps, Security +Version: 0.2.1 +Released: 2022-01-06 +### Summary +* Now able to designate compute nodes as infrastructure nodes, and create optional RHEL VMs for additional non-cluster applications running on the KVM host. +* Made changes to SSH and SELinux tasks to be more secure. +### Added +* Support for creating infrastructure nodes and extra apps. +* Added tcp port 53 to firewall. +* Setting of permissions and ownership of important configuration files to bastion admin user instead of root. +* Wheel to groups that bastion admin user is added to on boot. +* More rounds of checking cluster operators and CSRs in verification steps to ensure the playbook doesn't fail if it takes a long time for those steps to complete. +* Task to httpd to allow port 4443 because SELinux is no longer set to permissive (see '[Removed](###Removed)' below). 
+### Modified +* Formatting of README file to be prettier and more useful. +* env.yaml to have two sections separated by a comment block: one for variables that need to be filled out, the other for pre-filled variables that can be modified if desired. +* Ansible user from running as root to an admin user with sudo privileges. +### Removed +* The need to run anything as root user for security reasons. +* set_selinux_permissive mode role for security reasons. + +## Scaling +Version: 0.2.0 +Released: 2021-12-09 +### Summary +* Now supports any number of control and compute nodes to be provisioned in the cluster. +* This update heavily modifies the variable structure in env.yaml in order to make scaling work. +### Added +* Support for scaling of control and compute nodes. +### Modified +* Variable structure in env.yaml in order to support scaling. +* Tags to match their corresponding role. +* Every reference to a variable from env.yaml to match the new structure. + +## Automated OCP Verification +Version: 0.1.1 +Released: 2021-12-03 +### Summary +* Fully automated all OCP verification steps. Cutting the number of steps nearly in half. The main playbook can now run completely hands-off from kicking it off all the way to an operational cluster. The last step provides the first-time login credentials. +### Added +* 5 roles related to automating OCP verification steps: wait_for_bootstrap, approve_certs, check_nodes, wait_for_cluster_operators, and wait_for_install_complete. +* Role to check internal and external DNS configuration before continuing. Including checking to make sure the name resolves to the correct IP address. +### Modified +* The mirrors for CoreOS versions to update to 4.9 and tested them. +* The acquisition method of RHEL qcow2 from downloading via ephemeral link to having the user download the file to their local machine as a pre-req. This was changed to avoid having to re-copy the link every time it expires. 
+* teardown.yaml and reset_files role to be fully idempotent when running the main playbook from the point where each type of teardown sets the user back to. +* Lots of small tweaks. +### Removed +* Instructions in README for doing OCP verification steps manually + +## Automated Bastion Install +Version: 0.1.0 +Released: 2021-11-24 +### Summary +* Fully automated bastion installation and configuration using cloud-init +### Added +* Options in env.yaml for creating a DNS server on the bastion or not, and for automatically attaching Red Hat subscriptions +* Variables for bootstrap, bastion, control and compute nodes' specifications in env.yaml +* Node name variables in env.yaml +* Variable for network interface name in env.yaml +* Variable for DNS forwarder in env.yaml +* Templating of DNS configuration files so they don't have to be pre-provided +* Expect script to ssh_copy_id role so that the user doesn't have to type in ssh password when copying ssh key +* Templating of haproxy config file +* A boot_teardown tag in teardown.yaml to automate the teardown of bootstrap node +### Modified +* create_bastion role to use cloud-init to fully automate configuration and installation of the bastion node +* teardown.yaml script to decrease complexity and work faster. 
+* Some tags to match their corresponding role names +* Lots of small improvements and tweaks +### Removed +* Encryption of env.yaml as it was unnecessary and increased complexity + +## First Working Build +Version: 0.0.1 +Released: 2021-08-24 + +## Initial Commit +Version: 0.0.0 +Released: 2021-06-11 \ No newline at end of file diff --git a/docs/acknowledgements.md b/docs/acknowledgements.md new file mode 100644 index 00000000..e69de29b diff --git a/docs/before-you-begin.md b/docs/before-you-begin.md new file mode 100644 index 00000000..362d9d85 --- /dev/null +++ b/docs/before-you-begin.md @@ -0,0 +1,30 @@ +# Before You Begin +## Description +* This project automates the User-Provisioned Infrastructure (UPI) method for deploying Red Hat OpenShift Container Platform (RHOCP) on IBM zSystems / LinuxONE using Kernel-based Virtual Machine (KVM) as the hypervisor. +## Support +* This is an unofficial project created by IBMers. +* This installation method is not officially supported by either Red Hat or IBM. +* However, once installation is complete, the resulting cluster is supported by Red Hat. UPI is the only supported method for RHOCP on IBM zSystems. +## Difficulty +* This process is much easier than doing so manually, but still not an easy task. You will likely encounter errors, but you will reach those errors quicker and understand the problem faster than if you were doing this process manually. After using these playbooks once, successive deployments will be much easier. + * A very basic understanding of what Ansible does is recommended. Advanced understanding is helpful for further customization of the playbooks. + * A basic understanding of the command-line is required. + * A basic understanding of git is recommended, especially for creating your organization's own fork of the repository for further customization. + * An advanced understanding of your computing environment is required for setting the environment variables. 
+* These Ansible Playbooks automate a User-Provisioned Infrastructure (UPI) deployment of Red Hat OpenShift Container Platform (RHOCP). This process, when done manually, is extremely tedious, time-consuming, and requires high levels of Linux AND IBM zSystems expertise. +* UPI is currently the only supported method for deploying RHOCP on IBM zSystems. +## Why Free and Open-Source? +* Trust: + * IBM zSystems run some of the most highly-secure workloads in the world. Trust is paramount. + * Developing and using code transparently builds trust between developers and users, so that users feel safe using it on their highly sensitive systems. +* Customization: + * IBM zSystems exist in environments that can be highly complex and vary drastically from one datacenter to another. + * Using code that isn't in a proprietary black box allows you to see exactly what is being done so that you can change any part of it to meet your specific needs. +* Collaboration: + * If users encounter a problem, or have a feature request, they can get in contact with the developers directly. + * Submit an issue or pull request on GitHub or email jacob.emery@ibm.com. + * Collaboration is highly encouraged! +* Lower Barriers to Entry: + * The easier it is to get RHOCP on IBM zSystems up and running, the better - for you, IBM and Red Hat! + * It is free because RHOCP is an incredible product that should have the least amount of barriers to entry as possible. + * The world needs open-source, private, and hybrid cloud. \ No newline at end of file diff --git a/docs/first-time-login.md b/docs/first-time-login.md new file mode 100644 index 00000000..34bc1400 --- /dev/null +++ b/docs/first-time-login.md @@ -0,0 +1,4 @@ +# Step 6: First-Time Login +* The last step of the main playbook will print a URL, username and temporary password for first-time login. +* Use a web-browser to type in the URL, which should take you to a sign-in page. Use the provided credentials to sign in. 
You will have to bypass a warning screen. +* Congratulations! Your OpenShift cluster installation is now complete. \ No newline at end of file diff --git a/docs/get-pull-secret.md b/docs/get-pull-secret.md new file mode 100644 index 00000000..2b8a8379 --- /dev/null +++ b/docs/get-pull-secret.md @@ -0,0 +1,2 @@ +# Step 2: Get Pull Secret +* In a web browser, navigate to Red Hat's [Hybrid Cloud Console](https://console.redhat.com/openshift/install/ibmz/user-provisioned), click the text that says 'Copy pull secret' and save it for the next step. \ No newline at end of file diff --git a/docs/get-the-repository.md b/docs/get-the-repository.md new file mode 100644 index 00000000..55597639 --- /dev/null +++ b/docs/get-the-repository.md @@ -0,0 +1,36 @@ +# Step 1: Get the Repository +* Open the terminal + * On MacOS: cmd+space to open spotlight search, type in 'terminal' and hit enter +* Navigate to a folder (AKA directory) where you would like to store this project. + * Either do so graphically, or use the command-line. + * Here are some helpful commands for doing so: + * `pwd` to see what directory you're currently in + * `ls` to list child directories + * `cd ` to change directories (`cd ..` to go up to the parent directory) + * `mkdir ` to create a new directory +* Copy/paste the following and hit enter: +`git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git` +* Open the newly created folder (`cd`) +* All in all, it will look something like this: +``` +$ pwd +/Users/example-user +$ mkdir ansible-project +$ cd ansible-project/ +$ git clone https://github.com/IBM/Ansible-OpenShift-Provisioning.git +Cloning into 'Ansible-OpenShift-Provisioning'... +remote: Enumerating objects: 3472, done. +remote: Counting objects: 100% (200/200), done. +remote: Compressing objects: 100% (57/57), done. +remote: Total 3472 (delta 152), reused 143 (delta 143), pack-reused 3272 +Receiving objects: 100% (3472/3472), 506.29 KiB | 1.27 MiB/s, done. 
+Resolving deltas: 100% (1699/1699), done. +$ ls +Ansible-OpenShift-Provisioning +$ cd Ansible-OpenShift-Provisioning/ +$ ls +CHANGELOG.md README.md docs mkdocs.yaml roles +LICENSE ansible.cfg inventories playbooks vault.yaml +``` +* All you need to run Ansible is a terminal and a text editor. +* However, an IDE like [VS Code](https://code.visualstudio.com/download) is highly recommended for an integrated, user-friendly experience with helpful extensions like [YAML](https://marketplace.visualstudio.com/items?itemName=redhat.vscode-yaml). \ No newline at end of file diff --git a/docs/images/ansible-logo.png b/docs/images/ansible-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..16b6dd614e97db32428e8703463901c94b16a426 GIT binary patch literal 8020 zcmbVxXHZjJ)NVoogr3lQLMV!aB7#T{F-Q{xq?bq&P`dPLzz`BC#zq$qM0&4Mq$>zW zQ>24PM?tEB0yn;Q=KFoWduH~Wwf8({)!o)QWMd;87Dj$X006+Er;9ZO06>U;4FaN$ zd@nz=p}w4rubXL8-~X%U?Cgw+-`?KN&dzplaDYOg2n1q&ejbfR%gMS&#pz+YQ?QDnnAvuKaqYC4>JO?BIAWYH2YMF)L zzQ3RPsX=~_xo`` zasf8O3O62@n{rB~kGPyQ58QW>e-t7=q2b16>08p|sNr>V@cKZI9<)63D3o{l4ml6M zT)RAyuqk+pZ6)Y~bMcAtTlRs?)RbNb>IY861>EZV!Pex=2OjO%KP`V+@rHXKa!{a_ zWoI&n`Q}`4lMGN;_)n9F1_QJG+}kM`zd+-ui`&@wZu5@O)!TEmIqSP~30E16)q_6Q zcelkj4%i>7Ec$&pzKR9!Kw;fiu~4=^p>)Y$GUQPBk*nY$pi)t@w2Ex0b!X2c(EqOx zvm$)LhUVEL`vXqZoc+E}-QvE*daj4!QS|;PDWO9#)hn!fDEBB;ON>BHW?PdI!QW6u zRy)Tt^tLcEBqD_55YnUYK3lG(^Gf{XTRIKhqE2bM(uJ!bpj!Weno0qzrCZjndg0ZD zDFm*DWaUq9fqW(z?2xW~)1N+>n7R9HO@Bu5I6{dNb^y{Qyjjuw#`(v$k)WYLcVK6( zqwZ$o#N(ZHbM-fGLIrb;Z%B%WaN3T{yVl)kKiR9o@dN~1#zX{uU0epOL4l@tJchnE zpg@n%7z!JMmWCMk#111j{~Wq>l{!%0NWUBXG+rST#UAqqUg|f!1Zwb=bQY!%7XFA2 z`1yKogLivwq!VYr9MzzY7=Z|yZKj{)$&p|~p1=dE=QV(QY*hRj0K z1dU_3N{?9lZROkeMet7Y8S{>y;MP_At+F}Ll0*5CJ$ONMKki7qzU=o6?%~{t45go+ zWj;k~TK%{?=T!7-M>1#G?UIlD`0+WW!lJE#=|&q{BYZM(wk46}`Dq``Twm1+gIYGR z8>nd`6tc&xeydOic*|BCwIWI6hyMoOjk>lcSpJS5k>mC$?cDl#WfM^BfufbPAvUt353B#RjR3-YU!m-a;#X^>z6veXXQ1%T1WTkXuIu`v$3})tv#(n}Y zyt=eHp(mngU 
za*h_0e3*e-NNU(b5>Vb@UvJtzC)TMucWF-D?<$G?*;-C|TVGaz=?(M3%RJ>}+4O~# zp?gh_B?NLDg)aY@z8G0DwoZ#YewkOuQ_a7i*Qv|m5gue)?GeA;&K()m6d7EG={=q# zMP~Tz(v9WI)Xhv8=M zt_wWX`vUPl^DeNIJTlmSRIGslGnTA2s(uxo#j?3eJPmy+BVWzDfA*8D1j*tNADQo! z*8+UpK&2Cfi&*n5=}9deEqd_ncSAGI>v+}DBdph-s|XjrM?J~LwgMKxi=TGBGb-Za z>e4Tfr#~CJoKykTO2v`6Iz9U^c)XHU@o?p_e^g6Xs4RRV>X1Yd<{JZjR84GARMVe$ z(IOc2)827?czj`SGQY7ztM~4OI3>2%V{B|m8?h)H25lyYM27wK^n%3O?G}Hjbd469 z_Z#UKueit39}p9vX&E}gq>W@qNwwljc8OS@(9X z^(@VTVfNwI&k+{V0!Co!8n0C`5E!?^)#ERD3+T0nwRSI-n~oE z?|J_e_9FH4Wf63o(0V=>9ukY;hQEF~@`Xf_NnB@oi}b+6@{b3l$*JR+rOP~RfugJkD*RbpVjM+Udt&GO zAWDE~W4JO|OvyRRl_M*8^rhpGSo3po_}pJDbGz&5mgc>Ud#q*9e^5+^!-j+w(ANSwptPEXQ$@z?Yw+iq4 z+a}Vo%AClOfy(W74vYJl^c`1EtP`8n+G2))OzVluPjQc}3BiX1Z4ax`ar}4O zl(|aF-7l4g0+*#*r+~m8{K@NEjS#CkR*3tT%Gb8#Pn60)G*hBYp)=J9-=8%}ok#B1x6?h0-}*sM}(thTnBIs!30(h|evWW*@Rw^Fkl$w36KzC7r{D z@JpbcyjJCCp_}jaqp4=y8?Suw@SMNae{8S#v-6Cqch6l!gtp@YRupcgc#64E73s6= z(nn=6_du+vd`ATs{EJh!_$Y!7=$6M-u?q9QWd4b3Ad1T`EMJUtLGw~DqD^*ce%k-x z7YAs+>p6uvpna?4$14eREo!edd7*T#hDkC!H-u^b!^o$V{QLs92T@I1Dt7Ga;RyXb z`9GL21eJ*S@^4u zh1>?%D6l}>KUF5{IkbP_7;97T0r4xM;Iet&dENyStQ~5kQDrO#6{S(&tV0d$0MI=97QZHo-F+KxVx>R~Q3aH&QisBr7fCe>h3H6XWqDn|QnF+`T$GwX_-zYN+YD`L&eGAF(2F z;n#i4Ixft<*A57UPTuwjGh*(2Q7M_3@I5+7&fdMfVwuUhB2i+3GwI&+aF3;|zEhL) zjo#OCHJ3|wHa?olT5r%F686()iX8jS4j&y}*m-q{kHPvogEc{oy52akueSoOZ(Bd8 zg!JikRf}l*6$u$Bfh)Y;MR;X_qVA4V+I3rtDJ}f*B%sC-Gke~Pq$s%wgYKs~GSrjV zz;9^U9y-F-wEl3T$3Y%e-MGA*72I~Hn7J@OOV`qm?ZpJWU#%M@$*>{+6s2x)?I(3+ zUr<2tq#T#osjb&Qt9?UvLJRtl#~oM5EWdSFG2F$-ro|9*EYG;I6&yY$lw!5^(AwSV zG^wH3t_99*%r%Dbxhe;Psry0GFyBWG+q*AhW2n~%Ta82c9hVChy~m&3qo2*`VE3&$ zk?c1jsxadX*BCghDj1MWuReA(LJ#cv#$9_2IDiq7mR(O|ms+?;=e8naQSvI1tsK~W z7cG}W^13RYy3nhAbuBE<7v)rCdn8?!YBg)OmrWmpyi;gRJ) z)_1Wdkpz?(4%3Sut^J~X+|d$G(FThUJgGr>Z)rCEHtxS}fCN?XPs+LzWQ65aNE!z= zKB-0}9MdN1drRszeuBIn>Qc16^E+x(VH`XS9*^ap-yTG*jWr2Q7}aG?o_ zNV>Dl_a5h5+AqCfoT?&*i&8!G)kq}X04EdHknykg5-PIk@6R0~Y1p;fP%gDRzR}}fKS=E@2qlx z31)AzO2PGF%&=AgM{f*gpOGoCg}wJn9jGqfLoB}OGOd#m%Y_eEL#$^UtE9eDqwBdL 
zNViq#QYj_Kj@^&g5VY$P=8iz*7L@z1{j3yQXbHBSPx-hl<>J`%tz~GJlyzWf7TLG0 z-=TyI+fbpub6KibpIpxiwpInZ18y17W;EOPB6B&$!kr(6norK7Two*Q^#_Qh zWKOIg?1b5K?1ddk`kPd7lC;Y7z8Sy8yz5opUkuisBa%L;Oysp)mAI^G;*%pZ_J;jx zC9>&#%>G+O8t*$=E~~R2t)SK1p;*}!sbXOkgB=(KoAO0DK>WTWExabs1@1*Gz3d11 zn#5C}G!gS}_By`%mKY=2&4dh#)^b?^kYjFtHFLpgPSgldwhfPlDT~umB+AIpuY<18B9xeDPQ`)L$R4>T}M2i-0vzi7ttvWrEfGc{scJt!@O6&8^~xm0cHjOb=%ty_$~c z;-6YfEjyCx*PSkuIn~ufblt3_Wj~ci_M`0&fI`;8bZsZ$*S;UFO{pHyhX>bkh%jS; zX6JjVD?KCj&f3g4;MWxrRe%ipp7%7qx~g81_J0yd|2j6RLlb=oGlo&C{k$P=6mMC= zBaI7Gd28V%L_=@Gy-;J~w_zMPKe-+9P*awgsd6@@Zim2uz##Gw+< zg4LuD2>vQJs}-yD6$fRPmk*XHv9On_*1WJ|Lz)Fv?!X2V02m~s;0(h9;1BnjD0$1) zfc>zGm|qG6fDrCB1UGr%sxkgNwWSgrXq?JeB@h$j1x9X}+b_(#*BFy&N30G@jo%ZD zUnc3S;coa<>P447k7YRK6E8!tpk|au+A&i7NJ%x;sv?d-91Bhi%z_)lQS5&HHO0Vtdr}6;alQ0ghac$K z7b7_}-kgVr6hsA;MA&^0Wx{X&aZrxg?t)zsAbE-lWZyF32cyGze$2fvJQQ$ueHr=r zl~IxR8A3mBSy@X@gx&JlDvK2u;cgst9$UCU+b(+H?(S8x`?Y0;$|A0S&jrFw4K^4<>8a z3Nv)d?0+vsSyYYSHP`1vb1&1gl+Bb3l0#$`fnP}^dZc_UFPm{W&yEAf?3Af9PXa4F z8{MSYhOAk|qs!VRFL6Vys+QlsU0xZ>E_){Y{k*iB#+}wGXW7fl59D|WUf(3Zd&a+R z)eD#vPrZW9Qi~I3YDR5k0Q&>J9reB=d@X$F^Ah(xE(A(8(1zs=xA7D*zV&RimgXIg zKS2d8`U&_fxnM@9<`WQm(05u&902P&_xq~}wGyvGNKC)$Yr(jl7UT`jg@iYq-t2J=qli_l%(M+pSxX!z&JA7Zv;E&>s_w|Reedqt*YlCW7 z`HjV6^5CXSsn?ft>-iaxlg$l6?@bWQ?U|40;(y(reC}E|X>{6nF6g{z&Zk-3Jn!j2 zx8@u>-Sy8y6R(~<-7~JQ<94wWE`M>FW<85+laBJQQcrZq-1T`;5^;8=#Q%nAa7G{Hvrg1bO^}+sEjrz-2fPT|W~eJ2Vo+Zm@h!6U$aLTKJ106_fJO{%w024k$;{iMAN6!%=!WBVZ3*P) zg+h5Tbb0}O%p0FVeSIsNU&W~azR?1FVZn~48SNlcw;tUUO4hlsTyCZq)M<@7z^gt5 z+#w6=HjO#tPEO==yvZ-8dOb?lj_v3iI)^J_{9rGF-!`Cz_ zOP;w4y^eQq54<5(143?xhE&-(B;CG)uhnN=S`6oaBY2fD67Ojt8t6=JCc1L#P=18r zV_=8NT@DWbtf})MiztHcw+zo4Ei4jYI7&0=SRDpLV00RTcYxJpo1pn=(9&3Iw3Q(U z92a9l12IxJb46eH>u1S;>Z(?qVG0A?@#BQP0YfvxD+lo4#&cyExy^955;DH_o^UTu zFKgxmAeyg!d2u!TGo7S&Fk ZkHCx|E1)K$fm9I($Iii*W@9P{Ut6&u(zHQ+QkZa zo#1-FIXi#ivCV+xKW{4rtk#lOW$xwCwH}RooBcpnuKgG=!tDy8PKQyYMOi5Go3NyE zVWJlSI8NIeiK#XLj(81=E-``Kat1dfkGnE0))|(*swQH=SZs)K8BnbHMURbw-ut62 
zNC`4Bn#6cJ9*m9viQEKroO52H0k5y*_gAV({D_QR1E}9R2EFMIOdqKMY5j`jgEj=P zKdHKo=x8nGfIdb&lRl)4xiw>>sS3TWl?t3pkx`V?G!2MweI14VdE!<8L^?GyJzb%T zQB3v3JCy0!S28nQ3_*_pA(sWK(!h`?z{YM1`e*CN9x&$bRJ|vkMMUwnU!a(VU%DWy zSiBcQ0bQ=B8u|)wHT2Eq9>i)5K3{pMn<$^Kg7}g`bp@YM9$1z_Nyy;{gs`L&dW0#kd=Vv zj3nJEM+MO%fU?jv4l4=RJwb^Z*2Fp?nMZ}?=*ouaaSn*LWE|{oDVlNh2U|d-3F$LO zvZG>TOZyxTL?*}piFn_9owni}*eQ5LV9hhA&P$8=)f&CH4b_COml6OPNtuCTBl=g` z71p3W*Qys^c74p@!XFDbpabj7Kqx!%TXc>`J-XV~+oN-L^DT$*gHa}kj2R~*VjsH} z?EuyT5l&C;mWN4~GR-Gd6o~?8ZpX)=&y#22}Wb14?01J zXV{obR^9k?A*hh$%=F>TD<_qW^KTC5Vs6JbU4kYoWw95fsY5wCS#;H=ZZYO%@ zRSjuGL(h)j@8^a=BKhwq&E-opX3ilmAy^wyo=81&bS!*u8#^fgVLwnn`^xe&=Y`TJ z8|5-eAyl93l|qjjsdeeQKUl&2`J*ss81nKCUCcDa;2OI2_AR|pzltSFR{2vG8pQC* z5(J_S+q4FCV6XfH#5BC|>hQKw?6ivBDRIX;Od@Icu6EbZ#bDIv5vlsew9Sr+M{;EQ zpPGsWAnP*lbCbWTZm?xY z&LCaa!Y}};e$1eln+2KCIYr&9B)cjBiz{~V)$F4IGzhDD2%2cF;C6~erv6o+G$APg zau-vOoLQdeqDxf9@(_r|Te15Hq7bwO38kOU)x3c%CcZ<1(K()eVo(^|oBI}G{v7G_ zws(!1S^L%WIUz{BG9MWH^!h2f6M&c(bos;5+I9om;@QwEhW33ID8SG?1OywdZ=<*) z`7C~K&bUbdWdJ~^i9k0m;?H$-P}EpURBP=1iLB4;)C@yaJDG-*lo!1)yb}@au)2!%cfz59h^Tg-SyPVlG?`nVoh6_?r->ZBb5f z7#ywbsPUQ_eum3u?zf*%}|5S=VP<=!A+VQjcW2)k59-iAgM?y!k;#_e1g@zG>Sf2l9;v zl|?2?&>}}O(4Nhlm&u{{kPdnXV_NfI_!afh+YPKmaOB1d4FyP8?BE zbF&Sx%om|w=YXa%MXBZrWXMr+`{WBoTQ}f2wD%>-@fp>IH8IJN$EyF zN9bA&tnsLq=aC@`O-xPyV!-#0?4Eb8(R9Cnn>06alZ8pc8w#+?^1eAfmfNgZf~1N0 z2)b?YmXBTc%U|kf^ohRpD_!A-Iw8q?(D2LJ?}J!PhCwjed#g2%BO__`W;LgF-enKN z^!aP_rouNOBS(me`i!n$FKTOMka*Q;NWPU^UE*Rr6V8I;p(pFf#_RdNw49#}ZU5S2 z@volgi{4M;5kmGGrs-=l0ts=1zkKpcO?R&H~CHyfb>9N%R`~&{~CYYFplHWX6_`dRwgPyh#wjARS^M3%a C4gEy` literal 0 HcmV?d00001 diff --git a/docs/images/overview.png b/docs/images/overview.png new file mode 100644 index 0000000000000000000000000000000000000000..21545a9c4bfe542129683f2eceabc808e01c18c1 GIT binary patch literal 246219 zcmeFZc{r5e`!}p2wAm`knq6cpMRv09TZL@dLbmL?iIl7p#=eZKm3=2$DeGWj?1L!` zi80yN;ki}veZRk_{&?T_IG*Q^9LLPu?zy(}I`_|c%^j+xp?LPxO%1ZS0%~}R@w{xr5Tw8;cq#^ zI#XUeF52nb=-KE_ld|sF&9j`DxI|Fl5+?{BpAh&? 
z5HH?n)EN~Xep2h?b`yLjg+P0KC^gP#pKiZ6lbgJAi-2j4-1^k?uHW+%f_FnpIc3KQ zo~txqrjNg-L_E{iJb5D6D~HlL|NA+%p4(qo@Is+WbkBmwh-=h!WJ>vi;|An}%W{enzE zXTE9sOY_?65aG3bp6>nlUGbL3WVFLJ%m$YA0$}|Qs9LHlXyQrdJXS&Wy9Rd8eLo(* zGk#3^HM6{BOee#Q8MsJEz+=+V{KwRniAwoDeu=41w5gO7RHf^AN>g;vEv)8o3Q6g) zkEer|O9(5w*+)KlRe~ybRa?#S*Ji!S0-97z$u86?+uztXjDC?n({?>vE|iO0_e#~( z^Bir`1*E0n;=)%{90W6!K89W@iRPM&TQZcP13~42Uu~8)^B8#5k2YR6m3#rJQf8fL zeYMnhT-!YRjpQ(wWU*90;%N_gPBV_P-$^hQnZzy>B(fiF*IIyrW3mZvXhJHOSWRwF z=c%kr_OI7GDdmon)l+Hc+w-_#*Ldz0yW z$>ZZyYJ3#a1kS3~{v5kGmr04mnNEyIlcWwL2VUaBvrulT5y*cgdmbz;c@vZkzD;uK zz_8PE%)2$I6!wqV*| zq2@HwN%Ijh?Wp4pFQkl&!@F>HfhcJWO>si9#|tkmSx+jwyR`8*VAzX%b%C^!&V8)& zO-aD#J;Re|JnciVFm9rpGcO5s7XngW&t=Ssv}YEqZLDdxB#Y=f^?YuJ?1`Z7m=f%~ zV!T#LG#s$N<8fRJLjV5TvD0EvH|{5tWT}YPyiH#(E;{D4gkPGz#jqr`yVNfk7YD5y ztnsJ`xEpzaB=0<@FoSib+Z(miYgg}=ei@Q!pH$i8)!-4<6BZ_bqAw53&(oLh(+2T@ z{E*$_1k&#U8yg!xpxv}12^K(4Q2T?zY1lKH$v2O^t5s>db@x@@mrmkijcgU4H3SKB z-%$>~_Zxdm{hjzd)0qguQYMP*prw*d=9(U>`vQsQW*$>&os1810f#5;>6oeNs5|3Ou^9{XzEStqUT-snR_xh3)jy6n%m9@{%uURAkq*sQORRKikf* zzkW(xuI=T>SJn!Wv=Cke?oU_UsPBcOOM^0nKMAc~7>zupcEZAZCQ)$aiF1iQ&sli0 z<*-`8dG5A1CHD#__7FoQLIqSi bG_B5w`ITiIW_K~C{DJ&G!JTxixU3B++z*%sx z=ND5_w;=xa(VrDRQ?`+0HS5d;q>y%kzu5?77mkwiFQXdav32=( zrF3b^sy!_msRSj9B^%_o6M|1cZek*Yy&egq-+sl_ix*Wt>&R z7EAA99?n@LS=IKHjg-MVyBPOX=+$h0} zUK|-5o~X`35sj#q<(JhtYF2ENA}URtz%9qzzq^r(+!i(RkfU#)Yl)htH%ueZXw;z0 zyP9X%bKSbi^`&d%3JZ0vYK3a4Y6E=U?CyuYXV+*wJcjl3G>6283X3+L&7WfwsrT}l z`d;mL&e5-0rFwqRZR62~!Q$CPCviH7U&Dyu`Rm(l! 
zO=kV!iU^J#>u7|CX^_)%91ohMkr7|Ndun%m3qOZjfXz1{_ghF%6wMTdisGatB%g`w z&)J-|Bzi#Tc*_2?0NEtb9U_TyRc9^|I-L&asj{9e@hmkBsd?6Z>)bQH&zsH<9wpx@ z{q+1(-=}!K@sjM4N;9$#*P6>e*frNNi?ig)Uzew3&SFlGpO{R3v^Dn7aYzXlEvS$l zmMPCBR~YF-hb6xj?nw7E>axhgmRCFcgZwdx+xq12NMni}rXC?jov|;yAA6m%xD}pyZ0V z_nrDhHOHB^UA&SZP09Nz6IotE_(apcXkf0BJlRdTv@#u0&qt9GDbHG2^4Qmnmk(<&P3*VArVndkRw-(z-2nHIJk$mTVG+NaP( zLt=%`e|;~DXM379$r@Aoer_^|Xv#?g!p!m4o zcIq7DL5a0}dT3Wy>WDbpWw71=Th8(TmnfSSD>hqgJ~WAG0e7n>p>859ClLP>9%G+8#FS{xW>K9g+_cBjQk?q$`&Nj9fgd4_t zT^lKZ*4!>zohi$(5{sYb0o~iPYYeNw$zSkb;HzwGr@^sOeK*cNYA&>{^S3H{`K)6S(rG^~Y<0?!=dfRp(#4eeG_%mbqQ^tpC}dEDPBU z;ca0C@dX#J9f}o}IK?#xuZV(#5KbSxlsPr7-j~}$HNug8UDKw~)1|9pK2HJmP=wxn zdo1a@AP1rh!5~HbXtCY9X(fbr)96*xSNGDznAw~sP2PJRB`x_mi)9;_XWWf?mkkvw z80s#&t$nQgWM^IET5MWR;hMGfpsyQM-RD#}esL_onFH68Quox;7uVgWS!22_y(6lQ*^_eZ{63lFLhY3_wK+L{Sf}U|K837S&qar z2{FlSKk*$d402_n1Evh)ZX5>N?p|J73os2Zg+V!{))zG^Tz0*4J+<5>*om6V{dzE<&b|#Gt+%VIc~u3D6|O- zkHop`mJ)Z8h`)p2n+URUqq)QqU-1CRN0^nq@&k2sf~&wa2?61;%LGKg)iK~Dd5re= zwZbt@f)hXPA15FPu_Yk<)kXvOJotM7ybk*O{5%o!jDQ&Uj}mx!WgI_jeNrXk#NjpJ z!Q2Egy0Xg3z^AT-iDH*sD5L{$CcpX#LVf_w_KWuwj-%Vd# zP1M54f!EB^$=r(9%i+<%I0WKeqQIqtm75upmxH~dtEiU*^N$vy!1cjx5Hr({CT?~T z%=+qDOtMZcR!l;?e7t$LyljtVa&-Mg7C<2A;2RJ>FCXal*g#kDgS(Uu z-4y@P;eUK}H06JE)&GyKLi~LH+4VoZ`tPoHT&-MWog9Ek-6W3~_N()Me)+4TIOu@( z|JaM48~x)hu+x&K#6iEgCV7hFikd9INIF|N&D+2yAZ7=D$1ehJoIgK->tk0~6>?-q z2?(SKl;vb@dmURCC#`4KGv-)~ofyh4k0!rGLTX16Ugi{*`8lJkhWQB`?fj17_!*|o zqQ@D6-yWZ0&bS#SZK>;QMkYu1fmtlbo`QsQM0Czyb8l8}4tX0^ztM2Ve+RoW=ntE8 zt*g?#lb0?!&ZvX-{YK|$kscFDaO^lC2|1JWd~KbQUMPFKO> z49f?M4YVA8YC_01`rjn~Zpv4FJSZl~f7LPdPfY;APy8eNzjojs$N!1?f18tkI{x1~ za$sry?=o)p+mDNZ0=~Y;FJAl)ji$2-bVsl3W%;#^h@@p$dZbs1u^n1S{ab*6WLxDM zKlPisz$_*vP7xP5nT}|bNX&1$`7V=`MCD@S#M2`)u9ovSW4fI&9C*XhDgSmn_j?Hm z*}@fY%|~m<)sfb6nTE&%Zyr&shm$jv*Z{N3XL*_(ITl?mVDjZQW?%(kij=?Inf~=- zG98({b$mz5e0`oAO1EV}0i~-=V>wzBPcq;zdcPVc91FOM$nsp6H`RT~xns zq*b=(D^+5%=JFp)AY|t#nUY0I&2|v6x%PF{$!ZCJqK{Tcn5!wRCE$;ci#&3-YWIL! 
zm&{~3ERE9QjwY5PB4ne&xVs#6J|Hit3s|&T>wv-0qSbB#{k2Uvj+`7{9I!PV;urr7 z`IBMq{?G2^{kuUi103Tw9P05?eQ#ocVnALj)uD9qs0W@9D^@`@F1J>!lx=`QY0pE| zX82F3uupv}IFdK^t9++rl!IA0(6}5bcemeC5ja9#vnL2Lr8Eo8uoS@F0VGM!`nQHM z@o5Jdkf><*w!?BU^N#%5ibqnCcsB5>feVIsANASku9iTOA}423M1J{^+9bYu$3w_w zEk2n>b>lxL1meA}o^r_;u?tYT&}54vLFOp~v?rSJG5j}iXEZ)9gLix157ZWwFtAm& z_Zm<`>F5=$ul&~HM{0nWw?R=YzlVu`42(i5{*iP%D8uqriH7QtZHNKHTKSu(@Sz^R z(*e|&rzOwFZBbdv#f>iQ>5=;IUr1cbu;kiRT;zA)<5l=INFqg`^VHb2r~71cNW)m$ zc0f&PM7a;gBM1xywEfw>|6HgL`R|?TdIc;d5ua9FqaIoqsgV z?r__3jR9rEYTAg-b8sg0%x2y_Vqq&VHX)k@?enx>_X00n0Jie$6VuNQr&TVJe0_AJ z>fu+II4Ph!3U#jPziz&(I8CCGizijL#uKt3w~YG3j%d~Yu?+hYrLn*6@nr!!b;3Rp5j^oGZHIDX>AF!`MF)cQpMTbT=(E~7#6;dzf3Sb$QqOkCL6o;}&P6LGB6wn9< zCXYk<9vm2(EV6*W>T8?^Oc@`J%n;AB#~E^3Wi;5H8;3WMynw-4=eVA>Mg>)yb=BrR zBG3Y0dr(a2*^e5B^hF#b3ajry->Zn&P7}J_JYq@C*=2i;&zTnyAXK419d<$WE5f91o(D=`gPl8#?qW zYIOj!M`54b1~s(=(h%~6zvYr70^k;jN~;jL*dcv_R{{EZiJRV`E<$<8KYcmk^+j1} zEoM%G2#?w1LuT7jlS9>ZsFHiONmX)JYF}MABCzzwGKNo@a~#sA0MIA6{h8Ve6%<2s zT=Xadd1p>?AVuN5i$=ehjRlx(3%-Rkkdo{;gencMq0v2_?O7NZ&tU{wK@_ zLiSIX|36sfBr(of+_M{VK}eyTodVNx8c6GJn@1R>^?BKU2gzvS?eBw{!76$^gA%eS zstF^BPpLfEUaIpxG=hOI0gItbEW7Ftz+Q32Nk1Nps#>T`s*OK!e;q%-N zi=3Oq_J-)tVhJ4g-0T7~^oFi+iZE9tNhxUwtf&(l`6%)q2LDQxp*#C8^W+BD(}?)K zEXxY`t?E-M7q5QxI^3FL$HRd<0&sx0%3YNY9RxoW&_4GfG{VE+>s^_STSWA~X_mfE z0W2N=B!&eK!ODBp^ZvH|q=3JN)6eaDuFMrC*@!CQ*@@T`)A~sdO*kQmn#qBUO!DJ) zB|4nR5?CwVe~oj_X%IsqqfE_ab>s!ql79FXD|H?B! 
zO8bZ?)4D^p@zF@PBVaEDW4wh)RA$&jGXZ^IF?S_Dindw+yQ9X-d1-@jA$+IbIPEZI zRs+JsUSG$cm@MlN$=HnSLbIeJ^Cb<;9266H{m#H)Bz66V!JAMs9|OILxC$<(iWq*> z$}%BhKChm_yWiHk&*;)~BKJ`Ukd~3j|Bti`nCt6)yTs-FQi(-6D*sm+hatLt^YN@8 zi1U7v-R&)2@t_!9mCk;EgnJ)Fk0?0yBL+NSll$gyXK%9dUEKt%{p4JIhN80d8xtZT zwuYMXho+XCiH{x_q=i&P0Z+W=RzXsE7~=CZN~1q#Ic@QJzEV6ebPtcoC|@%_+!Fx5 zC-_15vFo~bei8maEyL-1Z?zw_#t+N*uLH1^|90tzj{Xyke+u*?DEg-l|McOXfB5G` z4`QZ&p~Js;>0gleFG&2CKm1FY{-u`x?;~SmZWuR2jJMFh!{}ZL_$~%BrEuGCKavaE zxb&=I_8Bd*XV>Ca<>wjKC*jSnhK=E2Kk&lJ#~IldOAi4CCh7YGKQPJbH;^RKcW0R_ zjrT;%w#sH^r!T;f&+6;hap5m4{Y|1=;BUeddCG9Dw8%xe7atMpw2r6Lio_ntz75z9 ztYAY_-`H_f@^$@!uPyj>fG|-bu<^Yjdf9mKo>rf{auRT`K_^`QsNTj$@Uvc;VoS)_ z#&||f2($H?J0Xum!F(&8TQ6JpE{|&7tT!PCKYhZ9m56gMtlq4Hp{rNl?z9%>@z^xG7WhY*GLVb>E3Mz^H8kcT(E%F! z8o*~OpS%A8iU3UK#1Hs7%1SU&nne;;X*NaL;ZgQ9t!rW1`UR2up)c6tvp9gdyxsx|R@5EXk>k6b z2U?s!1#?hjF0?mjtO|8#vHzI4>&HBwi?EfyS;jMN*8Ro} zv6i3mnAImrt~iE7z_I;UtNO8yua^e`+rR3JS1F{$(=ViziEXflny?Mc!XUWeSjh%g z&F#U7F%+v4MANHfwqETg#`doC!moXC{DD&cVPDt>)Uq~O-_Ql6#`}hiIgEdEgSh7} zBfhzrpHuuM%{rLn=gY8m6*USsu8tJV^|$v0U&66A$CB7fmb*G_FBCP*xz?>`gK!f= zgj`sYYtFx=BlQ>5bb}h|rmd3;z~KGNgGWI88R;Kj_HDrihrR5<4^gJ5U{>oHO}oOL zD<9v#R7h4;u%SS{+Pgfp-ok%+L<(J+qd}2qlp@u#_LMGh7rs6|XX?42;p;+1MfDRZ zZQDaQMts^F)z*p;dbqBM?%^+mQdMe4!XhzJ}~jc_7g&KWHEQkF#; z$-aqxHmITpMzOT)0vk-3qu&2isoco;B+qzWbe|s3XQRI z^!HN4KvlAlE=8hUiTP?q*>t+5C3L#$39j+pT&9BwvlINj%S^bT>37rlB7ENagT`~Q zwILnr_1$tmaa=lyUtD>3m2)mLTkRSX=WF?b-!&+58sPkgoR&*=_iDem(#{j%BIOOE zrrfQu!M8No65!0M%ugtJKKd2C4yIb$bFugJ-4Vx5jan(jaiK=r`e!5kbBugV4T>O< z6KqzWHj1Z9rl&+WYgsq=w#*q;{Y@Lezl5XY(cv;U|FVp}>ZGC`mNK++cjsb2O8ouCEaM79zcB{xB3WR}+=*=*& z8MtCbL#kyn62HYD!&9AMOt~r-7JQ--? 
zPrw|T*cS<5W9))Wg&|x*Rd4nD0iXu%{F(b_I3H)VkNQ;38er&h@y+f zqBdZDvN_G9NvTI=Ztc`7+JRHc8x=%6YbT6$=CL_`B8>~6d8}QWgLW=Y0=|?1rH6TE z>2xkqGG401{mYC)cV5l<*e70k#l)W#wLsoqma<>wFM4a)_~7!1Dl{+I@E(%GL1_aH zL)~*Np;jinINFw=f4{$xQ@8_<8FNn49s=zsV!7Q@?9hHY@Y;mgAPR<-VKygpJ&SRS zR7<4KPF%a3{o@<`H8t~h;>1W>r4Y36G zi(Xvmj6hKjF;;_%{GG>YTp1(r&QU(exMc0d{;hqJ@@oyg^F3~BjVGF8F8BR}^=Vjs z&f1kjhNXnkWtMyb4v_SnP}M$iT1e&}XL0PprILK3#V3Q^=OXTK>#WAx5VJTN`8EWf zz+$lur415s7Hx612~ro~D^HnL25RX{XX=*oswZGiVyca+vo3v@)*pLf_v18uU=Gu- zI3fCP9_iHspXOz&x#*O^JjQORl)F4){j!qHzCU#7eaE1(F>UGfSN!)|-RC|&xuPs~h7+UuPHVkV zHb{qkChOMo?O~mw)O-`rtPP`HmHdc6B?RK&8h5EuqBCJ-2wuvd+aOg{iMWG<_`{bG z=}*i$P{pWp$vRbiO2?mGnor}0wcf3_dmbG8V3bYUWK|=&SEy#t30;0))S^w zYDGUi$CrYthIYQKanhnjb$9AZ{Tl)6;2P&-nw1LjiT`L_!`lJ-;)9%Vm{YCW7|1L8 z<;CdE{|p_7{ubG>CeJO^ZnS@Zm?ki|_HDbt+zrcoYEQFcgUvwbB5r{ ze(hm?W6SmaR_VniK(G!23(XMNsHYcq>6ygMYP08_Aohh{=N@1bj;rzJq-<%$(pE}! zCirr!qHvsLqq4@00*#OzY{7|MKQk9HXm=(g2} z_r^=V6+l#DKC#}4(bi3635t%52WeK{t7J~AedsNEZR17g(p_`>wD?W@wiUc@^vO$Q z`oxNP177|i|J89cFQb`8u$0qV4__Z&hH^_qlXSbVGFn&vDTbwD`}>nS{WICShPU1(bQxkzAx;s*5 ziY-8l&Zc+>o@1M^Yz6K@={7ViguC>nu$XKrGA%FB*C9h(^&epxPpI?h!1Cxkug_52 zN`3Jmq^`gqj-kfcfhu*3IfRoU@ne6HYx$SYjHL}@*7grUj65#GRX*U_JeNI}DEKnI zX|i)3N$r8Pi;}wHxgx^0@w^uQ?Lw_1cVDtm@hjJ)iBc=Q?dEj+>2ohSx4M23bp6!o zS;_5qoD1U5CFlo&oS|Bun*f81Z_izu%f0|lF`@4@n&->7{$*Wtzt&_EB3cD?fQhw| zj-*Q!u9eb7`SFoG+{1}5)$oE%xI3%h33i$f8P*~-AIXDacM45o7n{t~O-!8I@9PXX zz=V&#*o+udtioyFP-TLk@kGZ2DT<5l)g!9=&fQslDCo}}qRcql$N@aW;Ns7yPXEoX z$f4%XK}=Eupz@7i%|+#<(_68I)-wsqrIHW3!iOb!Yiyc67Q^HGt$YgV&xU$4P8`0&wHg+f-sqH z3A5g-?V*}b<3!@X{c3yLpZSKW_AlsL@XtpD$}d1ap5CCT$McJj=%*#j?~%A~YMXpc zz*wN;qs0PPP2DliVY&o=i$)L{S2gBpsk+Edc~NYo)??OtxJ}dfOq@RNOQltY zPz+xLmLdkq<&!7}iSeBIJonAdgeowHl^pEpm|Di<&b9TH`nW8`FVp+yn6zVokZq|C z&oF;CCoi>%D=pqPuIonJ&=RJ_ADW*Psgs+UHNn`xus$vQIp6!!5*Y>|hj-oE@>nF| z&*nN=*VMLsXO1|^G-0vU(S1)`Z}0q2A}vWTiC|_(dkQ!-y)OD=7#K4`s?wuh%Z#6VO^bZ8vdOx1ypi7V zBYazXo{iiVXD2)lICnO1g#7Yrdfz^@Q4jHc6W0bMa=wt-nHJC z>lgp-?~kR{V~?J6bg!= 
zK{KCxw2#q+Xwt6K(DKK9XE7N~z&udg%W5cWFj}vk#o~MOnSFHq1R!Zko+7MUUgwnK zo6lcCk~!T=2;ke`72zUg>P4~mgnUe7Nm;S3WTpw75QL#BIs91=EZ1x8&gm~UI9i3J zbMZgbrFt}Nk)rt!;~mVJztTz>Mdx#FYs@MvXBdn$&>4ln1}ApAi)h*~{ww zc2t%^5qbss(RYiClH%#>i$hJnevM2)O>vtQL)VfjYKUN)K8ZuVxs%^|fwtCZe-Ceqn`ofQN41Bav$3G!oCRl1jt6z+3r>{~|AW~Hw>vlPDy>p(1xDs0w?z0v zB%4aYH6O}zmI(n5LGX&EMw)@l$Q;FLras-|SV{(+1e2k;T{x+Rw(ZX;H`b6?G^YA4 z*3Q*6?bPYkk4eVM$lq)U|1w_ZDZJu9<~zi-$LqHRH`QR=8E#9iLa){2 z81V+7D(!}Zq}mw4exhsrx+!2OP{V4U8zxSV7h`DJw=wtw0Y07nXO+%jgnm@rViY%X z@Oj9Pl8x^uBoSBn#Q)}XJf4l_H9Q{gY!85%!bU*SC2U;h&=GLzs!xdcO>hvm&oe)f zkn;&M8LU3>T{l{4*rlLwv=s4e4h+IW9`T|?bm94vHguxr$B8m21-6b_rO~tvr*W|C z_QGPrT#7H*^dn2Bx9r8B!PS~J3Eqay7Ldkls%uZ??JLbMHfz^M?T~l$jN^)GCc|cC zC&NJ+u>IZH6bu$sowph3i<~5tlU^PjY7`n@EAmXMrd*no3JO&QOs{hBCUgk2zYIbc zLf%RT3xT{lRQrURwz_?Wv<)B(LI|%`!M^hJwStM+y6?@WQ!JX+3Z2#@J-!mlahCaQ zE$1eD`Zkb{Y>lK%VNjh-^E3&AmG_8p%V*H;3)N3vC{|+Jd`J8&01o(}pD_3RKf}vB z_>AfL%KW}5&57aq0!UyhnXfVoWmBK%q*>ZzZ|w{f5oGsmH61nWT{u1MBin2NjpQi*os^ZQZ>hEEf_aFD&PkUrdQ4=x+dTW5dePU6b&*R}7^JaIM& zoHt_rPBq^yylEG{x?n9OW*o4EaZLwvVV~b;^&yroj{cf-9&zJV4YBy=W8Hq^Z{5}J zQ^I_>VMM!M6{SXp8#fwo#wzCp-9bMA^!um&h)@}f%5;p5WqoN%E#Z~mC1&H8(NvVr z&~Ac(UMkuamF|Du*?yqw3l^zb0PXOOG>SE>e=DqQla0tQdqcFCE*ry$FzdiBb9`F= zxUuYdKL#DaaP_X_hYJ>IyVgRxI(zmb;AsYKbk976W6NqT(j+z9t`uK_j z)VF|}zOD7T3WNyw&dg01MY`qpN z$3q*DrDs&O`>%~|6%etJ`G1u~lI?#d6Mm1P$9c6?*dlXh&=Dl^olRs`COj;hp%ZdA zfF+&mD}kgxBlPAge>SeR#B9|DmCX1dx)^bu5H<{z_S)$UN)PDFu`zpio^0zxfW{7l&Id0P}6`v_ubfU>tr=HMMQomJ!rpEo-T7Bm# z6^umyXWE5V*7WInHLh53&4iybtI{k6$@o%l)I_aMKEq;Eyz(1qp_mKOH ztAfA)@>NDQKBl(8$T1u6`gA*xneA7u#{O#qb1|yC$_T$1!IWbqs%c#u+EU8-ik#J@H zg`IOJ*?L{Sgyu2882l%nRd7pgxIMahOv#(kG=v?1nLe|g1GUL%KF1O}DmgVd`fks6 zqmVr`?qk=B-yL8w0oONpYOilFdAF^J^$h^UJJdbIg9$1%nLuDs&W0G_8O5jcSbuC; z=@>8CB+4I%M^5oOD%aab9z(0gQYYqSWJhTCZ4?o)`M0p^O7877O|yY)gJPy2QjII} zgluWAbF1$EycgZzqh_FQ_$OhNq~&Ze(+=2vAG zir8Nsy$#?W8Xx=533j^Jo3`(1w+#-x8Jc~&USwR?XNcY&oS~9jQszZk614)q&?dSU z`%Sy=oXX5wk6s&`)S!&=&S;J#)_a6;#b90=_-!B&wb7?D!xKWGm`NQcYM@~C!4UVO 
zwkR+6=k%=ajEcEh1gzU4c2{Y!;n6KWkNNwCJZ6doQKw{8V$cK&AMUT%Rpo*^U_7*=*G;u_ z(3Y&A;ikKjtKamRM2+1C?qF37Bs-5QQbOeru+7!G ze&%yF>ii0!8PhhlyglBN=qU7dA^PEH3*hgZ?uKrop}_C#by%N?{i5$L5EDROPv?`G zeXzHE%+9O{TW|i$Rl`>&Ug$e*u)p3IQup$nC}9CVm_K3YnIqK=+O9rZqA2IDsb~kt z)>I!CfSU@I8?N_!Lxk@xjf9eC1E<6Fy8FrZ4!!zH-?_w=i9hC0_Ib?d2cOo z)jME24=md7nFyo=_~nV0s@-duW#BaPFYLfwWgEAya7T* z)4Nkww14|MGk~B=T(rC91EtBbEM2AEc>Am7oc5PEkQ(|iuQY&*Y;sSQqd+yqg)HQ2 zsaA6oWf(Nq0I_MC%>2qUQ7l%ruoKv zJU{WpKrJP5fRO@fFY<^9kEXkccUY&>s}gnV)nnoG-dlme7F`nRczlebDvs-!`n&sl zIc=J_w}9B=!Hx)`0@2ZzdpyUk6R}}6ox1gK>j5&#h89clvpcJUMmGRE$(!`W{+E2- z`iuUZRvE-3Y=Y_sl;=#mKY#z)K@R%T1`PT@l$X;W4YBijEPQ(3r0$x(d8<#X+~)Z& zt=smGP-CvsP1EGx3U+M^4GWE;edEUZU9*E(T1tW$T?-X8VVbzHlz^4xZd=Vgo*}JV znk)cdTtP_fYsIN*Mh=t1`CH)K>uw*;3TFdZLJqx{$C$loozad_X+x^hz?(a^Yu^TJ zF`aR?Tgw<*d4?10MZG{Bxz!o7oweT92n1a2-Ck_y%~s*enmJCON<2d>aS+(2^CEmF zR%SqBS1BBP1|=JAI7Y-=>^2KOsFj?o?fV(#|M{RtOb#`4jBCSja{wuP<4H&xd^7PJ z*ven`1KrxgM&qIdhy%rWIj5MWo)a}~OpA3VSIzfx>m-(?SV%%k6&PN$`Fh{q6XDwE zvsF81UfJDwrbJLDy#IDV^vz05K(MG)*gQjf!*gc7`?&HZ=q&3dXCU zS9lb*zG_oGsolV@4pt7Cq@?dobL1VQET`Xitg?#CWykj6ap3*ciad#H64O`6L^4;f z=%%Kv-9c5;?LHRKMoAk3hT<8I>N?}?BHVV-gMemIj%@e&LHbiqCowArKJOrvYTU(u z@-)HQ$hkgMP@$&ag?a5E8bbV-D?0<9QOpyT)Y|<)KB1U0oO~`iGj%F#eV`z-LkCnQlHc zRiYKLx1qjNM+a^*IS2up@Tewr0K8%}6!K_Yn?yggLFdjL9_``%OYYT>Fh1*QP*7fBVjm8M!7!@?1Xaf;K^;;CYg?(m8K;mS705WC5jZ@qP zhpVFlE#iQLK6`%!yr=mV$XUkgE$Q8{2W1K)+(0pa_`}&uwi!CyMs4$Q#L!zkbJ`G} z+ejqlBbBDw5|xDMH}pryN}JP{w(yyQTz9^xj@N$&h1P&=e&px_G^J`LXC-=YO6l8# zB)iF7m~-o;$k`1)#K!dvN)UW1$8Gl{cr_w4VR)%e+=R8wU+xtJOs8&ddtNtUYq=ES z&(nbO$ElhEL3uJcTY5Kaw*!{FGd3%URgE-GzpZ5+5rZpfTZ!Pm9&}UHgq|O>m)kerp~#@rO^hFG z>)R$aQ99?X4W@hr-y zEhf2!h>g9RQ>FxUp`NV(kNaaWi3hKwpzVD+ck$?b|CKU4kd=G9-5JM8qQ8W!pH?}i z9bx?3V{8^_*lRKE=vilov|vs5Ts>LWa?P=KH`Q;sLwLS}e|^$*VXVQ|Gwkv7Y17SH zREct+sQpdIeqz?Dr%)UEi`4E1DK!XqcS>w+d$T7x*1!jFF|h&DiUHZALF}$X*4UrN z5gJvE|9nu4r!Zss91w-)rNy^#bgV3gzVZ;XZ~E-iG*%4%=rh6?2v1nHt~PPU#IGh9 z`=aEnABdb;tPF6cy47OcKB~(f>43%PXnxkur=o`*V 
zx~eUqcKOrQR@H2QBwZ?3TSjfQ${PqNQGS_){$01?r>M+x6gz6{+HQQqw=E+L*DB|I z9dQuMR#41$iTQ%^z9$T;8SJc~n$0aft8`%un*O=#m34F*z^vTPF)kXYc3eqYNyGVBZ!asaf96ruf6E35*(f6X z5UrJSO=&)Uq7smN3SCk$a}kriw(~%0-`)BIrLwht8b9F8w^BWOYzz$?OzL_90S@xP z?Jr;H_r$!*mZ-dja?DX1VVrkBmr2by)dPiF)%9`7$AOIi)WEM|z7Uo;DUW%TFwxyJ zxCbB>`Fh2UCPi)V?#=G4_3NhN<|VV}-EOoR&S$7@G-TXfL=r`{vD2<7DS-c|xht1v zLb+n>TZE@b^!t2*Q3*qT}0E37*y_}Odh zLvQ*Z=G|8Sd@3T%YdS6SuYyYB~wP1FkGJr?h{bnnfPWhxM{IfJK66j~dB4Ryxq zH7>`^S%cTDoz_#9R~jrMmq^TCd4SzfBA_@WJ)2&1lcC1S%cZ~*ngj4lf3 z$M8WKzxwpn0j%l%nrRBd4`g%xL-HEUn8?ZcfKg<6Ji|(2+gL$YjjQ69r;Gc2361?@ zsCko(w-v6lJt@Xbh-Wmh%swA>JBxQ!1QlBR+IsXhwxXnB@If(-50~s`PN`&eOR_?Q zSBSyiiMd)4Nzb@OorA9c7$`Wd$^qq}AIHqO1(7Np8kHm67d!fi51rN)*Cs+ z(0iOzP0{-(AQXMpy zm^B3g0|a~l#mKhu13{ybN4zxMsWeqR0O57~gd8vDBEWkqb5B8jW;+|(Hp@3vHBY^%5UUa(siKUY}fo(9y1X_Ojv*LTwv-SW;isBWt{dBY8pm+m$K zHH?Xc5}N1YCzGPyqo{qaf|B-Uoc7r1Z)$B^pf#=gYW6+6X)|2CIMH^_j888T5L@W` zvDvqaMyM&I;rYQrRYKfNFm!rt!F+0rk*oVoBA{n*fWXl2S z0ZX14sU^mbNUycoZK7roPWWO&Sd7q&uhgRwY8kZQncU?wou#-MN5$=V@r<1ch^~Ms zB*sN*&5u$>x!xv9bNl0GcovX&)3>fgc(rhwLXe=p>J^N4kKAYftiyh!R{z)aJ+M z(a>V94dGPK?GhUCK3bFt6y*2;k-Qvr5l#_t(YGDqUFL5-yjT`jz(Qu-wHMWwxAUtb z7iy8I-#;_*zw+C4(hM-T3|kN#JOHYVLuyUlC1gwkGG$S>NdbZdY|p8KLi5BiTp`U7 zwME9feG^UAi1Zm<>82I2CS4EV6`C_*K~E20_wMk8^`7-5;N*q4qp7h+|F&OS8x5ON zz)DJr<_!mQqklh_V-@xw_jB?=rX)Mxb*w6{G;6f04%2XR1}edGH;TR0-+m4{9X0Ci z=$>zw!-CbOG*jrGd-mjiG4>rmO?F$?uZju+(u>lh1W=j?f&R%=1wRcYU zSgKT5z*>PqRJ1$fcjlmdKx~)77)`O{ju829_uJ&!l@l-9a1sqb^;7cc$)s0c2GwE* z&m6ti)>3hY@A(kZ=CkP&N3*(2P29ZK*34HRz$gs}o7cbl2+k&pS}qZRYI>wuOcLJ0 z_K9@We$sgoI=t;1OOt+%ndILSG?_%uX89iuow2Gt7wWQNO}}TodmA^jb)cc=YqY)e zY5(YH!f8dH)E4Q<1#!}=sqy?Rvc9K9+Nu6i^V&uo5)LAX5jI|Rs}Tnuk7thu?2dSJ z9hFb(u$ZiS5i9L~ZDFz2%*X=~DSW=hLe?6Mr$5tfy6ttkgi6!OnxqMA=zFe9k75|` zTKOBa(tLA29m!NqzFwBOecX#YQWJ?0R{8d{-_X;neb3zo-SfBXi*#sOG2XZpBHkoE zH|Yn^p{eUmAyUj}6Tt3_sP>euHugK-T>`|or}g`D5ou)tbC0}+r8=W(f#VPlpwr8F z#8C5BY%?Z;1ap&hkjbduis8(w&Wz9*&zSetleGRCktDcw9vR_|8{;|srv1_mrgaKC 
z^yt_e3qG|I(P5`87PGiN_BJ$!NOMa@es_dyn3<~C~Db@{|OkkO6m=TCF98Hh2 zkZ{bWbgBMp1Pj*esP~rXi)AyYzxl8pmjB)Vr7!uQn*~**A|$z;``UbciLx}Hv+1Ky z>8Bo5!pqNPMxz$JkjCp(sUSwPDbg9=C~jFk?zeaM_(BrN}+ zJKoV*&U1`p<$6rLt}GD|GUP?g2tW<9zY?3XZ5Akm*suSp?A6_ z<3yx5YDtstodqp(+NvJ#?Tj>M4OU#JV(uB8;8xXHf{0jQELccEdRrYHqMBu_$7@X@ zi>b5dBfb`FkYZfsh<)aQ_Rf3@JHWrmKOlMrMnKA#XO$YgW0EFtTVPM2bik$1=Xw)? z&j9*c!a8O`1Rp3@z73H*npKxNtKIy@x;cptwvw9D;X8RkcaiureX#y;xiHO|*}iTk zH>{cl={esSrLF{>hGOqxBawq0)$k$R=ED*0_x#d5bCUz^AAG5TA5PrTCK36d=6!Vq zP*j~tW4SvMp+MOkcd|BfvS#%_Rl>ulkUvD1nZ_?>VwZPUSa$DUOv3+eZ-?bkB`jQJ zU20@bXx#6O9|}GSXd(g)5N(%O-D*)LQ8u?&jrg8(P>ep>vUj|ySpAcsVPcDPKjXmK z>nPuOn%?4F`)heYn$ogw5;_ZgeA^KOU6YF9YX!M>j#q?cWdWJ)aHoRDvA3mTOFpal z7W`**#3P*0goVjohs(ZD58SlxTz8?1rpu$N1r8aAGzq@TTs7UZO5V$>3&cuWs*Oip z12(3v99S6-bi+wXNsn33)HYaacAUQ^d+KWG8K;8GR)BPQP3h(#O6zR`L!Q@P-N%J_vL#o@m z8FL`#NyJ$wy&ibPa91d%8Q5|~q zOMI?dOv@<-wBHXOsI1zty&OLxn=OE9b|;6;KA@C%hl6n+Bi@`+?J$L2g3oQ31%d{& zZ+d=ysPReW%o{RN*_Q-FQ^?hbW-qEIk*RYg7JiX6)TmXO{)bGyXO0K5S8TiMcRF@C zVnx+Mez(|o-2AUV+{3s*vgw|mr|lZ}fwE{11j;6Rc}pXZvtcV@AT1z{I(jnp!kb1B zjh~PFiaK2y`@?@&zJT|mk6id6$7wIXH@5&n5vfZlOnV#q#XLRMeRE*% z+tZMPcf01-R#KWt?w3pf9&eae*ea6A;oSuov7(seOz$K(g80+iBkT0jETAgNEN&Qd z(`HPF^qiG_WjIL~NJe9$eY(>DUHHXy=r$vp=7_FdBF z-GgP#HI?R5Li0>xWOP!$alt!*c$|N=PlgY}%E?&D`9^ zHVgRk*Y|fuKU2dNpkzNzq-NkUK2Rh)g|Sy!4em9yywhvwpi+R%WV?J7o-Tx$WzcT& z9U4tL;h$B4C~7{-?B04?_RZh>b#14_T;5{TWsU2?-QkUEQ-F)7pjc|CiqO2BlDc=m zQwIic@183DNz54uJU)|&BCPDXkLx*XI0_}X>{jn5JSF15wbrUp2~Ms#c-Xc^GlpcV z?Uv&y=0)fMuAd#v6vP{oStCsYZIe3wCK z1zfx~x5VDa&ee=(;UcX?n+K*(><<}j=YP@gT$f}vBs(Ca!b*@x8>if?l`sv4rde`F zrL($Ok|GqK!5edcDPAR&o+3d%bop;?Sb zbHl5O@Qlj_rv~dd+^#fXsE#kD=RyGX>->)hywSTO?9c5NRIv#rV6Uz1 zmm%79W9rzhOf%>ZqRM=wW$6bghB~2LWS=0=u*C>!7~H*Zw4-ypRayCIivv0gkPDhO zSAIK9)^1+XjnS|KB+yb-Ks$8UP$@`vQ8~;4q!h7cmYHec5{I%4IctkOfa z>Pq%-+X4vduOH63uS>dJpG=uZdxCJ8>ph%!mSbK^5eg02EFd_jc;)4~(Kkd2R5# zHWS0^FHG$Kaeat!Z}?X~e)NCcxJlv5Aly>%yz3XLESGwY5Ke$M#EqBSt-~sL8}NC?B`kd>J@THu(}8_I$4_w| 
zb8l;J?N^ubMO-yt#3~%=;pf(RjFX@{)Z_8G=Fj)$D{S=?HJ<=Ebu)(V^1|_~z8q{b z`co0Y1$qg`vFTVclOp)-ZJi>qdr?6SK!NN<^kHPSkE=|5J5S0@l8Sq3da8}hnd4^m zqh?+PL}|@?ru*!TFHxNQpcn)UfwQNq!&uu=wtG%DnhyFL9WDVrV^?h7r4&a~ze$%# z!$zlQ|IhOx&+-_Qd`j1B5eFRKLp4IHWSi;UP9c)1oP`Fo^VW0{RgIUogES(2T>9>! z5;#axYu>_2cv6Q8YJhF2AGJk)>!^^r{y^D=M%YmZg=a!VWF@1w8LhE(NFRrx!p|#u zqKcfnpH{?Co@54WyTBmWy1;xtuU`Z^&=dB*O<26@Cv3nvKCmCNmg5}#BlNaMaEHJAGG9L?shvEQp431Z%O)SUsYoDn^S+FoGxOrRSI^{H>f$vf5X)Z z8ys4&LV@=`6pgbAjc&;uwcp>&nyeCUa^8-vw5RgCH(8X4c-~eFxL`D-jst14qLKeh+Yq*c;(7I|zD&-7#^#&OP98g} zVJ31^3EsT2^TiQAq;K(F)5qT&V>Ii`OEN)6o}cshuI z-f%v-R7!uJPI z_SQIr%wE~j@y6-txD+WwD!hUMsW>T3Nla-@NleWgkf0f$-FV-LkGipgwDP4hZ->CW zN2CG|PkCDiBwFOc#kQ!DOQmUaI(jWZRUEIGW{W9T zt@_iK8cXxlQp`J}S!}u0lAkNYu-fOVr&)dEWV>pj1o_K3Gf#=PTTa;r?B94Qb<5Gu zFH2651QfLRQ6TC~AWZbmXNGfBl>dDEskv`tFJRddo+~1lh}bRj@8%+_*curc@6Xjd zC!Mg4aTn;jrSsIro-ij7fMdz-7@u2%X3NPIe$sPZXtvrp3c?r?4($5RZ%0W#u9nc4 zBhaN5rcdZpvVyo=Hyneg+q1Ry4o)pPKGazERmqLWVVC@MHIVu0y>Z49$%0W#&jM$^ zZB0(+5U~V_57G*;5gZK0J%UmFUzA09iPLg&*};%+k9b>RRR>?nNi=QuOI<3BsVv8J zGNtL7KtaWITr15-TdG*CfUoo03C}mVp}SOy%yG7TT)B#`50(o{U84Ag>m=pEz$#a$ zj!g~Sf?y6)^>vyr1LZj%PNdqFED>D|l%$4p?m4eFr4o~4WkaLWIbYv+|D!t)QVuw_ z(*P&DtgA0er!20WMiB8>-6i2DM&U38tTH&BpsH#z%jtN^&&-uznVl;me2xSpOl2JL zhB)+}&;4tZv6@@klq%&8cO%!N^s_B{s{4%1-5CyVQY(6g^2qYEhThGVcq={IAX8=5 z2roDPYwjx%b*o{Uv{|07a$XEw>fZVo+Vk}h%G*4C%+I>XgIjNd1q`NC^44#DCQ_k+ z(8ZsUjC1y##XPzQ-HK;m4isL2w6ZTi?mA#JXx2GEI_{~iG%#;2Tq2ai^K`QhS z)0ftO*>}8OYciu_oXyp%HHU(UHapILWCpQEZGn+|-V1A4LE(Wg{)Ay3Abh#FKiK|{ zkUmGnd*M8_im$#LN${SVf!N&;cZYc}ZTjWVyY&)G@VotlFPc%S)Ap7G>T28kyC?I) zTXl3wl#1N?58g~NLW;S@PI{iBP~Kt1MInemvGh^?9<$V+7b0@Y-_qq(+n`!2%Z}q4 z?ZFR(qHic2FbfldLZ1f8Rb1AkR8=r6TSnuC`V7T(bOzH^>qp1gyK~x^`0H#S>kM&E zu@m7e^JN_+h{$UeK${YElB30JYfwkN$C{yi)jR0nQbA!+(^%8o-oBNY7i66f!66X5DJg%=~r`Fuow2!Dp#5^wqEx9+Ms~{!vxbbP7k1$mTs{@N8=CX3ex`VUP6egsrCbs)2CwvQ=hCEpyV=U z7LxLoLS`&z>{jI-{>CC>Aj`=*4n@%JJ5*N9$opV)+gI-NLfz#;^%Vd z?!TA*V&tEe9xA6C(y5x_TJLt--X2bB8{?q^X?;W`N9srgrdKfV4`k-DD83hf2vqSs 
zPt5993012%M8=1gKRkYE`GPVqu468=?8C^D^>lpM!n>zrv%os-oVIg^H6J2uFr+K9 zLzH>-awN0a>vdSx?X4c62BXVCqtTk>%byGDDW7nq^;?9#FmZty0rZOCkxn*Yzq;k^ z<+OrK&u^wW%(fFsM#4B)yj<|KIo>KZzlT9Ba`@*(q?$S zv`Q<%g*7RdRz)wudJg3VbP3h_JWXToKAL2nsBzqY^=7&DOFUa=*_#I&F=2xOsfjuP zpY8ZsOs2F~r}neDtJso3#wn>!O`67F5Bn>iN%2fsqX?ITK+$j8;L^>w8bA75)GFng zPqLDM?3o-|^5{O%!+$>S$%vJB4_=Uf?7DSEulrT*zP^F%1lu~Lz3PZ#E51z9+9f_} zIP03`$SIKU^qSogOpn7HQLb}C!feSMxU_S51Mj#Vn5@Pb3ClcapeeL#V30o=ATK`+ zY$cKFRu#b{r`bhQPwUDjm;-m2stUh@TKc_#W6zV8%WS=scCFO7h!gwj@%$&X@5gRm zHzBx2mf>gonk9fLjx4)2{GC9}6BYzJ zY$xJMXn*)x;5^5Te}7)wzc1GC)S2$keBk9*-X^%uh`B5% zC5;{JO*rD!KOv&T&|QrHms>Iwqp@OCByDeGJe&oT0D=q=6ipX-wxXEbhP9qKAnI>s z>&;0?pX|?La^oN0G4(yjEB%ZUrdPS*ylu}o;Mj;$kPpp6-|BElpq(51E*FNSu6Qq_ z&fD^K0g#C+RHrk=rjfcIGsIK|eR1=oEJGL!%Nf$J+GxV}(|Pi#VKT*POk*6AH0Qz+ zHiUmgnAce>xhZDDqk?! z(iocT)kLf3CjU<&>S=$5N6GiVibNz4ju9(qXeg3JWW4d7MM+9?d+fF7!kBHY7}zF! zTIt%Tvk6P~nIHJE9=oUxKy&jEC5q15dpkBh3?{o~-WkKjCR8xEd)>BZHnN6sg!CQ6 zcPl&ABdC3{D9rr|Hn7s*8{Eq;ce=*)F$uwWjljJdF){0p`pV)lIXik^%H*CRn@{Do z=GTg^NsuY{G+TjM${ZO=CcCM$v>RC7^4@1CAkd1~SBJjKDc2@$rsGq%6IRQ61TnKA zY*!nsC9}jGGiTc}xz$($E(J;Vn%E9}2(lT>4YL_9&uhq!)D(mJw z0pu39)zJSdb%p-v{+yO0d5~o={#}D_jA-4~fC6?RkeX7pO0^MOH#)W#q7`q~9}%p4 z#pIG{%qsyZXA6lQLUPF(|F+d=nq?U;bp)GHh@29H1^)d%+LdWN14IJK zW|sk5Pr;um@|vxhc&)wIOxJty?%Cv1+|jHXf83+@iN3u7uGg#r&4BLeioUqAZ1U4e-$JYWhP#s< zRj9x>Tb&1((ZAdeKZarQQe1nU7bGI7Nn$Q;&nMbAO7eGWv@Q>|Q=lO{*_v|3MlZ9< z7I|DJ@ia{QvLv>*xD(n}_D~~qDw|t3G}=oV)|Y)p*0I}&u^$^N5*#-g0g#W`Y@-l2 z^~OD-i@7>e1H7)_Co%Sp)+a9=4yisb*Um?~Dp>hOna-G#tjy1owsslV7QbMH{5V%J zsKQ5Qq;X_7_M*?DUY%C*vRKxN!PP(A2M54?z*O|TsBCGdC~W+ro9oaJ>x7=X z4~=aQPu#sZl1tD#hE80OLrr2c9T(!QQBN_CI)#F_8>dc3papi>A}by-Zsr z``g535NtMnBjE5!)tJVv~0LqkY zyox;bDNs+tG|D`d-hFEN?5C1mHgM1fCY{>IFKLjOp2h7UJAb;%x0@KC!+p2y`p#ID ztu8}iSm5;Zn~ci!TuHKT3`S1=G`_D6DVglP_-_wBxZQ1Vdyq4Mj73s(CHBKHC3^GA z{qNrlzp3+@QJA`qzsT3fl1X8uqo8&{9|7mZ#jIwGMqd>W5PE^S-|i}S^FB>5luryU z0~DzPXTB7J?5=R>5cEpO{onD*{)c$oVo;GpTh7`}P-^grJHFC!7-)ZOGuc&H(dpT0 
zt99GwfZ}etYkR{&lm2!uIZb{F5_?M{?9mgxytLiPI?oQlb(X-^5b+0xl74;aZTLBc z*_rs}eB>I9b+Tf9@0|*}+VtRC9bgq0*=A7{x191ermxQpWsHq1$GL{!w}VYiNg;<% z6wUE{;xm(x*YDn9T`yT`29lOkwk1#v6fM#q8K!$Y=5!Aj3rZWeR7BiY^-PV6IazN8 z%6(RTup_Y$E8h!flh)AV>qpx*b4z- zkoeT=c%~#bwKCrhSRKQ4QwKQ`l0h7t1FTba-qvLh@J+~A9quf3)a%;tFf+(?Ke)_d z2{;>d@}!GD0E;Xap7DB688d3=sBRd&teXCNm1Bhdw8}IpN>`=_f+d%x8pHH+9%*AG z=eA;&;tR6(0(j>KvI>drtwLWR? z0=?h!CengbA)XkM8zc;viI1wU@IFi8eZf=pE|X8U6X|NM---jkMNS~)GoUn1HOv$* zZ-@x=^Y&5?q|TqRubDC`p(Y|XJ(@g`lsR-KdZQ{>*M2PycU$=ui{xu3g=b_(jK0TX zx$xnjHQAH>98wRr9)X%<0FlYsE4`A=WLqh&$sBiVJ|=@|Q1U5#G06xR8!R=I6-Qj* zD~i4MsCKWmZFRh&FG2|Z5)dOXa$IJ1{RYI{!@z(7p5Y%;-*Xfx7(RE$VBraRU0Wys zT)+=7k~AuF*sG=SqdHkY8l-0oLgCQ0(eZU{na8ex>U_=R-98Sdhsp2FkU~KX$gbV7 zbbi$DH&JcB0@qe%1qH1e8)p+~6y(Inpnt03RMkDj{2ULR9eyjy5`{0;OH z%dpAV%A=MX`+DMo+Y$QotZ*Z&H#p9f9!EI0sRnDBpB28yhs2z)&H_4@M#^yeuW!y- z0c?p?gh2Ie#=-eHyc|$huc+{LQ+x8+*zevqih zydaNdXR;c(+4@qja*|CJ(sWNhYy1_HSv*lCdKrDhv^LI0Qs-U+x z-PEuIP#-`I?$-OG<*j-Qt90_pSnC9TpdnOKCLY9Jq zHdto50pL0e^_pe>*&I`BP<4k`W&t!u=yGj5z@$B1>~*EsEtWbD4T@YJ*D2%I=n&@JqN>_YY=XKks zR7O|&*s7iTw|#-X?H74VaFN9z?x|iSkxK&F{|cXML7E>&6LS%RYNp)N4uD+Hny-e1 zyi12-QZS*M(Mmh(#5^P@)=`4%RzBtDFjMsQtUjg6{zaMQO`cU6)Lj7}dZ`wCI#@xG z51BcKpE72ae2)Y&o(Pb+!fA$qyNpEyx@DO(ENw#uwH|9QZg9UF%9UncokH4%VC~m# z@~$TH5C7uwF`4PT8(7;bh1Yle*yY?fY~r2xt#92)~A2(q@t) z*?#`M2|%K*SS2(5i&TN$mpSbN@VNmG_LD85oz37~yP6*@q3d$<5mkMucYDG>jE>Df zK$%Y!Rssw3O%T;}iOEhgulPIKCg;ZL(*oMmw1YwPiaf7!LycemmnOKpV-S9Cg(L=^ zVFA+&s8y7S26zw&W=#L_*e&wv^Dz-X*9=>~*J8s4+%>zl@e5WuWM7by+hMH=CAP`~ zeg}ZzlP~1ify`j8OvT17105shN814F3*!DvfsMiP4b$U6?aZv!)GFq8$UQQ2_q!%! 
zd*YS|6)z^}^PA|@=>>$Xf+u>J z0ByRzscxty1QlgRpj|23{L-f#U>yb!Qh}3bR5(|UxV;f*z_Uxz#tID;Qa)}(AbUix zsS&=(Pi7w_h-&zZ(9x*eR(oR3>19s>Ix9{TS=LR8lYC?=)od7qW@UieRE99#Y^yfRlr_|YjTePiu zRMj5=fcY-?;NTN~b+yWcmCX+&fiM8Dk3%_r8hiSXg7i@cf(o_DCWqwFW?P1?@@Sp6 zY-2Fcm2M3PICFv2jq01|S3p(TL$c6TMD_VIcV2yVO)b`Iqse*{s4!9|;Xm}iDwHa~ zOrDZ}<6%8NqOB`zP=Ym-ngpcJ`$y755U86;_ zHpe*rNm8~-=vyhQb6C`-nQ6apSF@B?W?jz}5ZhReiu-xniP)FTRU4MZOfC`lS=mhI zUYT?Whiw#GgOd<(f+M9*Rkf$a*Qj!Zs6C0MZZ?U|J||ODDv%v0ai(Cojy%(?yRAPf zJ-EI`*mvKe`|)fG0C@-bw-jCMDLaYFh=gdu*-Z1cVPeZo->L97MHAscXez~N$UM3H z6PMA*_pw#6G#9fMfc*TLJs#FcSMTxUwElCvCM=47gHM^)Y)65~ej%{xUba_t$_iW; zfMe7nG%6()h-kNRO%jk>K$Su~hE8T{4_SWiPfL@eIamXwNxPg=tdF{m75+>BgM16M zLb9jXKH)p@U#4paVq{y|^n!OZ)YI(co%%i!vvok;y*>-N84T#!WOZK4e8|k9SG8|~ zHJP8v6R)xATGX6hx7*f0`T+)3rmf1cN?$0)%YN=PFh7bf@%nYNL_vGN1AtpJu754u z479u%UA_8{%umQ~WAg-uUR1q<1%~nToRvbKB(5c2$l3l>kol^5#@96+MQI4bJ{gsx&HHRsINCZKmbvwF{#xnvcK5j|lx;vtqcJ9KZ+6k_{jSbU}Q8LPF)GFYnL5W|> zzLonY>>-p*#7QtSMyIk&)Z7tBV#KuBW1_Z(J38^XD_>+yKr{V@i!SdP&E=&((QIKr;xe<-*K<~j-?Igg@|j+Q-O*hwR?s-{_WEET z^b^utC~WN-*W7~m26HnhHN)+pr#uj>u=Otq6% zdkQ1@8h-mFmy>A=1{C&)T7hof_+y3#(J_TRV`ZUVx|PDX<$B%|Ak%m10n;;Rl4!60 zXuDb^kPs{4?@g9u?5^Ko4;)>oz%?!ycjaguHq&re>aDdjz!3Mvvdi}ZoZ3o3;cH8W za0ankchM1>)6Pt&AT*CgrrmnOhK%ccl(=ME;1Six%}@{oK?#4^v^!du{NZ~OpX7MF z`SB~XU<@o4=-f2Btp2IsC&drrDQSps^V04@45leS`@6^l% z|G&A>agP}v^>}V|Z?|_AgH3IZ&q7X4F3&mhS8mY;kToE4|L?iwC=ezn7Dt#JEwp4> zd3FcE{UZ;8YjdpQYH4S}%%8x8c8ULrw>QU!6fv~5jYgQAP;;`_-#X=dqxZ-JmMNyI zWomq@;1nhpZy|yUHgnfUg@622-GnCjf$e9_~X( z1@h-a&|C}Qt(UfWPI+@Q3P|du5;=Uq6LLW~#+R4LuUi5Xv#iIC>!CArqp#LUdbTdh z6_5cjBf6$;vmfvI9`B;ypU{eOj&buB2dpRTYwiN&rBXVdjdU0b%`NM`)E6>`i=0Ch z)*3^=q_jZR?q^%P${1?Ymctbi&<@X%Br7P;$Ph5i^ZD@19enocliY{Il<;p4Kjoce z1yjt9yHC04zI|?B_()FKKCY+<`YWsGfBF*^(%>~STpP@OcWv-f)RN^R8dyCKohj3G z?7VMTStcBE=jgdK<9b!tR92>p6e|p(zWC`yny6Tx-Mudxt7FQt+Kved^xywVtIPlk zH`&9JI};VsdALbp{TM3gJlTBhv~839Y)6e;r}mdHE6{No4+72&ss@~vNs7`?i)e|? 
zNgy-Q1t=ss2OVzqK(k`l`+k_d1YbIqTCZ(`b^+2znWipk6d3=Nbf49K7VufWBXu4I zhWUM%-*Qiu@u@%f@s^_Zth&zhJ+$FTypcMrV>4M>c2-l2n@0%rvU;t~wkG|g5*-}% zFX*w(^08v|-8W0}&NLFcaQUj*ib)=8Hc{!d*0&=%?4O{C%phevUl8!?$~TUL+a-@8 z=&dUH#9Zg56Ac|~x=f!i{p8{M&KcMQcoiJjoGJF6CO(P(Zs_a#@EfqxQ$JPF0He@* ztFUM;?bYieOqyeQ$oN{#j7bhuw04hp;a56)@~3oGpgD*Pol=_!(P{ipH!C|D5;O z7qcqvgy*+Gy4}S!)5}DW;1!&YVl?y8mPG7Bux$(9g>!$+i(E3439R~TbM5Y{goB+S zjlK!TW_uIQC6IM!=A9QR>ILd7F+6{ncLVtKhYT126xzu>@^8AS++~h7<$2b>te>fZ zy~)CDL3)R~rG6Qnr@5D=Up$dX&V}nQCz|>~T>;&^!O24ZFvN$_zF%qs+~)NuBH-Sm z{wVFoz)awsGfAu`L&T~#`GWQHI`qa5 zzn`NJYJHeWb%OIWpVFFBkQB*2VO)n3^XTuiRj$lbsE z8(S5pPxM&<5%#~Ab2k$zt1tBfgsP~QVJS>ZCq0yWZ<-fLuSTP~I6nb99NMcTKYqx8 z+bB*VK7WOkbFCOpJlmXDc3#e_twqlMBI~_$qS8RXg!9^>x_8e zSd0KL0p>OHrr@>P&)@Wz4(6#cF!Wz>WWZqO3COiFOKn_w2 z$PiQBxDJNLFS{Btm-i6b!T7L!0QIaSpShc)FPKQKp{ojS%e2MsTKveAaLs2=M~{_R zWH59CDcxG;V1Y)&td*#S3|QphVD#rT^vvhI#uPMW)}!_+LqzsHTRo-4+^Vmc$vX|e8_7#Y5W(pNBfj+p z*@1gmb{FKx*BG-?^;#_9#oiO#L4L0enfc23zZ~0Hq$Eb`(4s%OiBuSdjOHi#tY6$4 zA7l;jL<;&E&0r9Rz9*?Cfs-801fTe|Qd~iwSVU_lr1esL=f`j@+Ih~-Vg3##i1IY^ zm23K)R%>sST7Aqf*q7nJ6!U;-O{d6qz`aqJQmrc^AXM&zNY5-cm;tE)xg|6Gm%2$H zu%yUqA0npTXG&JuPmBhMwAzMRZ_Rczwp?pFSRBa7z?04?U3bl7y5ODYGly01<^*fo z(@hH~H$y4(M`t&BJoPTGD0|7w)YK!Ei=u?R^%%0i$OTk)#qJ6USp<~ZR(bDqTsD$6 zC^o{aUzi9j90NMgW5$*UA~SaWq6;P7A?3HXY^~Rqxm+kVGTfu@cUQ0^Pvl8NSth>~=4|_`zA|$4;-MBc2A*kgFQh6-U;fyykDrgs`;K+yN_8b2xxqLAhFIX42%haR=rT%c((|rF zsgAooq;2((w(o#xtUCKQUB#MC*1bOMsJI~fvanj9FItR8&6=>f`Fo4Mf0gDBo>zKn zK73`qWry?GAGv+^@eOBzy)4k2e=}JOG4~L|v1MkZdH0H##DVAvM_i(}3^VFGHdOQ2 zKD0Qan4$YnjEw}j;lp!}q9e+wI9gq$flb~|R`H^)q2dpwM&*)gvVD#AZHb?LJMq2> zrTo?#>_GqBtM>%ZcI!l{?Iv28(Rsx-4p9U+M82_kPto+WIWxkLQi%BGBjQr%UhNpc zqReLIlIJH%`I83A8*WP6W`;qEVHxGE9+qJ)w^98EKPQ8%+9M?}vSJ$wH-i(lA~!k4 z2RxVUrt^dJ(V2*%+P#2Kw)q%9-S&}IT;}y{l*KE8J%ZKK9Wpbmwq-5JY&!JlemmRt zC8&SX@F{=7X@mm&JW(QSr1&2cXT}h)ZCtUS6wBNTg^U%n!~fB}FdH1AzDwHWe=yT2A2Z zyFH^{O8w8f(*k!$u;5S1v%xM4vu*X)qBiI58H^#dp`<9Ibbe#DrF- ziYTIC&+UZ6p=aW;-}-LeZh4){q-feu2c~2*euek9p{4oxZ)WlRyFVQdQjnd9oD<9U 
zx~TCEZWhI&_cvl=IW0#&JQ}5J{_6d_gR1df*IQhUI_-4!z!R(H$HnT7X;`9pZ~kXb ziMezxI*Xv|K5lnJa8Blw6?0KZWO7Dy#C71UnAtFRY!QW-6>L}VV7la|E#Ksk^&WV8 zdi6le+55PQC{z^^RWwrmNk0#&Jv4OZMsFfb!YUe~a+O5O4z6Ny zr)r#KMm%>XCC%!g{hnyi+~*6Zk2$}2>V)&ZaZje!A8?N>MK;k}FE-nKS>U1@LR~+> z>b01q)5*iXobP5BRW+-m<5JLfgr$f{5FaRdkGBV}pgS@5mK>UH@xnVuIi?!|xzvg*#$|X_EDSaU+m>=?q8~V=N}6B11K@b?rva+>G-CWE&BEo zNTj!IlcG$GT=&D3x;P!;_^NkwDwoEcjf)gJT0yVw^Cmb&KMwfoM>`dtWbGFNcYYKE zwqI@u3F`W>a*DEWj$)wth~NzJz8NZ{y=Kr{zM^oiWg>8%c7+PiUyOUdWG(3|rVwVb zioVWSmw2~F1n>SZdwY9u)b}3jJ&RUun)Kcmj_$#|^eWEl2b2803#gw+iNh*_VKC|> z-8TW^J;oZ1on8onj(f&US~e+DRz3a4=yvk!);~R6Pw?f==E{gN7z9il%GNpb|LMEc zKl9zn$dROxAxYZLH45Kux4vmyoOBV;v_SUu(KHWb6HYvc=V{GE4Ke*)f=HE>tM}plaWBW_e|a33ULU1*a2)ZPtq1{Oym1gXJ2abFa6< zq(A@T%YMrJH@a%xnu3*bC2+ZF*_#2+eq~#$@IO@;Eth z+$gJW76?C)TyJw5!yxV5I+}cp?~PyT)y5^+nRv_)^F zf>EpaQGs+tWq!dxpJOQR6|P;1iQ{DRV{a*o<2tsqs^}BAV{NdLjOoV8(OnAwmN+Y? zCdv3lIu;0~$2SU7Xr#QzOczVz{#X8>^r!uG&1yRseln0#1~J!nDFI}-g61;!znz7^ zBjr83FBcXFgUAY=rNb|-OcmLE`T@Giskkgpq4WFOn_Z0~Hux11#9`f$0j%V>HkZ>i z$l@3hZRRs6WOCv7CfpY{a(WVl-4a-Uu=8{S-k4NftA4kGXWnVi|Ge@ulE16hv%e5yR8pYfS>m!)(Ztg0d z4=mmGId1ywx`yx+=FFTZZNtwq;`0QTriTgC{i+$w7%($a=8!_z3S-3OZdtq9;#0jC z>M*XW@h87y#{a{w_+>DDvj#Zz|C{{_P&I<8ZECoqdP|Em%WXqOt(Lf>`U0|iKW_@+ zKK!Mq!p|G%1}|NCdD`u|hQ(XdY22I6LL4u$;q&_O+bg^CHic2Gje+RHZqI12lutC& zZkd1kVDh3>Hdy)9g;LC~rkDMo-*$$JIe)+&TnV%j`D?}E-Sj&A+XL+#;z zktQ-8H;qvusUoOGY&6o+>aK(_olKu(;=pj}68teKOGFgN?%IpVfJgoD3pW9}i)cMa zE=hnj+wULmu-vckZS%^@P~}zT%sG?;4h-#w9I^il7_Gkp#&SG0l#|PYhYXb}ke0G1 z#*YH87{heJNv^;%aLNHej>riNY97JUw=n_3?9C5F+gPNK3Q+?IQX#YQe0Q^T@7;I? 
zL6tO>-G3tX;xB$l$Y~{D1`x^EZu-`F%8GiO3raYCPH-Tqb6Q0BT>#e@D)M7xA zj4FCL*zcJtTK2BYXs&}u))97NmcUgdEcj^(bpVyXT-!hdZMV91OCR{_s zPCHBKz*w85h)2{LZu3B=$G;9~Y{qX;&i99E0yxBt6A4@!7m79I)?x6z)MSjRK;l)( zt8!GW$d#arB~7(I2p#r2*@AX>Y-IP z?nC9x*d20W9AhTDo3cVI#zNfvSTYGGILJ)WUTswU7hpy<>~K)wR6_O`lP2fA}=spv_caKGc~XwkwFs{uoPk zC5&v=$ER7-O6fBxf-57$;s`p4oN22exCQT?fSFYR%+r;rreE0wmU)rb_vj7+XH|q5^G$P#&>vgEtGmZ%$`2G}lBH*ko;N95p}}0!hMh4( zTECC_tGD;h*sk{v-u42*gkvJ02My(`Gx5Y$z8q*3b(kERI-kA3!#9dd6GxuWut!

PGv0ndS=R{(X!`c)sT<(+mOTWh@I0;004iXt)!C%-GmJlw1tmTboD&D4(9QcBcLa z%MLo-*XNaV(7>3+{5oKy)3SGlK1!SE__rAH=j1TFD`syw682)X^f^h-NKZIO8lcwc zG!ktF$3$zoXPERaBru3lYBq*yy)-sCdap4yL&_f1Ie1x%$T_eq@DeAo%Q7wfxI%)o zmod#F@Vh|{M#yQl8ly)+{jQ$pt5BX{)vFgkU#qR8$GV-Q+p;ClrG%9k7AoHeF@3B7 zw1KGsT}*1JqTMV78d>YU&x}zDz4birr9$ufo(EXSyJuE1lgrcFZ#LB!_imCh2#Qu% z_1yuSP_Jj$;PQcX249{($Y2`QXR?)JJX1gjKiFIFY1MoswXKbSn|!??YV2}=bzs>>$l!_t@n@TkF!>% zvkrTIzn|f{ulu^LyX9@8#YT|exw}7EXGID=xkQE*53!*XpJ;DpD#!6J#7%(5r9#&t zGlsPK$)t975Smt#0w<{Ennq}vM}y-~Nt*UU-B^pD7w=q}T|-p%p>$$xEh$pv3p9D~ zP!Ext_vFOyq%3{?{^iYciOqCY0%9i8-Ky(@Q#%i2Yd3HA7h0Hoz;iE@uYel&d<{j% zqs8NCNtQ2{_P#la>_dPf4SM~vMz~GbQ9Pp znXjCEo)ed<==GT@@35##rf|L%tnHT{MSD_nKGgZfRNH8<6Tdo~+EV8)6aQMYx5>{x zgfzWLOthuvyqi`Q6M^!V&4=D`<8@Yfm2Y)T!$gwK#*}?}mv~!bw`>Q}DOcC+%!&FCvFAcP$u-3kin=0>y~!!+Tr!d?k&vl+RA zj#K49PFvFG3tRK3MRDu1N9&ZqQi{7m%n4@-I)l3v49>sG z>Nz0R@Q{^=g@`RWM+uYrDE^wH+^N17{P9XY*O`M}%TG|T+`$MS_5 zd6JstM~aMR^C~wIRy%9;Nz~tYkwrH=>t=ayf=POioh@QVkO|Wah8QDz3^9hEU;*Ug zw|fkozB+%W>UI8>3x*(#fz71&Cu>h9;qFEf8(&6#n0UzF_N}A0wyFsBwQO# z-9BHF`Bu_*Yv9B4x0AdZ?%4T#{bKaOsov(N;J%$bgH(ZFfc_7fLjIt3W4{XIe$A(A zD~=9wV&u4XHu%9mDBY4~Z85B}<_M(6$5(QBtGmT|}kFJ5A93x{XjtDC1g{9TCS zFP-2T@{*7FeF>&0{{s_-!hN`&Xc>G9_rO>p4^}54jTDra%lEAjqzz%v&6PJnXH^3R z8(DF5j&g^_=41ffT|qBMw=z@-$TyBp?=J9~BwVVvuu$?XY4d7hIKxXxKDWNbWuCCN z>Cfg{u+q^W$$IZ$vrzrZlT+Aiou3Ph7d*;%Dsx)hNA^ME0Q70~4!`_We$dJ9SVhm< zFJ_56dtgXpM7(WB@=&;b3D`kXEmE6W(JgYf|FKJKOILk=bOZ^1$-B9?19zX@`WSoBM&foG{U!rTMAOb0+zCX+$ zSx)31c;KDWn?5H_i-VviI)|Y0ACUYt5>>^yo|bDMh0~Gn(mx?FC%733Qky5)8s3W@ zn_q9YNv>XQtky7XAd&Qk69?2Ml!{9~Vf5K)2MBa5sXzxOTj~U>`6!akD)FvxoxKFU z`vo-*7KBLbP*40jk;nztF$#yfXJx5-OX)iJ1o-m^6hq&fw8>wu4vO$6*$!op=y6}` zPz>;!r8V)|8a*oHIHN_g5|}R|ZFjceqCFAP1C1;`*x|RN;G7>hci}rt$LLG#vA3 z=YaPk!mqUT&2n-nPj1EPHcX`VnLiD?464QBMrZshRK|)bAaGNKv|fVu75<4IKtd#;PL8E`ZhGUHt>$3&hDWMwX@~H@>iaU-9>C?3LWN zMg!YOX23(3)g&L)Bcm!cr0Z@>uKf1veSOZJ5f2P;wG0XM89@j^NZZLhNusNLMiDDc z#4ah5Vw?XiF|1l0q%<#?D)$bELYYC1(AAq 
zo!o6oQvc~9?7w7)e?(rgeWC^l4=+L85+^6IILVfXtGUiP8LHuCy7ED=^a8x-L!fSFH^q03H!it zv}DP-W_1Rysd6>~#HyL}TozWqo?SAtt3(j2`z(#s+S2o@;T|&dx|#eozj!FoT1`*U z2OnlXM8U2*`RKxbXyW~E@cs8USi7HjO^crpGHe+f0Bk3=iVo!5XvW?kZ1{>G3YTQt zoIIbJIKLL>^6QV!XbE0228H1y_&jpE^Dp@?WQkJ9`n_{_uw;YY+F+y~-JAw3o9OK% zy8tBCWR^-ND+B#>dUPOa7hnJpM!Nry>8yHhhBSUhR}ir#Z;ls)-r2EPfV>udv^ zjc}S`@Khbea_lGl^$B&<(147X8)$D=xR!V18r=a`j(^J>`a9Z0{YIO?VJ|M?84gM+ z3EoR_?9mOd)wb|gXbob6(81K1ub)09Aa!)Dts!C%g&CAFvB7qcV_~14Dn^LHG6+G+ ztaEgJyjOHcXgv8wwLFte47pPq+tu#Q23qfurZ`#4D1*`{dFnIBioe;y8@;YTkNSY_wi81hL_+!|UcdyCQZG-IK73d`KN^7>b+pB>}VrS=vEFvZ%86M0B<-%QEY0`N zTqQP1cQ@HThFS68ZQEn{^siNrtuQT1r44y;Iv@RMIIgQ7yWZ=V3K-K|zSHkOtAm-Gl&0=F*hpPT0f3iR$0lj|U9LMCxZ zjQCIa)cg^~?|NKlmL>-Oy}!3Y}V$2Lf@KiBvAccH4+K zX2SQ+&jUY;G>HfFASJv=kIws?!cN@6o86U<^hwXWe*Ju=7{R#UYd*GlWYK4e7DSY* z0nXqgZGnU122n`8(6!=wi$h;0e!eqBXEE=iK7OwgZfOQMM}HKdd=({&FvV8QxCym_<=r3us<{Ij=ZwlE93uL5ra`5@ zQZ8!!?9tV+D?VvV9fYsFjT0#QiF~a+Xgt3oUMujLeQ5|r2eCQNG(&jI0QU;l&Y{CQAKZQ{RXPJ})GALVHNK&BrH)r2 z{t%huCztJfn%c4IM6iiew}K+l3W?_sO++jX^Kw7=mprcUxN|ylNPgX_LQ`$$H*O6% z809c;EIUoZ!nfO#aGV*(u#LUKqZ!X~`j8FZN)datL<%sVsqjb&LaYfR=CM|xcFAbb zTe;i6Yhh}(f8^T|!_M!o-sJzwZ<*b2*f5%u{{03T9a+pB?TMS)+JA zA}&f(r>@9wOAzgPD8sni>%aCgx#ZN@8-7BhQ}LV&66T+n9bm^@U~?O65WyTe;x6zw@{o`5FSn=eosTh&b2LM| zbIokE0*lM|W=+8yCXclaeqn=tqj#DFXZ?+5;*^_N@$~jhERz7t?k_%&DuToDf+}O^ zW;eI9t_Lt;lfj%Rf=)v&&yjEq-l}gdlcuVxkcRKj?wD@esk1Q)HjKed!8-!f8oYD* z15>+KE5%&u!oR^4pJyJRG^jMJ477 zHS4o0-i`2X9TxhboLe@-L-Vk{H?H`hK@8d60*Q<7e*=5aFIT z1{aE9Pzq2H{r;yv@`j((J7jdqug-PE`{I)FL zqbg@=+%iOurO>KMt>aAtt|t~VQ|9o?L$_L zqkr$`D*Vf$I=E+U$hs%j=s(R~S1l#M`fpDQT43$kV%kP-Vm#T+UGIll;UqtCLIAb$!P&FBRJ)V5_MwTaVwW>iQqg5v zS*9|q(y#dyd<%WHR$!h%^F%!}y=GP%{Nf=gBDe8?tPrP~%?U^Yjw=P$5eCxt>MB#G z5wZj<8AyjFrk2A%GNeRDF6cU&;3%0kR};0F)B1Ne3@j5*AN$7=4@0(YpjyN7j>oS6Y1CMtamY7y^4KNuC35C3_Ge#np2XFbo+q0M8L)Is78YZq z6dSblrKD3zGWzVJgkeN^vZ(asm+=!x`(3G}BOVRB-J%J0n3j6ZH9zc)$5Wmb9&aAr z@st!7Z|e0a*3_#}CTXXuNJDUbRJ*p1T5UP{sItM#LpOJ(+~qs(C^;kIRKaJ6u~}vq 
z1lSrCtNGk<*Lu=Mz@5T-nXBewhV)0JL@y#6N&lg2)ftg;=yN12E7S3%p~FIF{$AGl z{Vr>32Q-y=&k{gF&iypQruoQgO{uSu2&xK73y;%fDTAkJRexM`)|+Mz4TB=}1tr5V z_5glR1-Pj!$Pw5^#YYRr6OK!>oS$VqsXp8D%1~oAXNb%>pL}}?3|c~(&n|Pn_3OI7 z5D}?hzXmjK@49IvKBFlAzP-ZIsN~#72j+rD!WSm5$V{#(Zjc$B3SxH&9moN9@9-MaIWaqi&IVa5wY3X+LlG(k?lth`v^ z-_?;xm06KqESu%u&!;OqffRW+Sr46E$1SNl-PtieVAPWhfAy|?v2mZKeBcR33^D26V9U4mQxS@kAmeLq zb*fnu*lwI3EpryzPcH)6N6o4ICsA0DRcb!nFIi)Gc_ zfcBCNIfL`tGZ4nfEqvmTWX@6@%d%K-y$o1toK#8YXd@-=&zU#1*QRjFv?EQI8B7iO zJ0sGeq5LM;NyAI~u~*+lYRbFDP2gIej2{o%Ju30@ON_OLj@cc2R73Zc!mVQI+MTxk zy8$*rT0d0dx0!8+vBDLHzc@M>)j#a+_apeWy? zWcOL#MQt4LAh+D!n4iVW`aAefAw=Hl!b+`4A9bn_lNX83QLoRIPyet^UHS*>)Suvz z=pbpYpHocb+L>XASSPKiq-|vZ7OcOZxt`3?@O(+5rW}ZF8lmp`4DD zZi$9|8MigLyFwq?m0;{n!$YfiHoIW*)!Y6L#2mCUQmYxSX#;_dR@nTw0E1Ssd+*Dj zZ&hyWzgk5?wQ`x+9PzR(&}`H%_}V!m7C+z#^4(IX`SdbW+6{v2{cL@8W1}EPiabmX zN&6EflUj?Ec%G|fGQ0=|%>`d_wHMcQJSXFCw;&;5O8fxVnj(D(*(8Pzx*Th;AY|M? z)e`v7IXWlVhHXts3H3$)jGkh2*_nhGEc}n1;a>|wNZmSK=Z)D^upO{#WI(*0gGM}E zac^OKb@!d>y$uyw-kxH}9eNEMJZn3&HRr0xKpD(hu;g)@jA|fmv_x`>=pIy`d8J=n zxiYB;ITR)Wc>W;A|GDwIPmU}5z58-Ib-uQ=&WQF5*;A?No6pIP$a;<%eK;&|yw+wO z&Nix#q5j9C9icpB5s7#xO&nG>g%h}vAGcOKoVPe)^a7p74R zK_nsoiA6Fnf1MwZn#va7S*vbxW`mK(WO%FMvD(;_89f0p%ZDa^pGHiNPst&-4VA~= zIFbh1FTQyG{)9ve@r^3}`Jknu4?HhR6h`Q*3)QJ25;K1685Az3jd;9UWe#WlL|*l9 z)7Lg2?jwweV|riyKKjVn4Wp)c%GJd|T~J&%=n#Y6T%NXYG4 zC)>5@4*n#%;GU3}Gni%w8u+g8>U_K#@^&!ISH!;F1V&uK`0Dp233MIH5A!HV43A^( zoa#>Pll2_#;l>u+YhyES8ycoB*bezdl;fXwsiO$1sw8B3BBoJ4@Ir={Rb#Ou#G-Y$ulZn<{;5)slU@Q$DG-c-vA<#uH7k zx%WZ|G*Ts`TS4&nXdV^mG?+O_pMUUZ+nGxF*~F-Da`e48!7(Fx=k$C|#eVc4WVv0b z5p-~&esqz`#2URP4a3U6mcIyRe(;(pPq4yni5AN{rzsiOG~gYi^#TvrLAoAts{NuO z!~9UDXuO{UW=a?C{AL_(@9*5F-*4^{GoY@_ZAXCk5=kWRZ!dcmg)c>RA7Zvn*?yro zhlo*l{9~5Ke5z&A;>nK-=0A(UV1k((Ac-RZ2*R4Z1euVU!3f@ab-ccj=bb~Dx9hAn zO^H|inZ0X@bVjuoBn58h=nWmMKsR||9-~7`MYsneN{LkKh0s(B-pu%0st}A(u~aLB zQaNtc&F37u6yjAUu%ZF~HSk5@z}@mw3UxV~Y;hz;QqFW4m4O*M6R+fj)#<0l?QsJ0w5W?**pKQO z=0o|f9xH-Ut-f%@HE4XEsVchAftcOmrj~IKP~n(+U4NJDvg7tC9n{wZy=%m%ttpvb 
zx=Y#v=(PrNtj65T?u80zrua|iruuf-Xj?sk>yKbgE-O3Q+h&{!BLT=4ScCM?99^)x z9+tCxFfDkKbi61iI%MbesMN?0Tgcik$(Zn8Xnm6eqvOmVwJG?kV0J2$Uz5;nuqzcn zNlfG*oy}56lQgIzW?JA(AxN8nq3V^Zu0aE}vltAqB+=2p{56bs}(uZ?NZBn;$uN<2A%6U!^EV&oMV~sODb@eC-bD0?7Hr{ZS*d`)^Y1gt;9*w z+9xriM_c9l#8;BXAz}^bV46g5P%YFrh2*QcU_P<8USt?M^5agEt`AYd+MwTX?YFRe ztHS3o0n|91#CCO!VMg||VlH@Kc<0^Hw`+j}JvsUG+TW`X?C&aMyrC-{7j>XIog?1) z&gIqKU5!LF+^&5&{jt*H{IDP%I`94z-|b&cVZPT5EX5;B9RX;1aHd1diDT@W!*&lU z1nJ+Sq)Cj9`;qgGU&RpEN#0rF0M(mgj8n^?Uw`eP19`svXU4Iz`FfC)LixO2%?qNP zPF}fVc$uq4tqPi{S3fSLmI&&*Os54VfD^lKA_6-j-y~F<@j9@x?rnx$*Ju{z#0fvz za7b+xLwn>!_`L94f)?9|w;~xMm##8PnqMWhPhh3E0-`HLRydYtS?-)_{@N^80RO%- zgKOm0d{(k}jM2zn;pG_&F>5EN8_Z{T6e0edy1J}_k=12KyPuhx1|0JTpU(IoPpj@J z%p?5M+W~*zy7K#JO}2Gq;~pJosDWf+?X&4n>_e6#Z9c_O_vQA{JPO=a_YNfJ=vt@w zWqf&=TV`2wJlm7cLJ=ctk~5`S<~Vz;gU_AXGdY~=9;gts3mZB2w%3JdBP&Nx;m-wp z$b!3<7hb3sMQwSGip*Y`8x@aQ@wonEN@@^zDSid5^P-XhX#6J|P~7o9CmJFxQeS!? zSHaAw3OqXMd7b<>aEUq%ozO*fa$f0#sKmHH-wG}ywGNRu4Ib?vTnrXgP0A=;=^34u zGe6_UxDtG`)lrjcjoOXGhED*062(`LOB0cO^_p(Ii$o1^UQ)@DVV(RK;C$Skv=B## znnrK)AUpnyr`!I8{!J?C9uzmh4`&_%8Lbf-+5h(-NVg{Y}>C>0CqTPgQMAE%nkme3zeoC2V?^Is&B-H1l(1Ffa3yU6@FZE%@%F-Gfsn zpoiyhgRqn<>KIEFIh*UooGwLd6WvJsEI^QZr3y;hhBW>=lp7@1u>Kj-&YRGK{Drl= z?+D?}Dc(*G6p4MX)>QUam@|N{GYf8TS@A-qxVN2;ST5)Aml{ZuKg!ug9vNI>H>-JuEUlU<}yo$B%}ycWq)`L>EMb7tMBD0r}_B} z`?b`v)lKmPr}}@wI$K9(NMw;05WV&xHweviP9uC2B*{rchI{y-F15OiRVH&WF#ZPI=_?8d0Ws(C0-s+3n01 zODtw%7Fl5}#WfF_)o7YdS}u4MF&xo?eD%gif+$-2eL)l>5B}N8WZt;qdQ< ze{P65KXp2+<|ZYn%53ZQlkz| zX(%)#X-Gdf&a2lZhz`3u+$LP;c&bR1=DPtIjBi<8M0@R)O}bxfT5TS2#2C+4thVh( zaIl0_&{DRmf3=!4hF(GofSUPQw^0fTnhomUl0}dfbr~?hQk!HO?*a0|_$4E|vo(2D z7(I{ky}H8!NBp8-cA3DlNIPjp?P#kWg_Khp=P!Bnp>jh6O^9g6mt?u_e0~;2NkgO= z17qfhYu#j>dC>{Ch%2wcXy>Idaq_h-y81Wu{!VsCc%)F<`KKf0O|1AX$7#OWfqB>( z{CR2#V*0!8E_tIDJ)fAo56C=m^_&yCgX&j$OD@C(BPVHj@-OEZxpya;ISu5u77LeO zWuGKo3i$~%bDRGU;@+vlN20ENE0X08J~Qf4D?YPBFVD}2B08|YPl*mm3K75hj05!M zHThB?%}?;Mb9LnP{^0=<27_;3eB_vXJq!dhWVS|p%|_mNQ(a*pAuSpg`vJwL75no2 
z`YW+XyTGrt9XjxOs<--QQH^{l;3XHS{iH*e9=(Ob@E8q4=lkj-0LzqV@*3hAvwcR zWQ{u7bc^#B5y53cB6gOLGIHGRELF#yyYa28`EUpJ_y?G_(m*^0MvE zz!`nY`v%0$=YIaAKlmJk-;+p;)fic?ot(i)Wn>_lUvNZw;?NP&phTWr3gr>axWjn? zn3Urc0`_W=cnKsz#-zGziA#>FMu^xuTT2pGds{`Kkv2U4K`$}!7zJswoqTTe4S$aZ zoCIU1$ILZMR7Zm^>g1OQR=st=+eVa8<7%f~kffoLm~0q^kh}zKUU7rk(!EkioMdpV zqW=(2i{h(h_=aF|d-ZositukuimR`s9s9)t5r-sEnD#@|w8uyR^g@`irh^Io!=dE+ z{T7oTn^F~0t*SGhO_kvdGHG-*HwwAw9+8AHvIwf)c&MQs_0qrz2VGt*AyIh}W zMfi=pl>9syPH(9c;oo8E_jXH3*d61a5=NqM)Hcs6#1&UI@?c;qaAO${)E$wd;@Ft_ zpQhB`y=^?l?{DUC+wy9r(i5k}Ar-C~I<_-u>YgBNG;4}vS?HZKN!7Tp2nipn1hP8j zT`$yUI*Fe9$(5*uKez$8LjFUn0*4t7j$eP`x9DdTCC|3ajAtJ5yR|e@GYOrud!<@0 zZl7T%t@*}tdW)%!zAg$RP3T;ccX^rvNYMsRX=w36BSBC0>U@y zg7TGPR-srjNe8W2w%AT~ituqFzB#yP|kV39+Q&~ibyQX`Ksw_4~ zt#*6a{4{hP^>6P+zz_tb_1rIH?(Jd_nA$~*MA@xSv7k9(Qp*7|q!pv_XNIosV}|IB zy9S)dbe4b8hh|OS&wE47IE)BVsfLbXKO#X4t=!L(ZU}P3>REvD`r~IiWFSU3gTyob zMF8IA;*bQ=0!&3re_#N7A#ofzbN0^1RUa|3)89Vn%SB7T&>%KndiJEi!@lQ-I83Hr z<$n#~r^nVMk6WNNngDe>|aU9Pmu9DfQUR;!k zbl9XV`dD*A`-RyvoGtZ?xHTKDv~dXa5JrOBX3+6&O=-DOxRG*rmt2a?r=>7+F>O1Z z=LDbPQ9Z-`rBHP$?WF4Q48SxZV;_5vrxF{m8bS@uJ+P@TlaJL+?K?rS7=*HSIV+vro;o6 z+=91G5uo-nWjyn}lY)@PQ=F8x4)-$Oshfz?fmVY|dTRui_I~WJ zCuGd*XSoYhu0;uT0`Kd{0@ia|j|r}tCm!MJ1k5s*HB1=L&l^h^;@XlWBv@`jsTh_x zC~`^Pp4%M^4i{a}DrzJ#L7X|hWVKig5Y{MUx;pOn&{d%{wjLv_ID|=KAqb-vFTkY+ zpm6+U)2zP5I*_RRGKaXG#(ryr`tkR`b| z@kN?AdkUORvHky5Wno8v#J(a;Ib_DkAWr_NyR$ST10fNV;_WCKcbg!^_ahPKG1J_r zt2@TQJ0dP}2fi&rE^rIzHgpk{OZrDX4t$qlIBLa-`6e@gi$Lm<)Dn5SYba*V0ZaZj;(HaQGy5l--s478S0~ z*KZkmm2s4eePTbQgbDQ}G2}SdbH#N70y_Ec3_xq7qJ`d6HaOk+G-J>oHMBEW>d*>N zRR)XVxRT;uZcP#~ios`5xG|gJ+&3^U44Iblca>J~ZwV08$g!FngiCGA_|HI?Pm`9V z+3~sismkniozjBd?4*61pY^)q5EMCaLjL($<7G?rbm@Ej%Sk1`)9$zaA5L&AJNRLO z^Hvon^CCX!12nN6^}{^9e`g6)OT5WW?V#G(Tj60i`@zVz=I{|!0^Io&2h4NStE zm$S6Z%CAqNa5^^JZupUd-hCHKI2CxS1fE_BvlM)N6Kc?NM6%;+SRTJzj8Z8m`zfn= zi9or9Wi2kb`{h=g2ofVtViDIB@=5!Wcta}rxf%oIym~=kp)G-&Ds9O3Crl-6vw}`l7uPxntXIoxR zt&hI%ee{y1G?tMt*mtJ1;G}=lndZUfWZxxcVByL>S%j8?8zK2RrYb3bD+n49UR4%* 
zzvhyO3#sap1AqBGVa1UPGe6;aIEJFq8p8zHF*tSE1>ZQ=VAv={Ny?8lHxOJ%h^%uQ zD@jqZ4L*CxE`;ckfm$7dKi62*6BHpQOn|MbT3^suAMz+JU)-Caw=XaI_4Vrd)vr!B zyo$49jjL`hqaGw&eU>>WO!@Ij5Gqc|j!|8s<*5PxJ?>YZw(KW3+J&B?vA(9p1VJcc zjXJpl1mq765HcS)NF>WFPe{%l6h(SmF*`~KksO*7Zq_0*m1!)EaMb3{8d~Co=0RTg z8X)=e@sRG?Lzd2;s=d4)F^i7 z)4ct$I#piWgU&nNrz$Zo{JH9EGkz`*R-Zt^mGsu{A2V;o8H&1wi@xreTxj0;=GJwj zQZ^M{R-v$(yEJcVw?c62)z75F9z8r3rDs}mIty+^+2756yB0hC6%LfAr&N*q&*Tf+ zyn5Qia3NXHw(6dG9@^qrJRht;9{r4siVi2`#Dq9p7(ebOOlMaRdeF}!OTnIb>B_7_ z+8CQ~m1FR$c4nNO(CB4Hf}p6G(4aq`4DygZM@+Q0NrAFtRA)EO#U?!kQe+zT%+?i3@0;8d3q+meKd2C_>?iqqA^l!c z_Ob`meYl!y1T!C=;&`$rQ`lULmIpI}a*yzjf~%|;Wm`n<$-qihsPzesJo>%UMgnOf z(P))s*Se{-A0TqM_(tV}^}Uo($B37~sCh5eM;SEXJkPIL05y9bu4KiijIKNJC9gnl z6tsOw;Qq@@8~1N?9U7Ji+)qw%WJcA67!AIZ($nBm&Pw2F?l)Tip_VQ>8PQ11!a1vp zhfx5Qg~N?VizGsFMd?e4p1~(4-6(}OPk7i{B|BHH>-D_S!YSjC8uSq}-+J+j*mMP< zZ^9v5?Ls)=#P858^DJai$&&6W9p0T3>o{``TQ02e`>{~|tH;7jkq?v4E(y%@R$f;X zmHVg;dKuFWfi@=BX_n7$-4-c`mujI!Ufk~zxvG-Hn*CeV0CB^t+-_e$mI9)&S8c5CO{Af=uQC3iPx1*MZv zqj{IoX_K*)qoYb_LBpu|y!AbSW!8fk_kS!*MY&&E(Ge&r1_#g!zJ#oERy{f6w-&$Q z(iOz5x5ll9`c@_5EqL!o<_nQHvGx2$BK<;;Zis8#b=+O_TNX*{^ zAb4l(Yu1`UD^FC$OZ10MJy&HR+zDtmM|Fy-rYn5>H4k0OjsmOsJv zEGn3Ap131+oDDZNr4oOx9zZp1Ym>1@Bf|$IuV7dRDey8G_V;rg}^nAM4L`!N9w;FtkGTy%B20GDs$h{8R&fueK zn8MZFNXq9v@~uYS|>16*1M8?LB!RC&t(NRLR}n)sw@g^J|eDb=mt9#4qnR) ze6b6SJ#oEbmsg${VDNah)%72iZul;Fyi(6Ra8J+TaTA@esF8V_MaFQv$tj~^-HfJx zs4Y{RUesh#2qt5{VE|`-9FYr=?0*ma@dpRe3Du)~O>k5W7}-RP*|ouRe;% z(`8wi1scs&LwmJD;MXvGmT8(*y_d!U6YCnPA1NKd99O7-H(EmUh8-U#*L(B@qS&i^ zF0DTnx*dMyu^`L#CXhaHFZH@tSz!6N0Sm__o^iY^)pdqDn*7rpGHf(l(o~{ly?1Df zoJuERL;A}g;QR~pE3HuKQS#kUX5W#@uVaWe?TzD+6ZEuv&!0&d72(O>StPa6QD z-6CP)iRWkKp_}^myav5y*rYTyZbBX3(Ly!fPO)gmrVdt%-iyj_Vg`Pt8+a6O;drbk z`8hxn5+-^PWbOc7;#p?x2KDYerf1 zKQOw56|}q`6`TE;G{8|^6e__p2~y8=FE2Yui02ART=a?)saj(=v6rZ6V5!WHEcRl&)=wgJjg;fs zA1Gz<_}lNP{`#(N#_qqXFE!YDd#r3HruzJ*E_JrSp>{W+U{%3g!8}BTl0(J z{BQc8AE|btG=YYA1I4Cj|H=L-MzKrh%%H#A5qh%ndR}ZHeJ?bCC|d2whe0YSf#R?m 
z&RVc@>+(vtlJ@0nW>m%_&k6nxHaMu)q`7U<(RZWG7U)48_ZHaSL^Ct2r5^{M_6CR< z6C3aikIa|^uG<^~quC4;T@=6i?v_pG~z zALwQL-U5pMW(!=p4lpj{oQCDq~|Y zx*hutDQWLRMCc_UKH=50N^Wjk)UEHO&F(8DTJhP5MzWSi!fvdGI9IxdmX>Vd-BME0 zD&-;EG)GCRWYd3Ktnd0EiC?>`Pxs{4l$Y&qm9=*%U)>8aewKYa$^b=xGRTZ2{ro0o zY-5If4hDmr-1{uh^=LP%hE9z{FWMP1N#FmyWq&cKk(*0 zsyeZcs>ErZX`Kxu6lTLzN6Jh-HcdA^-6+ubmB#J^Z@oa1vZ1Pxa>LB|Q>l_YBJ9bZ zWh(nV=e;ZHsS@SbNX}eZ*ZlA+FM><`2hL7n>^b{vqGWg;r_9&c%oWgbxT3L$4(w&2 zm%>VEO!)ekrt&!pp{cL;2lx^u#@?oN?#-7z``IxU1T>w(PTBNf>mn^Ie%go_HQ)H? zqI&xF_lwm=1_(@x7{}K|wPnUr^Bb7MWvWwnJ1q9cO3Z1CR;~`^U#@2=ySapL=yoL5 z+uVYWFS&aNEtl@03X{3N3@KjR415^z$vG9rigV|p_=b|jmnbyJ|Mwi0Wj?wOzUfId zvma{`=wXzUcg-D^ppq{dx7T$Y%{y8BK!E)~RBzPl3n~qq5sp`|Q|L>D4OOuLoha=K z+Ap=P;HP`H`X!L2>@&yRXl~u%@z*!TE^rJmGBx&otH`KDW&yki1JAVb6324$h0;bI zc_Dq33*XUnB9>a+tyN!l2BLaDI=Ak2+@YR7Owk_tVk5^gU7^~qeI_*O=74ab(#~#B zv2;FiMxWg6Rr1$eqlC#Sh zM_(osaJEe>Sl78*=W#2~F74bw1?(+p1OLA+1rFkUfKC5cE8B#~{F+Zsb+0sCAiaH% zHE515Dj)DCIw*E6(x4`i@cHrFBGMQmOUv(1^sEJw&OA-v>bSJN?Y|>BaL4pv%)KDZ zf`w*NL#KfmqlnbBJuc)vpI1Bwo-o*#ZMB_;^bptEal0t2T3M=bY7nl^ zD+P*xZzd`fqHVG{aD{8J4JOxa`-u zgSOibeHC*2p_g%Gz)f6U^U?MhhXOp<4DDAn&9~{k>8^n<>D6-L`%#3T#vMoV)fF&q ze_q3Q0d&6WtUg4=ev#%^Oa~*GYx6ExjXR#HBwM1+^~a^NMQ>*VZPRfk5a@mGe$on{ z$mjVr-0ID3d=cy5ebByL640;bD{Kc};K206PZ*_cRG`v--81@%XjUm#hrMcl9_C|f zDOOqAqEu{tu_IQ>uFxqM9bcZ#v?D%N+Q7@Qdc)2tCmAxGd zrwcVZWWjMYD`PQ}FkRuYxlEjEN}Gjf9kd3XPCZOa?1}}aXTRr4bgI@IN$ZF>l%Cdu zC3dgOyzAR5;F5WIN+!z28oSL1EcVs6(9epkkQ#cbFdVG6%&q(BSL#WLz2r_taXmBD zk5xtN-xs3@BkJz)X-sSA!71f<^E)3CuC4@jB0t5-UtdVm!Gj9K>FA^H@jRmB2U}G> zh1{{;%B6AW7nJ-t#eZdDU=ElD0oNJ{K+ERTK1QA3(Xo2A%8(qDUKd06!77iOn1!!8 zbK@0^OzN<|>1rYa9o^3)w}#`QTs!h~*%B;R#MAFv_FPsZLB*xT2Al@lsx`!sccd`Q z$P}^Piw*8it0YYUEM)E?ym#90qiK-{R>9D|nVrIh_!#@TT!lisk)@Sc#7(_dQ5(*m z3N8X`m%0v9a8pWZZSczvH^B;>Y}HoeK(AHhk1exTCbm$b}#V{QhObF6M7;G-kBZ#EzDB#2UbM};MT(8C*gTp^led^Q}L&>z4W6SYBsAjE6pWe3PfRDn!Ulh~%Yg{(p!OBlg7lYgC1-`7`&>v>N z@&t2lKY6=L_rqdar7>M$*dmNOm_95`$G+@b&NWF)y`p)AsuTsgG(N}h`o#eSFmcR6K#g?S&q^S@p{rGT~KNXk+< 
zpstZjLGia!gns`Naqx{gCdaG@r;PZ_ne1Z+iN+tw?vn=I-dxLR3Zcpe)W=XPWdHpD z2fXRYW%0~7d%??Dtc2&3-%+tIYba*t$0m1n>h?2>mnve-=;Z4W67Ocv?;mBs^Dp=Z z@}@K18C;LhkLZIbm2K+LTYj*a$W`{g$xav1KiAzCiEhTqil{??K&s$V^qWdW_zd1> zD2etlpS9vYV?GLkc=pJX?04DY|NP4DQh$GeEaHFn1>h7gD-U=GJf=7&Igd2vo5vFK zKSL}c?-rEOL(~RW^Bpnuvp{_Y67DYoGSt7Q5?CU2fgWtRf1C;qANxNZ*NEJ|`?e3@ zSN5tQzjEr2U&;9Y^{-UJ;$L)v;VJD^`9hf}v^iSddLs{vyouF&zc%`8_W$)qfOnk9 z(;OP5p>_T!tm`Ki_J6T)1xT@C4T=)>*)8Ozfja7ar#}04TNJ|1 zx)AJBxbDq-+T`qfXU?y1KHyrueG&avM)I8eJ|o$3J%g3JiN!ljKCmAd3<3JPH^4&_ zS=RU*CYj55(^e26z5SwFL}mW~_`mtdxrXI>z;wQXfUynm!oW|CVpqNc3WK-K%VXDi z&mY|1+Y`k5SK11Dv4I@EFn^>e6*#U`sz-l2GD!E23?bh~5C1%l4gezjz0zgb{>4iF z;~0^Rrue-r&;Ofa?vG3LNiz}h5uKTm{>^LpYo zZ2LcWdEryP*RL<-zd5S@czy|?zYmz2;D2-2?l(pb5L~(R`%oNF_&2-!_Y3sqVEg@= zZ2v{gNA&;pYkGuV`L$v5D^{cVB*{S{VODP-yR#`0FDCwO^~fPR0ZV%H#D6Y2S&rZD zI`z_DKCZ?GHk<~5>T7ays7RuY%cOeduUx#jWNjcJ(=ACLu^0Lua(kJKsMwAHohozsc zfz0Tq%MPU5SV7p_D-U38UgZ`~C;-(phsbr15t(K8=k=B;c)Wlsy$^DetD)am2`?lZ zNY8jmyy}j8r0>vhWX5$e`FH&R;{oBE{9Vb{XU4R#;BXG3cWwa){NO%wU*xztYnm;n zTZ*#65Xn_CqEE3M#pr{;cmVmBdyQVZfqSE5UP1iDMd2WRFM_aRsf6$9x%8>42^WEh zzc&j`I#p*DGf*cMf;sEt3cc);A>c&TIaFK_06n9MK-8>XFPH zkkdl&zgqnM^UE{X?$!QFNy9y{%7H^vcsrnGvHZ2wD0BH-TI}}+hfV~=4tEL-@Zjz( zLYa35-cwL)orbiqg+)PgH0Jf*)-N*PL$9SXV@c4u8zdwYpK(*Ypc!L`BN@1rZ?&oMfEn;d!A}B$h z6#S}mGU3)@$6O?=xaYQp_M^aj@{fP@1yMmKGS1ul*~8q@^l_N2#d;LyF`o-qYMva-V9MQC$=M1vHfm{4K*zVo{3p?z& z^`5C8Ny6pL$;YhCfi`;QO!dUY_$T4454gT_6-d$`b@ToG{@a&RVO@m((N?(sCbM>I zFa5XL$-uY!aK#H-L?Hs#XN2Lf%*(-AN7C*N9n%kYTa1Ws^d;#T8Vrc8?(;goVHrB* z`u#z>Mo{({Uc1-EDFg|Vtc?Aul_+y7+ibk=ynKZ>IRgu8##D0+Y4co6i3^ChETcdZ4rfw9h8Ue7VeCea=Mo&C3* z&8Z}DhyRDN?+(YhZ~rf;D8hw?k$G8>$V&F!E3!v2vqu@Bl)d6g_RcJnt+GN&l2utn zg-9qe%KV-0OWk)p-{*TA-`^jeMVO&=54{!5?X5s^kvFAXF zNxTl|A?R3(1&{4ThhT05{z`gx5D5NC+_?Ty%^Mt?q!POEQ=BRDJ@?ETn|EQwpq^7D zGcuV4lel_%joJ-5V;(>q=w`tM_hM`-BAdW`*|J+_joNKsIj@oB3@E`;z~({+uS-f# zPU0A_2l6+7A`mra3LQmiCrv8F20uQDoQ2^Xh>}$q7N7y^iRG>Jo&-;?ry#P z;(GA@Lka2Dl#<6vgxV&A$Jldnk6ca{f?8sy0@4`!YCg@&zq7h$65AOv03ZL5XmC2f 
z2&C~dmP5N)FAULQHHI?fE{6xroMHC)25gywJY!3U^(S=URpF1a+0C~=te$;wvMY)( ze0q|NI1^|f!b3MFZr_k_=RLGQbNb=YuQzfovns-6D?WmD6%NnZ9WEQ63mN>%n9NT6iB!Y<=tvvR>!&CO2%><>Cd--|Mph zjA#&l|D8gg?0J8y+u$$Y1Ag;Q6?OaiT_QQ+=(;~myRAb1V+pX#^P@k?_ClG#3Go;| zB<$XQZ~%y3c=P(ogTIs z7|B!#rLeo6JE>nfL)YapOl3$&yTx?G<2h%+0I=uHJ`k7i(!&faL_a@h+D$^%IoGXr z3-NY^5Iq9805f2_IU*iGanA{;Nj#zjQO8u?UPZGfE3@lut~tVP z{5ZCG@gWBJ3KcFaPY>+jFs7El^F2HKp0Jm{X{$BsT;|kXfWGZ_Lhn5LY#c;qQ5m{h z04#SHF8|1k0M#!`82FZa8#7}h|F<;2IRQn8$Dt88G(%nkjbsY74+*kSsl?v$HCw0S znYakwB_h}jmYnYdx%{9uAP{I+`cSneQpNK^i#-doRRV)g-`bm)2-<;XR&CUb>j zh|^&-+p9{)B%F|WzrotaNZy=vL>0voM8<}0r`k8_jP{bNL=jjCnlafoioTifg_BWK2!kAnit44BI=ai zovAbDWll;64cXmp_6bKL>F&q&OP|tq?-E}bD19_%RCz_YCwlAoB7|@lhV~7G-=2Ls zMByh)-R?m(&1S)>Y$#Lrw5^CHKM^{}HEgvvC7Fu1q#A7TFbol)j~=r!7e0bXLHd{V z0dH1Fg$wP;f18yDzs$-Gra#S!t#o{lfa#ifYXyeuoxX#14r%8N(doj%XD}U4bST~r z8osK5(?vxbT&n@y5ou_0s9SYs=3aWwSX7y~(AWpk;d4N#1ROzcfla z%_{1VTU~;4iumPAJ?GyFSye^L-*R4vN-*#9l06@}?dN~8;c1KHkUgHY2vnH z<#Qo_#mdy$Inht<=0W&7a%s3^@k-Lel{dZ$;cM$UK0Gyf6Spf@LSh`!ftOCrb1t{~ zK`--h)wKlJx+D5BB|%~pB6fI5wM>b3u&(#7>bM?srEKu zn!Vozc3DFh!xQB2fRD#IL2WL8?fYccjfA;gHJUu`8ot388_CT#H4WBiuul)>V&}`C zWyI}yb_~Lq&s&m4ky`_$Pfjj~mKF;CmN{}evcT>w25KN@3jd3fE2;eC4#T;xU8&rw zEH2*;K_0b}XX%C@R8#vhE?j>1sCnP90oZV0GvsO7aTVJKWrdvR$}z(uL+tWv(XW{8 zHOZADM5^@Tr%oZF572utB4`Fdk0Tw=72dRK`^@G)d5Sc#?mg@SdY^eud8< z*zLbG9;bLvrGLQ;Chk-{i9Dp(5n^v&A`%U=;~`=>WP7_ytg@`InKa`kkQHViR=E#! 
z;=xkz&31+lAr5@#cvtY4>X_cz91z(XIh7j9g$B0qBllstIFkgme<@_xB?%vUgNtj*3Moi4;yA+lAkj7# zI5_rwzQD3^rT*(6e8u6+a#$2H2m!NWz3&oN+SqpTT8{U6?QgM=nL@Uo2_DW zbOsWv8fdVnaMe%^vo9*s;r!yntv9J4o=Am#JtP91XQyHKfKjmj_teo!a1t+Amws7g zalCphD_{k${W{OA!C%2t!~d|#eHiT4&&(}*M9Hos>}esBGe&M3^{y$sAPN8pfgrmfxETIGEN z-f7>@ap|K&NyRQK3~Ld%2Og6{Ft~uBa}U?k7Sn_h6S_&foMtuP1EbX#5tV<*Hm|p9 zm(zkLWdF<&@#mSqfd)!HSvzI7>u2QghtG~eGlPe@#NANBRA2aECO}#9n@h^@02Ga-b!Q zKmB)mXm|hH_@ue5g9SAI?f6sVbHq>Mlv7O|203{&mtdUp(AAy0n3KzaSch%N8AEN= zkP8o-orEFtetl{;tyUquoAqj3OGi0GrC(x3Npz23r0nIO85kt-2Btht9h_yE2X-^7 zON~(kM*IiNc)bs$=!3T?Z0G8}YrtLi@?t+GiB2=DFif;yXPQoCqAHGq<~l3h zB|bZfcPyGZ+m5AN*s_q~w@DVYv~vlkZ^m1NCR*eS7)9b>fOnBy`+ci(Lmp2|ExCP6pZeI{Hl9>? zu@;7@x;+2!iT0HnU}hR%z#lW!Q1=D|*8V-G__u=)di;T3QeYAMH@!ChMG#5*3${T~ zbpckTGW(}f%{v>kn8maVW4mOz3RB-Jk+Fi&0ETK^k@K9`U)CQjql!Q zO?zp7_GUuF7nmqZ*E{_k_yDNUif3pn_qF^l0zfDp8WZ)4)&AE460vUAcJEyODAR^lntVZM$=;%SWn^L)+tGR}e8} zPQV*4_0dsxSnY}hN+}qM-8p1_`G`)A*iYe+{9dpjgBx?!8r6ynNcvL-*g)n z0&eM@)I--U)uxZ2zR9LXnD$cvd=CfsLx4>XYVD^0I>VFd9o?_cdNfa>iWP2KF@CYrgsF)RpvVl)(Qh7gjkQ zP`y0z^2d8<2Y`>^gc`v<(Cx*GXm=Ujyv5L;6QlnJ0So#iqJn?L6`HE?xvxwjAX^ko zlDaIXTy;ks_UtLd&<%2_bu^{Jko!w8c+sf8)YRB?k`S+uukbw^I>QuC`hHOf0CQX0 zHu#w|{|j19k5!QX^u!JUG`@Ga9{|)cR9gEnqP+jXcHOy(35Ce7Y*^>1euv16TBm>6 zvHbtg`uL_Z+=MjoQi|airyl{(5Pd5zzuvvlDlMUi1#bnVo&p`6QAPAp(7fkAf9}80 zzz}kT!+1voOI^oI5ozbx9`zZI%a0!E5I4t6C`ztsU)RYqM2GtS;eT@pGc1|9vk0Gu zM;Qrt0ZSD2wuKHtXPM4oEp#TloU30M$?>qlA70iNJ_4i{e(25rkGZp>-A-rE9Q!kj z_K{OodCz%}fK(-mMLry%t+tndkv@PTg~&s^)z0r84uaS_!gwhT)kVM!g=d}IVa{|y z^oMP5_Xl3-*A@5wp#T!U6yVrj3P6G73@k7p65t4oIf&(}28QJzQz)I~(4%qabGe~e zvubkRBGBpn<2^!`mmP;eryZMvh~G3`**fc%ww;;bGqvz7J0`?r+O{#TLbo#>``!QY zVNhhqiv32lKX4CLi&vZ34}4&G9_#{zEs(ki`K-?&Fg2=eKq&%(AJ7J|%}wh2u1IDW z!%*;v5wIW%`NxP|WZbr4DZrHaO*>TP?22suEivNq%7Q^C;r!4NIt;0XQE<#*zUI=p z1r=HWJFUA8+piz^92Dp?6b9;>_5Kl`9f9tIZvt#C-Bk_4omIM%Vesv}koEZlS~v4S z2a`fT)pe3J-<2|}OoyagW9+6PEN50heH92VMNQM(UP6Jqks)}m&4rgvt^?%9Wqy|? z0;4FT;$P*4jG$*Z? 
zx9i~HD&Sj(cqQ&l|_811vnWxIZUXi0)khB7q{L=l9hXhv9(j+NJ>s-Wp(7 zD!2qWU`ngCJES4b)k&<cS9XM=;TB=xt$GxAqMLvi8g=XA^G*6 z-&s!agVfN9dfNFtcs&XJ)gCZL(@IbM8s~RL+tw@4215VOBH9bH6Ny!$WHWs;vQhZF z7B8cLDHI}8uYH7;`go1R$+Ua-DZi!+!`%rRqPogqPgy-Q8pKf~ekOw`(1tA*`e-)2 zVKUN85T(X_n0Gr2&BbS`BOBHiRB44Alav#R8&;>6hetQxRKw)!@_7^VgzhLfKYDf* zfecpFnAvyNld+vYCUQDSWi5)#jt!Q7@jq58zTZ5>+Wh6^V^a;Ksm{*n4=<5Ev~9np zIs5(djlJ*s@?5SM`*uo8uW^p- z;OAuKX{6cHwo#%~fFcoj_BLF?Ge;wO{B$8fzhc-szur}u9i^&6E*YZSLb$!nngnr~CfCru+ zFg%34<%;?;QCsj}bMSuSU~r31!Aa~JhSohJ(=p_J0&Q53pAZ;MDySzXg61oGrVtMM zbB`fbjOBrs{%avop#;9on zt=4?#`B_J3TPsrnkwNJt8A}tA$r6W~;#tJ=E+MNy)*my%5QcNLk?=xD{cEN@+B$om zQSA(5V~*9=c84`W!!X~~xVixCuSJt*oFLS7Yw5gy(KyJ;#D=MwlWj=P-FWFG;0_(rWBVUYD%2pQI1FP z!nmLY{+j;4;R6w0<)9_}9HavmI0ywX;;*2n|5XC+a%dm&hgZHd;xjpzY@10?2j~>8*FIaL)iq-;f0_HZ?nhu5u7`OteWfi$2hqX=;UjBvOoD?KX zcVtO&au3A9ZK}&LcIB|n<)cIyxecJ6+IgZSTzngQ%^nDV%Qy`)oMvEQ2;b~^8I&`2 zPr)gK;S?CgD#-|D@wXcGf4Wr#a_n>|RyAP&%?;wM`*zKK3#Y=ld7}}T$d@8xBx~)O z0hVBWNp9N z!yqQv$uBRJ6;31orL);`Fqv#a!>mUdk!jP@H?@YgTlI`jhrxtNwcx9%G&U#t$ca43 zA)ncA06+U<&|e1Y9<;LefBDM*F9^G!B@w>MFaW!9`dlZ+DOle)Yb&{yj5cbZG1&fumtg}eRkCU>6mQ(g)OZwdqId=28$Mrpq1o#8y*soI=cOb>1tjb zAy{HXe>nR8_J=93zw05EU0e!VaccPMwA;Q;+P=!Y1XX~2+5;ot|DvQe6j&qYFKC_v znha(2bq*#_T!LzPd|jI|vLQZr;AY&PKF{wSzAFU`GsFp|2iqwgS zKS)KPVpYJpmf%0Rwf1t$OT#w#l-H+`lPeh$8+B&=$vzohzrQ&7)#vSLTZ!)*0cT8q zx=^S@P%7Qx(u?Q{BnFu%lf7y_n|*Hr7!bM8pA?`lSZ;kYjzv)gs4wf&gD6KQFCx_7 zebDEAA=%-I!{9-6(%rN#D$?dMenzVeSI0jZsWdwlxwTwiO-uSuq_cydU9&0ra!u=7 zS+=cY_RX+@gZ*VQX+>{@6PyHs*kbo(vkGL5Ux1m<$9w7a63k2Q$ZlN~%@lEyfyLM? 
zk1oyf3*I?X5TlkZs<-dZamOJIEV?x%rmI@lv@o0waXwOvDMYXCnVXAVnwd&p2xSQ1 zSRGS98`)SP0c-1qm&ZZ9yfwN-DOJcR7nXO8ls%26Wrsk2d9DJH?HO^zjPW9kqD0u2 z$%CH*wJbw*_y-u{S$A0-*cm9od{xWRzwD9cXXde|H);fG0Z_AU2_bcd;;X?>Hxd{| z3=vHEi5C11w)f$Vq^AtG%1j$~OW#u#{1 zlKG6F!yBbMm0PS-nPBNuQN3DXM-@79=CB>GvssytM=_ruun-OjBbXQw4j#x2HXgSA z3|8Q9d-+0xEC_Qt{(hiT5V@AubhXp43JkGju~JamMbINJwHrrF$)+^g;4d%hx;w$B z>Owor9On(|540M9{GT}v{{Yrf)lY4}&-!6M!B5UBpa)PZ{`CMta;=qb(D3ja z;8+e=B_DD12;XiZIo_R}nmJ}Ha40Joa?olx4p_)d%O?>&UkHR>W{4lVpokYXVnUey zrNVrUo_{Zq9PQ2^oZTbK7eN0AyNExb6aSu25GkQfQWFfDg2|G-p_NE*AuRDd|1SUA zD-kimoRlUQPm~9%XWV~$I|ozklhBCJ1;3YRr&!RQXaUCoEkKI6A!@Iz%tO$&lsmZ; zOxIM7x!A_XNbUeHVmTY(E&;!&q?F=t3)oGel6JD9Z&!Bf0jwiq>@`A|3&(!zn0~^= zFK>Ko=fU->K<7w3{zk*uvzo`C`+fpaFr0vWufc&_Vt&OKI{V5|n7+gB_E8S4@Zz4e zM}h5op`AgPAWTjF;W1gpgCLl#AN3~EK;rG#u4;W;pJs{|v9&4KD-%wyG9bmcn|Nw? z4)_!EsgF@|FmCwmuGL-wa@XjlJ9>9>;qKf&_hP^D3EWTyHLz+8qX;QTtO+DV9-v)r zM>b(LYjOfjXrPoDg%n44QAc|_{DMykU8KC+Ul+NL!T=Ig0id-_O~Lq!M8Mf%T}8u4 zsL6>W1NR>~uJj~=dZX^4-wmBYEv6oFvnq%FmFXN z0*HI$1mR036Unk$Pn|q1POdVe)x3v*Q_2DW9-UH?Q>R?tU;W$zl>x;EP~}h`E`(gF zSyAeH94a-tH+g>UZBG$&$aNenPY@D7dgFQ>NoF|hmk2&4a`aHkjeQ;g=*HmV+yb7E z|8-+82un>KFhf**uTU{uug8zLC6uh@LO#rpA=(uPk%;n#=3sfs2gbLc<{*tq(y!IT zk$>pN>w0Z!fy1O8n}AK_}com!wdvv=+qA0o)sB4N-f$XHTP}CJ+9Rls}$nl^+VO-A#lgN#3l`XhS18-;WS)*OD&n-@W2n?aVi7NF=`pG92y_B44+R& zU?N|%H3JRu^CEZ;^3kf^0xJLOl7Uj$a^j&&Popv3_q(AHHf9VH5c@o5(0?uy-=Ho8Sy|rgez4SKj07$)Nt6Tls+vG&b4;iiwQ@jQwB>NCMMj6Jx3>M86v|Go}~I%4&IU$PRCJd z-wA0B;vJZL(;(c5K&!(A|U zKqpH+%4GV`Q{h=DSm2cEGjOU>O~=s{V;(u|K20R`-W=@cR=LG~WbjzQ;JF1*|D`4G znRogZ8i9cRgxU|~#dv^g-z@|>=IqI(ge;MjfF|J!q-qs(EP8iz7a&@T*c{D6s`wv* zH03`8S#1V)L8eJ|4S}XYmm8F<17FXel5&_5vgN${|FY#KtnX8wGV+D?`0ZU=`I>0* zz#W@^+8>PI2~0QP95_B4Z0Rd}cU9Nu;qCYdLEd?%)r%(v z%Fg4!Ud1O9DF&k$<}n;xVtNwYLc1!+v5$|3 z0LZr}{6eJB`QO+ABSh{Rc$LlPP3udDM^qN3QV3C5Tzx@=1f(+PMiYcza`lRrW``!| zJ)v;kd;gs5*MRpSzM*-VnP%?e&nO03B~S>&2nR=mm5#gyFTj`sW=ZbVgauSqq=GQ$ zCeQtSlLUcKgE9eMYe|F!-=o}1!Ef_|V=vj!c<_wu_H?4~-XE~8A6tn(57*qIxNukR 
z?pZY6D|W9Ih4s?(YkudqA`c*JmKo~Bx1)jY2-?~WPqPAl7(Vl*Wd>g986&b2qJPX@yre3Jb!&rcM#7p99A$W}r@lnI%_ZA^?>ZbWa?AnLg~kC&je_N@F9- zu+!)n+#A-s!EZ!V2?MwKy8@}0VW@y|%UCN*PN8?sFbrC~2?5_~rwYUU2H@KST%3VR>yefcZ&pb>&J%io zej)XK$a^%2BM++c0ZZuDUe`o%Rq75IWh-+A8|YA#+k;F~|IWWm6NSQiKw3f! za&p2I#RfrixJ_!cbP6=+&sA89!ZFDZ9`*YsoeV|HydyG&SQ;8e@_xMO`?xB(gPkmp z1C9=}`%pM1#A=)x$iiUZ7XOXaKycv@YUQLT>xAJd#;3sQHoU#EwXy9_KEfeD+_zrh z`|>aZad0{hOAl29?;z9(fz$Dr`%C_m6bNA5WLzDDvI-K{0lTHiL^=flm@+Irzv;$C z2RNeA0Rpb8Xzh1G=!M?g@E7T)`^(b-e_{;4i+bzV>B$#Nh9T--i%DWrr#S*_Wng9Pmv+)c7ohrnVWIn>DNabW&}j!@HjlwS#;L=HmER+)fLb#%xcl($UqImgxORA4j{JT|$zwiK06(4= zZAP)Dy#?p*NjdPqqd*KM=q-RV^wG2e5U#iA3_GY+o8;Cy3#>Ma-ABzuC{zZ$^{qS6 zGjSpYpVF8ic!aChHJ4z6U$9c5tIzs7_=_ThOyzu?wDYBBup{8?T)K zoQ4EovTgw^Mc`S0x^|G*T^L6F92`=v{ynph8beS>nV~+=sMdq{M-CYEq>&O@!fh(g zn4Wn_1mTDzdZgOlk5nT(ZdS*U%x{f@*emI%8%&YINnQ!)6XF7|GQf1-WmIBpr3or$ z4n8_uIj>5L`-jcyyxkB*8;y0wt5o0sM3j(24IfLZxGuGb)Oge3*%3JAITYf)Jp7k@ zx64_;wu>C%x1B1>GlLTl&f2KIg0SCB;XTCCI@OMbKOq8Le<=p8K*75KYSz!;oM-#n z4uHkIjF)sv7d;OJ@-Q9(GzQ=8`Yp|^s1Q@a4= zfnA{ST!EPZtIEK6W=iBWuTIN};6Oxr#{vkN!btH}qO1&eYCjTJl!bjvMzVSLZ&jdT z&2n=B0G;b|Zysf*v=pQ`A5)tEo?(O0LuFJduhwQDd5wiRx=)2UULGZIgE$;M;XuNv zf`1x-qwvW`;9tUk&Ta<*g`CCDg$Wc;wtRo#Wdv)b6Jgi7p1|V4-SCe7+wa);{FE$_ z99ba5HA-)a4e8Z?036K^FruRW2~_&QCpu~7+JCu5{5MGjQHXfWrI8#JGeBAEq2nee zYzo$3-%d?h(K1>+>MIL5Bka*k_2V^MaY?)wg+oEeK zVE)E|TpywigXza1c@x4l-0W(_0E`i4hyNOidZ}bO!jDm)muaT18jyq%K|+d|DK-)F(9S%NSwn}8rr6Mw4<`kW_&q}Jix!!sQ^lfmeJK*E{E z{#P7?3v4PjtgwQNNCEQ+?DTWMv)Dn@RYF)ih`{2$Ats>}QbPl(JNk6L;sBjhnS1+y z$H@+PtZk1lUZP?{82AJJIHs{b<0L2p4khZRB>%c&)?IAYnZF`XKc|=(4ILJKFk(dZ05REQrQ&521QuOO<)(WwlCB2 z6?<4aAo=~sl+Q^I)^X=)!UIReIM7yK1Ud$Mq8V|4W-gry8}&xtOJKC8z!8I0q(64c zP=-+g!m8G%7e8@>S^b8$ihoP%aY};*zXzxZB3}_rt+s|hBi~6EA^xs1?*VhY37m7C zOJ7HvJiqkjMMeP?#y#wqe(5tnIP(D7MD9-YG~^?J@%U5#$A9d`ZTPQkK#pG&YT?i2 z90(xs$$gbl9K@`c4F4e9H&-hvBG&LGP^DaGQ^-G$;0qwY*bXTmY;0!%A@~?J0G1At zwpRsg_`>H!PLOlu6tb&;ypIDoj#G$!!JuMvKx!8N4qPC|-$#|5{RS^B$?%JS6`+Kt 
zTYlts$AA{?BX=DeDY4k~Wzz2Qr_@o|{H7xI)mS2IxWb`w78zwFF##DyAEGJQ4gd;M zjuwMg!;^-PBLa7s&@?~x6boTw!aJ|Ix181;3Ab*tYx+6?GJyryB2LU_!TZYqNUKf^ zT;k8fx2^$$5?b?Z@sFD5pt~T9i{O;xnprPZU0Y8Y@9Y4Z;#}kKFY6&k8(4R@CN#}m zd0=0rmKH5Fd^Yq&Aoyp5`e_ebjusqf~9hZ?F`Z&ZatC{zK?x&xF!))6zeM{ikAZw3ka&(zGi4`1Z@7a zEz&L0;h-FX7f1yA#G0vv>ptX{$Sc?a>Y^=mwcA+RH7_V!T<5q9prJcnE8F(iG+2It_KU?_SXz**()6DoZ_C%ZDup~Z~x zl>7Jzrvh-lN)mX(*j_UvvQ1${e!>;mmLt6EZ-}rOc7rwY9A#bmAT&gOQp$w9*6(L| zT&NX{nRAB>yU}A4isiXJ-#=|f*%1PRmJme}E)#IL$LAY)| zI|ysx{D@k40_jg*dIkg-E+`C_JeJyxp;nQ(1rz}@7;?OmQ@_-N<0PDP;0e5oAa3Ee z1f#kAd5rH7)}eo&<~nnHZOg&zR{GK?7`B9d8+L>MO71w=5~7#DW6Vb5+i|Bb3>|6uUpFsmDJ&%X?zT67G8m5`DgFb=|a)$(%@I#L?c z`&OBFy}O7z?d3e|e*#ehV&CH9IADcTi50V5>1X|xLNanjf2116}4uR;km^+Ov&;feEJ9p~q>3tv)+ZX%6SUn2x zreEjPj8fV>B6Pxwn^q2m7yutgZS8Mn!zCb|h z4vFw!wNt9&P$lQ_-2_Vwrj>SaQ(lh&zCFyOe(^HUxOP?!AVDPmNjNj282o*tQ#Q}y zx=%)DR+sqq&aX_@NmyzLvRl(5uWuWPt2_k2Vvm*ZUric91TNI(ijUVJyxg~wT4ix% z0HBO<0Mw-WbiUR?U$QDdkk^L_K^gHbdtKF8q|tGB<636rF=ZE`R*68M_dSCWnq%e^ z+-%h4NC>(keDzvYQLqdvPuT+sh97v?Li2CU#`FLf0!DJu-|yP8JG)i(nkGAFe_D6* zL4tE^-4HrLLJ{$~5O_9m(A)E-GFk;)f1H8^M64)R0$4qA^i^97{V{-*=I@>dq?w2e z)Ch#3m{lS0>KfrDG%O;8oc3v9$-!C_^X_Gwa|MHaiU8c6aTbv|#Htk$Ept6u`f}Z18>-)!P8#W zCjpaJ4IkiYtxGh(cHg$^sJ5`9|FDwek}Wi{wvtp zrW3ufoq-Z`Ss)tOc-t3b##AZ8=uQOiC6st@K%IT&c?~TW@Y~v0k^b2UJ&SBCUz6<% zh>F#sv@n4MkK^$s2<6o|J=g(?Y*uNxaDFiKR6?S7(jhX9R0I&9-vZn%^#HK=Nm&m1 zKpdk!Tm<%TGZUQ^@zxsq*^^SdA9ICWufu8)uyOHDcK{%xb(HG#g z0n30-)rR6%D346|X_F(=cpnFEv7dk@<-(L1}_80_ApbMpVQ3Iy#VeqR>K$6dihhRyge7WZGzxfk^5*0x*CxEMlR2x zzf=$wkZ?I|L1XXw(HH}oz8!%g+n~VHj?6PR_z5(EG2-EZ*l9op zZ=tZ3M!=2{$_>l9d;MWmW@0Za*OL_?zCcpg?Z*kjl^@zQkXPii58A#+VuIz#0>xNuDA0ZxntIenr`5pI7aLVQe`apk zlYRl9i7TM+oB7HOLL`nff_;EJ$?b)uF zf(&036oINfLN*#1r=aNsg)IlIGP&IZJYWM)5~gkm4z7Ab^8(4dboA@%Y$wju`#vpv zc1|xNVAZ~bQeRwqhHB|V&-#h8yd{J$&p`ztM+`Pr?c&re)OsdcnB@WU>BA$~@El(d z?h>%WUIQqYxuuOlZ{)X$q%EL~+h&{ywDcVR)a0NJk51QCoI^@NV z`*q~VN9LN<`zqWTX_=~zy14*^{KIC)huF1pyG4&>%Q;E@9H(T-j2t?}NyBy*_D+V> 
zC|O8WoZ`4A#fxhS$k>2V+Yz%9kf_b;&O?Esl@s1=<+vDVsUt}4hv$WOKma~yfH8}S z5lZOHTxpP%sswB->G#ygmOKZk=LTf{7C1j&Ss8;t9Wp3a-G~2&aP)cANiT#Qp8i5e zOC(3I6OwD+7`q=_OZ-bz=6ua(3NokLvor2wlsgRzB}@1}A zi}B<01D_{<xF$vEtK*#kQ08PR}@~9XsfaF=3C+ze0cG$wiBC<0WlU4x?)44V}t>NK~*^> zF8jmBsRLeh$ku!MMSgPZi8ZB3=%`8Rd|YEXDGGJ3mXozI!TWka?XoTllU`qjZmFM_ z&O@CwOct3qQ_nx7fE;&~Z3(qT=b;Gjy0pK@)X#Q^n4UGDFpNA*kbov~4LEBQ8_nkR zz85?C`m|s#hRNu)I%w37jy|1?T2x+}enWel-z4O$53?PDW&^R@G&&+GFLApikbL2s^ zX$=r%;b#iRbUn4_TfJs@jH?eT5A8Z#s2KFst9+=+p#$&^g3c&D7(mVn;OvPQ2D_~G zxBVr?lO=EbSdQ>peS?)PN#RsBGRgqjkhtYpJiaz^?$veK;AANmQ^8u`5s`e2gFe_E zhkLZJ!6huSJNKfEN4GD}^yfYZ4H~~$q_oJ(v!>rHm|A-My#Uz;01aE~zx-(%WH8+| zX;2ArxzGn00L$iGdzM>BB0t7oZJ0GceyRNtFM&T}R`^+e2D=~C6{LYmP`s+umzPxM zUWYD1+u9DRXOBb6AEkQnobjuU2`CvYl`ITHjjIu2)b>;k zGuo}yFTC1ahej>2@rC-)_i87G6~)yuygw4V+%hAh4t~`V3>DW_UZh~9;(sU@k@9<> zE8YU-Odgsdoq~u`4PR|%B)>SQnL3*4xeapxwFcs z!N6~?W9FY^U4UZx0Yi4+Bw&bG&vNuUa2aiQ_haJ+^un=!*4;wBv?UU|qFzYb$OL#~b|N12kvNJ-1C-PMoqo%N1L*2)<*T{>N? zr>&U|p693FB=EKvgO_^CzocHOu*M%5-?5V3*j?QSI|bSd@M#$oeuMxC z)HvfJSosM6$MFDeoSanu1q+;I*SbK)CXm`3D;;t^XzfM`*Q?bmQi3=+MoyXVjHeK4 zMW-=}YFI|HozvIcJIXpsGv@%1c!2CC5K48bF6g|2ITO*N8S`&m3 zCrb}?DLmFmnKQ~l$z^j*0cwWP->fqlY-GX>fAHR@5BT&fPBw<_)JJ3MaY=3I(Esg@ zvgE=sIec!N5p9k2(EIQrr4TjTktagF7tVIJifeVk^!(K|i4*tB^Nte4Nm;-!A6W6- zAA3eQU9<|)meyzIbHcql_n|0w@L|>gf;c3J;EY{JKD3Bf8L4ses{Af~_ z>=O(%13@rp*WnD!B}@;)5@Sy$1mCpnud7UNK3qF^GCTK7fd)ci%}<_R12juv141Jb z-bBXhSNFkZPmF)reLM6u-SL#YC$n>|80&KD@U@-+wPuHfOy6cwK5-~3arC@QVR^EW z^4V($2GBk}eVi8J<#j~X_?p7XZ2e_sEO^06CA+Pv*F$@%EeY46TDatM`>D8_{-}92d-G{Kwn!d?NITGGV}B~yJyW1YnaS6ZUt~c!lU&_FBpWe8V$>> zwCicEO!ZB&TUY?Uv+83>Nb?9A|LTsODh!R9@e&c=g79YTwc%{xvm8HhMEqHxjKz#; z=bQ(0NE0HN$s=#Pe*%dk@;WI|E256996$YsO5TSKvVh`-*<2Z7fbYTHtP`6&^lT?8 z$#b~|bjvrxc1v#xo=P#;Oxq39O8OBTDx^ajFyE5KL7_fZ#+ZTYUu@jzm5 zP~8k!Yn+Zl%9u&+CgyM zUuhRtX0xQPuh+2>Jnc_opi$AT=Oldd2BqNA(GR+B5vF6F{yN6DZ5-Orhv||e zDdDR^fGNQk4nY%+=!BeC5RTjx*BzvVBlPwRma-&D19BfUcAp-sb-TS{+n2Ad&@1JC 
zURTyddpkZ&Yl2p=XyN%Kue=7*2j^P$v?uVSKA6Aau=Mx^IAgid_q=EFqw?rU$(2ZU zQi}`hES-crP<+IxoH<9(y%)q&Ijk9q> zEX9=6#m0J`mD0b-mTRQH_X5mTunLh#!rz}T(zc>t3C6O7VPc879u<=AavAX0;{#{mAF zfs#!v7W^-5c33gDU}aDQ%hMS8lOqjZp=dg#2{BReLoKqM*Adnl9qMp2yG4aa`%yf% z;np#n=LH?5hn2Nw7baD{3R4GRB;h}_?pusxC z1NAqCbmn|JWKY$5eJ6=eN^}zQzx@GZDE)5GUs(%dFtHtpN3`{v@nUwcZ<21Q$zV;& ztO#q?sTL?qSbBTivBf>@^MGIrz{Ssn%s3|h3qD)7+;wW6H)iVsoX(rw$2)wf@qH>o zv=JmhvW#Ui;cPVhCfEk~@3^AShKNI}{6Q`R^C&6#CNp2P@=l{z>*EpG-Fry&mY^}- z_fC}NlJ5~%zF=;erImBV%TJz0a%^jMfPxdK6m~&wi`1u&UY_CWBU}u8sugDs<+y;? zt2r5);)_(LJ-!XB27J{1sCpf0+EVDNWjNd#%>LqN{EFHE^o9yc0!-`b_I$azL#pLq zG&ELyjaE5xp6hnT)sUyXnzZyWp{+0CWe@h;u@gnNAB9fvS==SMFt1PIoh zCc>IgIZ81a5`#C^?jk4kpD>1*DWX3iRmz2_7v+wF{G)6v$vHlYZa2a$P(>DWmvjvkfHdzJ}Zt$jVEO4&zm?A}N9&GRP2kt(n6=+&>CrA6i3 zA0Z5Vjn}#Ayk&0MOFr@-65AuA%t)~enFyG+H-I?&XE>ir9taNzXRDX+{+2aCB5-6G zcqlx>_F)nPDSKJ9NP7JJlkz_dS(YH^Xiv=p)+*=rbG{ddu=xfMN~P_uB6m%d0>8f$ z(4A)0z;trSrS21HPwT{QB*10bu-uBTj%S@~tLC;$8#t$RGu(Yz-(#+IfGN`)U1Rp=1usDdbM)Ke_I;bF!_OT@E=`hq z(*@zAm=Sg9e@VK4&;ksSTfj3mJSq-Fk2cwT8@X}}J3IH6`z~E*Io;YXD|fT6O%|mK zc6P}CF+WevyU3#nE9~h=Wx`J#y1)b+P)Jqwv80G?K#~#P)c6`TWYob#wKmU4Do#v% zD4GF>D?9&mH9aPzu(#U)xIJ~|^>cv0Oh=0XNmG002h=u8`Xouv;cUzG7U3Ezpa@ux zKZ^?(gJPHwVyiYZx1!t$1_FJn`2Mc!n9f_zKJ^6fOzWPR_yb6e*K%Fh0qLr9Q zcUGPQ`b+xZ11zgQA*KyLqUAUTR1(rgT_O9$LE&We_2-!7K92>erJy6?zLij)RS*zJ zWYg)tZwjkK`PI_GsC;7r&A0(|6Tnn#L2RfnpiFuu-@_TmTv`EC;`x&%;@|r;**(6^ zhHCvb250d^YI%l;geHLFRvQ&j*Qy$6$2cXzuKSDcbIC3NWJjzH zKc)r+GXa(!zI>uaJY}2(4Kj<>MYOde@9((wR@iD58TX3@h15_&+z6qtMe5a%?CzRx zf=3*o{t@k}o_$oyq4+!0LTQ%Il8HMq(6tZ}Po-8*V?WLxHhgr+NsiT^za~ZeTIgh- z{*P|acm1JfR%_ncLxn30!SGhXOEf)eAz&YAMmTKRh_Ii`-CJPn)!ZS!iZ8N2G5p56 zbIA;_>!*#>LN%{W>WZ_YO8l6#pM~(!0Ce4FmweS8_4^*pROvbQcJQ=XmXF4(fj~hjQS+3minM2 zP!py|)zcOK@>ovU*QJy%`}a3wX17+lnrKpg)2%&ujH}LWlfpkX?}M!jL>ECgtuNPw zh~zMVK&3hy-Y>fmpr<_!14!e5jsG0@C|DRAme<4H9m)EwZt-zL!+6#MdEIBrJfit* znpYtDkfXfs^|@Cb6!d(OUDBN|TeJs{o%rzZ(|0ZH0)Y7u!aDk)ZkQ{;ZiI4)O?#e6 zSNom}wJbm`bXNIh#WXZ(Pmc1e3QIll)D+?CWYK}q2f!nj(lQ_2x0I#lX1bM-EYIET 
zp~s8qoG)2b|$HZ?0y$q)I4H?2gt17B;yg9_N4aXEovs!Kzdn zS5R1+Nk6Uec8Hcs5-4>~GCertJq>gg%;(!$itW|;q{ci}xT>Yd(B>T!2VX4@nR+Jk z%`;^k`Xuv?d)bIJO4~on3i{taWPq}k;u-*ryR&DUBp<#sW~NI)|p;>)E--+{L<4~p+nM&>+T`p_7{Xb8Oo6xCJ`KO~5~6XEG< zVY*=(rbyD$1vSK>XjwLUHGiT!vgeiYyyj*h2k5zI7M>_eLp|QD-0c3Z;xE88?La$e z6(0gK@w;@*7}nIArz+1-nM|!d-C*CxhUeiS$lIELOE>YD{!8A^m)|ojsb7h9(wJw@ zav8f&OHQpQAQ#a<6-y^SiYqx=k`m8j{Dziabt}OJ2rj1%FeIEWtkD6&YBXzW3YlW? z(E@}Gw0=%`{O`V5)sC_!_w8!$@3NqW;Jxt`SY`%0rCvc0xg>Y?riUsOz1D0ukm_ zD4w=Vu5=V%>;?okoXwMr8+kC?!#!`Fb~LcP*?^#z#7(YV;jKnfsgJDKwXg^l2>XQ8 z)o6LwWz*Tw3luI%({5K408vy@MnO>zv;h56W?|aSTj$H&#uQk_&jR?7rw~oUw1$Ue zfN6*ssIa|(Hf1Ut$8^QrIa_jpm4%G@;Af~G;2`6<{IIok5>Nxub8_N}0u-g2@7!;5 z=#ysVHHSStFL(2s&J4f>(i~+qI`{wWs%r;|Rjc@?KjZ@54M`<=9^nWKJMvlmTp%yb zDeyuU>YdzO;{}-kxMAl$kI*VvC{lh@ROAZamavd z;wG3WRj5{y+>3+SpEATI&4PY!Z(K)orIjt>9l!F2t5U5%UUB={3)=cOv*D^#6y;&a zUx39YGR2MB8hl_rFDuZ&rJXoUv!N4lW7$}i4*F#^<;0bn4F%%QLU-Uequ7Br@!DE_!ft%Yv_bPov=5-(+i z1uZ}mDDB6v5bEI?7juJx-1h$04suW8V6>1i;K$T&=8wF95+T4%QO5g`AIakR5Y7y>%V{%pC=v5iweSg zqY6cGj5kAu%Qkv*xg-8{&8?q~NuMM;$HwDilNITgs+1 zL1S5gK@qTwXRISSKB_mPN-;1fX$5+KZ}1F;pI)1Occ)v7CB;SV2}P%*CHB3D_$DOw zGfOl@X7!>njc2a5Tiiex5lKgs?61!UiIoHOf~u@>I<%pjb9aP{{O}QasHHan8j1)- z32rk>v!ig*r)a4h@W-a)MzFeOx|>3MDq!yIsq+#bG?bm@I40RW9Y2enYuN3T}4%x<5l?9g1m%60BrNVA)k zMWarUU%~5lHY-BYOZlkvYFoy0BA^QYgZB{pPx5rwKQ9NgOF1w%lj!AsECU;9`S;6N z#v=~~iO_Lr+k$hoi!Ol?hZ-iJ#HmofyapQWpNgsSABz^I$zb7%jpxv2);{0OS&l*n z-gvn;)VBW)-D6xP3i^&cL`E-$$UUB@L6w-tktvpl0KWDn+BLaM2iw&br(bvcqN0$8 z(Gx&57Y9Z6XAqaDX{v~;fVpOY<`N*b?HsO_)kh8np+PMKrV?N*FBjt20`pc^mAm6EPXEcV87(zs*q+ik#% zAzFh@)N%vWlf!{v2@kjTmuTFDQZ>wny!Wg%oD!`AIBQcqU18O38RVzi6$kxyDk%c? 
zj!P{Zt9ZJT+~^(aw*N=kd&hI#_wU0I`KZVUWy{Q}L{>IgWtNdWqinJ%k;up%AtNCp zGc%(odlt#a-l1fak^A|guJi17U*G$_zmNN$>%6W<*V*UudB0z;=Qy6naXj~qBMKLF zKwtp1nA9En%2za93TmK8u9ImnzNtY)RrTzkvfr5T~@XM5~ zGgx;AP1B(o_)h(ujSmT?0LuVzzcJ+k`*_gaA@$62?I1^A*@MyW?H?}ycvU`HOn?<{ zXP^4F5cBtMJK%$Zp%!*-Ba%Z**2<(fu&Bj{lM%#gJpkGFISJ3zkKlO11bB~Jo6#&O zBRmSA`I??~r2KO(8_q#$ouQ!ni;y2kpJID_1eyf3?5n3wYZs+|dp@}Nbj`9SUA`g9 z2~mVVAIg}b&p0oVW>g9k5xU0rifH6y(Iw6+=mE}~Oh^(!8v&Dv>!Hz%vV;isxA;t= zu)W8z4IJt1j0ZSv{{?aEy&(C&UJ%p0x+1gIbf_CMM6PinbkRpvTz|ci7a?`XN zEX^}*iE1v{g=*Op{JeU=1|miJD|;Gf!n#W6qQ8MY3u=uTjf*$_q&0C)RI_*H+2#MB(;!J(>G3zt6B8OaR5=fmmU+8-}=Zs zH&jvFmMk`6(E$wbT!UJD1OT6Re5xdZjyM~9yV8dIdC{Z=hvAb~Aar*LHUtYqw&}p7R_W4D% zjLRoKReO{d@TMqa^Q!*##?ie#BQSNU?fIX00S!6| zCaxF%tt*Eh0-XXg5N;fPu9&@l@_1D<@`u{~Q{ll!iUXwaMj$-3vWjC^7M?tmK=r4> z`jhIw>WCf+6Owk@aUH}TM|&p6h$BHEFv3#$G;u$BoiWM)to8R-fHw5!DL6L&s08my zVFT{CSLXm}v04liraZLeP)sPVFAXo)Nw@7iLwZ3BVmD#5*MJ@+B&rKh!VFetIsjdd zHAg&+6kzK_)Oy1@P%FV@gixc^A)l0_R`;Jm&9&A&0ngsoxQp~@cW9m0k%HgB2H38N zCq?MPEU+JATp^pN6C$5q}HJhAGe0cn{F@_0-7$Iyv4I4~I;P2Xq zWSwb)?4jGhly>;u&cF5er!@e)0|jotfru0-dpIK9Z@<^=G<-`-1qI0OoEvagFwaqg8Q_FxUdI~r z?rIedW{h{_QRNC+%gOGddt^RqD0>o@bH@KmG5|gl4aD5_f6EQ`Q<6^>O97yCD2^eL z%eq?@zyG=$ED=C=wRB77;TRn4kVIxUz{QC@FwL-XZFW71rg4e3aP}DcZoNCNDJrwa z-MjPqXQKK#31w^!P;dMr9mT@Kfjvd6@q&&Sh+d`;!)fVB4qeD=Ox#}eAYP6^H3yog z1M%TDuPNAjm_e4qSV9K{lb=NaPNVP5~3a5$kagkElJfg zV1)RK_oPT1tkO&CQL4UjvoH5_YwVe_#5;f%Oo4jprBx=w)E<#FeVg6%(4RanPH%}q z*LSJELdQXDO485s?NdN#PypH>O|bvzy|W>}TmZR5D(V+_09d=U1D%!sn0%JXwNkub zyLzDS$bqJj^F}=QO8f+r{~1Voz>p*TB18qhh$w^mZGY+$5FH@uv7`6jN-gmem`0|J zm9@3}CU@`aN}&WifkFQJLW4+FNawN2xef1I8&w=atV13X=B*X zctFmqVZY{!d)p-a=UAg^5eWfd*7Dce2GtD1?)LXbmXDyhQ-ogYKiQ&zv>)`_NGi-@ z`}yLa?{T=*pWPR@p6isoSQITZ&0=W2N`%a%ca#?VscgakFP4WcWM}?U;KjZ2FuaUC z{vXTb-V`e+ z_nL$@FBY1zenIKWgOJk=!1ITbvgmSOZyBXks0lgy65Li0`u40aT(G9y zj`c<$@ETc_P6(DDiv{5LA9ibDPXqr*ek4LZDdzo(_f{Wgx=6^b_g<7%Ynhcb1T zK5*{B@&G@)pUxKAgV27+eh}{AJf;btF$xj>u4W?GTx0vh>LtFnuK(P_3iQ#eq5uBq 
zKYi6h(jRc#Q5#?6`O`ti5piHZu5*dj`-hRv`ic82T_u+4KViJc6#~i%Hh5aVG%Ny3 zDkA;8LpY|V8od8~(Z@xgj?)n^(SL~~f2YcHHXn#t3LwClgke_cv?}<0Fn)Xo%L+_d zX`goCAkwiLxZg@rJ`YIyEF3inVj2L7JaapVEBYwaDe=3iASaij&jxmm!9nl#)owDg8yt4f}gOb z^X&LZu2a;ejBq3Opwi1+Qf?SrtLY^J9&|DtDiNuJe6imgh1egXZOx0GMJEA#a z!yj=dKe-5Ec=T5Tn7xwA8;b1{#TW*sSUy>_dSuT+1mqCka}?)8rz0;%vq#UJ1?_Vf zl;xBa9iYX?rcVF~X5g=Kz13wa7+d)g7T?+R?xs+@$PByG5$x4z(=`KVi7Bl7mRK9h z?n<;XPjvEZKT`(udpnwk_HeLIKzbP8mi+5#{*Ia;14KrsZeL&HT_Jsdr^IAEP_hE` zc8g=D$c>+2j0MNG0$B?jXO!H&6gE!6_=M*WicC|f4`_?1zjmccy?aRwLn+{lzwQY= zG_w~M67wnD#p&HKrX7jeAlTxD#%w*Ij)yPswO{sT+B~q^8uivLgLLQvESS69%qWt~g+)ux%tgxpO zNSzQPrTJV<2I**8osrJ<2b`YqZQ%F)%Sx#3kPlFangCNoOyvlmEKrG@-Rmr|&hLq) z(9~iz$Wu~gkME&1$A)MT3ESBN4>PAee25IjNXSYcoiGDmLb8cpPbx$v z0hOnIB)epp8$?uuGzrZ(he1t0-pF^lYN{6Hp;^)KPzORmm~GlfjTAeu`|)tu|#>aXS&;ddqp|3c^%}G!QpXXe=LPvHP#(!qp(5ioMz18^cY#fSC z0Xo>5<`zKL2!1f{Om8u6NAS{Y-8nPqSAV(kXwaFOFqj-{(qsWUpU1L?sRE|+D8hkv z2U|+j`aIJ)Z z6NxC#$&R=SOEyD*n}S*3#v*ylBJ?FH*Gnvj9XS0W!dxx<#C7*t)ldKD?*Yv?^0&%} z0cbE9fX&B$I%VUA91T_N>hC~t&hh;5b*u*{iGG(Yl_jt@$?c#@ngn2r@bUecxy=Sr zkv-U=oCCTuy~kpCfWiYkWd`OZ&!A)a zyklo$QG&guajsQ4jh1^}dqL+5hWX~xGaL8cAGwqt>p#qskh#1~q~K{;06SyY;IE%q zfa3FC_G9=&T>=I0l}K(^UEo0GI8c-P?z-uh@UvhW1|6+{9lZoOYBf=k2A2C<9P4v( z+SmZd#elMF(1B!g^adCNW_;`867C+|>suOZB?=DuoM0S{QAvA3m#SadbyjCyKFz2M zTM@cZL_GBAeOGhoI6|;3b=25<2c4m0mXLyqv;POxd_t362>K-yaf8we5o)|2%q7PJ zZhL}LmrBMN9OFn-%`W^EdjSjD2H$dI%c0IeCU=!BbuZnCpY;LqG}RMah+!C;qgj^F z|61rDT@lB<8(61DR(tc~3oc6GVbHs#$&Z~51ob`y`X9Yg${1-;EPlTTKj{cm>;P|@ z%iwdDE{L9fWZ#}AS5 zSRoPswH0jtQU*!ltyiBYU9W}&y|C{kRk%Ra|H$jkUK;$xaZd!yRebD@FvDG^{G}e5 zk&}5TEA3|)$d`yr)9`zHzw;%^G`_bpFrUguu!(>^4Ug{=nZlcs-W)I)yL93V!K242 zfOiBCEfNKYq2eW9@UI?;gg#~I1v8^EfYexJ%965iL7!`AuqFLTw>wJ1j=HZXW1v}_CV3@aW+!f~}#XL}n z2$M@Y_M`XKH5qr-w+gFbfX{=Gk;<1zeZ^VVO_~(|>5uRT1F~#?l*6d;GTWyrmwXHT zEA6%8Iv+H?zp(HyR)jiruT*f;fu%?MNWO93a$2(Z`A5^J=3vRC>D zd?YoIpzpyjU1Nj6OYp_D59%?Q&nhlGh9Z3a=WIz7x848|ZQCXvINZg|>MR>v|f>J4m$<5n>n}lrRU@O|O8%(3MYzhOS 
zwo499?Y-n$y!?UFY=!I^zyrSX(W4No6n@?YqW^Q?@weu8G>5L|YZqr2#Ddoz)txe8 zh3$d@mtNq2`7<>mm!J?i&pOsG>N%l2%#U@etm-eK&~D-wkCM#5S&i- zguRPnJ`3DTgkNxVDx184zuiBPPt*_fYMwC%6Nmsz3M4}p18#+g8(43JywaBUF^{g( z{a$D(z)lbz-QQDct%JZbl?@2Q@&3gtd?h3byiEN-Dw~2i{Wm`TR|2F|-V5M;$oEG1 zmMEEWci0D@_LH~w2C9swzF`H>98LV#gr==wD5x88JxC?u>b=!JCP{Tsbxxts)f@S5dZOkf1BEwriK{Me7|$;W-7P?^)SKmV4eHOj*6r_l4!!% zsb>1!ItZJrCy9^TluJqbZ6+ed@#sMdSR(;mBkNBC(=+%1fn+Ba-2Z~2e;*%11@RHr z4|Y0`ym_rRuFfX}93khdQ>;746e`yHRF3Wdji{$Uz&-yX-L5k}W0LodPUK=EWU(XW zVkXrMoW#S-@D0tIz9zv6CnVf907E8tvM17sve#o`oRI(n+K9URRPmSL9W}7o+BCKOmTr zP!rZ?}@ga z#CCf(!{S61{GmTh$~0TIE;C4PA2#?GyTjOK3T%%EsnvK*wSeO3JAaS^e$LG;PzFC_ zN!;LKr=9ccm3n7`Sj$$!mA#$YP$H_Oh+|FA5~VgABM^o<7X!hNVN?sQgw|boV?IfF z609B?Y~XuP5Kuj3kb(+#YNy+)x*~Hgp|E1Z&e@zX`G~H;&`m>$3DbpT<-<7G_!exX z0C2u>H8dgbtN*BPpw+eJgm5*`7|D#pgv8!MbeA)?lD-EkV>{nRB?)rsxaw(_VmkrE1wI;`jmKOBR`hccRuLrGKhFDHbMNu zkQysI0FwkT>a`jwR|Whn-cT+8C|kW>nItZUzP(0N;|Pg24!JZTTp0NaQ)`(#-2nGt z3(aLFf{pI1@Sg{rS|!p-fMv2RRQ1hFSRoek-e3GGX#ryt@(aRblFdu6y{;CmbIHk~ z=K-0*(gfw(NRs`($T8wv##jxOUI7q>73q0zS@&i~TSy|g z`j6hm0r0c_8LP`*W7Rk?Fy@J@DBys()1?|$PE*Tg&9+(tlp70uHH+!vEN3tKC1?sR zcuBl~pqzFlqMr+#^-2*WCow%w{Pf8=X9YFt;X1HGu8M+8`OhyT zL6dMwTl!y6K{F`lXyjdA0UADigkQS!Q$3)6&V&jtg+a5L;M|-fTn4;R=8S4Z6vQu6 zh(!|x5M!tMMIe|KlB0FJG(8elEHDL9ZJ8BptJG{LgOzShCYV;TLhh;tPP$F%WV&yR|Ni zsu~Bf$P-|g<=@E8xUzQr?&wbY&ph$ySl1EDIH-a~mGp<9`0?Ewm8j?QV@KW-NXHKX z`DqpN5{bGCK7i_-aJ!rNbI8t_~B3Q_hm+XyY{a$bG3}kpAw>2i<7a=j=D3DiNpESrGrW%j*;H z=`H+JUs4XL=hG&k?&-$IA6LK0h+Mn4??eLMlL6ufKJj52SvGugl2A&&2s;(i;Bsa= zM4Eas-MeHUgKLJ&c+^MNVDv19`$*au8>Ur<2ihZk^Dx_H zWPhmZ*v==B0V`?~@=O5e97(_oyM)15jcP-+=5}i7B8GWj7sw%`1lQ(tu69*`Xj(=p z`+79Qxn|H~eEH}J9x{wBnKj}Qr!ma5BxoNDKAb-J02Mo7N%^ypb|1jO$M=*8_!xl= zh!S8fXWoA|sBn0Ov`xPmFNNabCKk$x0_P^51CQkz?-1@OdAZBHEcfoRgahjPbarI0 zF722b@9{)Y)1axn*x0&|;OKjoG<6bc0yNU2I%7L~gqoc@j;#HeGhnpHsjKaACOh=E?e81pF;>azZ2Y&wS5}S>DSYDW7Q1CHI zB(vCjl=cFc@RpaGnANpJLXA34cm!X!KZf|DLA!8g@YkAj3LeOe@zWi^kH{D$Nd5X@?Cp+uxw3ELExH7UMTv;hy{ 
zeo4sWB{V>h(@zEi6M1GsS*WdpAJ9GLjfrG@FFx2()Rc~wuv1k{j86lUj+Q$<$=362|xXO(pV}DFRH|tQt?xn@W0Of@ik+`29t#-ICf1dQSkV!Jx|xb^oJANBE=O* zTmj*qge?tclkUA3+4H<7q_!BK4U9$&Zg@F&JSol_8At^Vu!8jdarujmDz+hzv4h-+ z9+MID_Cs4zoFNTcM)YP4frCY~=DErxbcT%1VWwkI#~QN<6)VY_r~P$s>k%sTPxXY2 z#M^{V$I8%355K5&-YgeI!+>I_ELuUg#&MHQ>yNBVYcF4$=si*~Zf7|S2UR{*)?k0A zC7wuhLdYrWBKTA=bUFiwb_w$oS&V_?tY7}~tu+^He24=ma`8=P-F$ua`DPL=M1rn>*tV0+_k0_CZpJk;g>7Gb z^M@g$NY-bi`|7Fb-aDHsP%a-LpgG~KD01f7mv3BfmLrf~Q&~%u^%_5HEU@Kk;K*k{ zlJ@_(Z@0fJUfcgMNAry7%DQ>`<7TjWLS!lX%MaBbnPO93uz~X)qyB*p_0Kw`vc?C| zlwyDNh8-~`Rwa$r7o>C5SriYoySvin-HP14n-@<@_dZuiyBBDyiX~Oe`{78v`gBLq zEvBa%Upz$lI(4#CGHHd2l`|#fq2V$ERiK1E*L>j9G=K6nPiR_ear}?Je|6jZ=e3e! zl7Ud+qj4l!LNaL!)Q(O@UEnN^ooW`aml=hW9)@-BP{2Fhk8x$2+PPZi6l0gl@6|U$ zqB|!e9$C5&yM}x}R$i^Tq=??_!=pS7PP$kjH}V4q`R1$)1BgP}YalnAR!I?0R~m8B zM@KE=h_3>s0pnwb&-`!#pq*G43@U(E?e3%M?{{N!*hY5KyNHRfh%tVjiK9rQuZthY zjUr6lpi`3;R~70>XS@Q52q%4K?$niZmQ}bEMB>-Q=|$3K?$n&$_>?5Fkyu@|u~c2< zu`HZyWj>wc@;TnEqvA$qO=r|)CpKS|8P4$QIE3LdXMZ{cXj*<9KKQdvkiq^0kEz^) zj<)O=w$w$~nBGF;`SrCu|F#SB;7y4ycC>9kG*wX(g(h&9s$BYnSuk5*T|qj7Atp+u zE+C@lt0_69Y6QOGgZr1FoK8rd?;<5ObaQe)=50RC3FE5Jlh0wk;jHZU_MH{x1okkI z53CHXABJ&i1*4ALYusTV)h=sXw|u5c!*Lde5+92&QDip7cPN0E@$hiVfKs|j925P~ zPpnUTU8zo=P&Jm8XB>XiM+5#f1bcNXY($b$|uJ6zW9*}N=)0-^l zyl%bN6l9&DN=k|Ce-~DU;r#lwQCl_aJjhjMR{Cjt=z(=vyR*i$MhUi5N(bhw$&U}} z%C|vBUo!}gLWf2vVTpF@BXV*Gt?u6d!xAU^;gw5KreYpK6OGQp+(mp=m{miZn^&R^ zFfU(#RbPtS*+AYhX)iUJOmSsNY``Kh31p7>$|3(lU%pnVa5H=bD8%o2vlYr7umPI% z$UrdS2~pi7cB#P*FlT5YO~wlbZst_LEQfKi)AiR zG}Q-9YxJO*Iq?%%Mp-eXVTj=s!iKC@q~khvexU4XdgA!G zXDm#L&>qdCSm~x8-+B=-j7O}xYqxVgB~Ac9p-lCVkgQY5bkf#z9=$3U6M`Y`~;!;li+!IzRB62?{de5fqcZ=Hmh+PU*^gu4#AnIYj7GlFKSr8^&*C2Ds6Z* zu7r%44Ex%%$HdInu+8WF4`9CbJSn5zgsmCYYdlt2C(#-0x{N7#DZ2s3ZwA@kmBI2< z-uDH$`TpX)nC*GZT8CI&2YDM2jo6D&#Z^ot=^07~5;G@xJ&N~|N_m2DB3FGA$6Y>( z?E=8{4B_&lJKImUc9!oX>*!m)#%E0&g|&LYB(Rz>Q#kyb{cEVM$NH|{%G3Jr!pS2r z0#HZrZrZ4|X=aYKDjtT_u%*3B_nep+6?y`+KEp{`Avz1MBi^%unE7YFQ+fmJnM}EH z+k$Iz8T 
zTo?HE%XMlcH=lzxFz);MYFsIn3PqC45z{_6^0GKWk!(h-Iq(D>D)D!SeI`3wR?L7B@qQ#*#ua0Pz8&i-+o|Cik<`R|lWEtD@nMn+zLNHr% zmDm*3kTZ{C}Lh*Ou0HIEb?-z(gki0ELS={igonWm2P% zFcDh)=-Wctn4pLwK3<=yKcnPA9O* z@nlF=I(z;H94dP%n{uPdtPD5w0-nzin7updb4V(MBY-5p2T+_`$|qGlFr8~JGM z3K^|XPgejbh9SZBr(WZ*AA}vgL^pgmxf*)+n`tRkGTyxm|E~Pr)$? ztjjuUDJ_3r{MvJKY5C~8@s)cIAH|(vq7%Z-xw}5U;Q5;phQx;@)|Sj?=a|PNbP>&- z#uVUw1Qd8qL(Ay?5JPcn+Bdvd)s`j%EhfO z#phcG7!%JL-AjO3uVOa%RQ1#@L}OY_yFk-CrN|4U`f0ShUidQ=2fmd-1(oNf`{K z<<}@$10v?n<)r(=C#RSS{pU~lWk!J|vx4WW1fYteUmG=&80i@L;PSLyL4S=5P8_lB z(if6am$79_SnA59(IQ`1jXyYyE_THxdZZDKTXFsH68C}-Y=F{I2_pqacR74QepXJa$mwlw;~;r5WXQ4 zrh}&8_0{I@0Q)so_kb0a7rYTk>OJAuuGu_|uA zumn@(rUnz}-)tb7wQ`_#Sy7r_z$C29D;>y|hH$a|XsF{yp&s@t zvwyxW`tmpZ#c#Z-I;pBw-1`3M(UYn@Pr1toN*OVuUkt1AIA?I38-q_ykYBe*4dpeu zy%!N@d2qOa6y?%(u5q%|1uUVf&@$Gtp~LVOr(DEF!~|ggafhLB9EC~#^fb6O3?06+ z`966_RhDLIbA3_AdEo_ciig|JHB#dWqxY;e#C;POG;OV#kalld|ym{~USjgcBm{`JE!gwzo>zBQ+Og?t7H2XFB`HR2B zv^^0S^n$|ag+Z-%#cib9zQcl_^J?HQsu}TLTcY%6J$zon=HFl7DI@v6GmUbkJpS|!=qD{P6Ofx4xy6GPXQFT;K<1*Ck1)k;C zsPw=(br@J+!%>-cqI6tpu2eO~q$qB$)(oBVhU>_E0m9`!0Z_b{e(#mH{7;{32ygT@ z&~QwrElQzdc-X1X#A}Fpv8)qVgzj{xxrOM;{(_VP(G_jF6v`UXB1(vvneC4Aa8&>D zX0XQJtK$A#K%dRM*ZdKM{Qs6;#7DvZ+yT@9tu)Oezt;KrExJ;M9H4VQ>NCLbHMVy9 zEsaAH!{UYV*|ea_k1q^*xdiTvT&9BF?v3$&i};^UVv$U#j?^6}X0He|w1?iZqZfj_ zqa9z)pZ6|a&tM1iP9C{NkCH4dusS64HNO*4{Zi3Fp)l z<$@ynxvUS+wVbk8^T5FdHF=?SpUAkvAOQbML1^&}wa%I+V6pMOXw)!FDEuXCKcHsO zV54q!5a>_0%-ut z9zte{8?h@4%7zb7S%nGSg&SE8?RgJZ`RKj)DHdMzHtElT{Uyq#m7poFy>Lh;cgSgg z(q^ljoTKnlWMD)>jKNTzSh6*vmV=bmyMLt|9~P<6pYrNMPnJV%H1ijOBb;CNO>n@s zpRmO`m_mY6^RwNy5>RaOY(j`Pc_lpx@n+~F;Y+C!LC<)8NS-w*>QfTk_9)&G#IEN3 zocVzkOT!t3%q4LB;ruCgsqJQ;f>f<-=c4}4g#-MZy46Rnx$~$Nz20`Ls^uyDiMSrE-?IZ!(RAT0Hlrb-kLj}h!)3WN$iA`02J5aQXS{1W ziggvia+=X0_YHI!F=)u>!HN;LD8H{Q}LrP@?F7I^5s?n4N};*wcQ-j+LM@AVLYdug#ebClu8TpUh&- z2xoyN>jaP5-8>$DA8$F11HX=TBgkO;RIxC$DK%6=P8R!rno>x7HqCVKU5$93@IRk` zqERIZoO*_~o0s6BZYo+FoVCJ%2&uRMEtlDa?`5?+?F5v(Z2x+g04&7_kNJ`ER22hP zxB_Sszmq5>6 
Hp?%^=1^Vx09MM`gUi1TtteFS-B%X-*d32G59+djHeD z>U^mD>(lhmr*RkXdp(6u+hH!j`f$Vtu2H3}=LA*9`da=+vI1vxv(<3uoeUt@UtWW1 zr^~u}t#AK+`MkSShcGD0Ey9UUi|Z8OYNVtiJ9BIar1Wn6M;|SfMj72 zfMk7s*x`FQ@0jS1IJ$2QynCtAtPN@D$tT%lP5AiYS{zL2#{^)M~ z?^ia1GWpE-pk5x8YL8xiZb<8&V<|`Ex{y~k1RY{b;9CWbyh-R-exG$f{DRrerKNvc zR$JIF-+wpgOFvFHwnSh6FtKKh?0M~Dq_r9H5~d=3_9%{+k)^LObn0Vx*B zyO2Av*N^R;@IM#B9bG{8wkoM0NQVz{EKFvPiQl0A3Y>u|y4@d*@fl_P#z!{5v(}+Nw1G~ehI6(} z!D+J+fW=B6cU0tOdM}!HCPvS0-F%^(tWtZ-*?rByD>BX<$Nj#`MsJ|vgp7lxnJCan=!ZWO-{l3(dM$9}s#y)UD z2-gQ#e8Wkg4UI2_3t{Nedri|UCcdDMC=NQu1ds}HoO%id;rgQ9+YNqMCz6TC-+MLQ zXn(wjANaZ2y(%96=Q-~lY5u8&W}AVMIU6vyF%>sjRgeqSul8WRkWt=jaA$F$Qz;<$ zQ}OAADY5h$?QwJF%xf9U>mJN*a*yEj;miw zdYZrRzDhYwh@-i4YC7JQwyX>LNI99t?6QWg{j>BZxKi|VK4AafMdR3ZF8*-sW^euw z05OUCVf}w`)RXSjfqU{0SJ_a6ZEDL{;VfR$Qe{Gv-$dY-c5-+gddUnAugPV zq?pt^O<9tsO^FxWkK@Bg-w(CT6%Vy(%WF$Yi#z!d1z-Y>YTOf7ZF?-p$Z&d-#a$v( zQ!=W~>W900WHBjKbwLuFls(Z2+5Abys(v3sd>`spa%qttzE{d$7Mk?U`y9+#2()`i zIrTo)IlN6TlAd&11Fx|AyidryROGh}N+!jWxxSQ7Qf^hF0|L_54Oge^CiaSn1YZ_htW zr=i}(zdagAplrzGiH(Ez~Fvs(f7&Lw}c-LZ% zEXFpgzp;ipgz0rzd_&=7%W)_1<8qJ0gZ-Sk)I@sme~O275yr6-W*lTlb8o_y#}>q9 zz_!OW!amo-|Fdn(+K_cJl8nXst$Z@hO!HE7tbsy672`0Jv^m^bR+!^N0-Li>X(nwn z1pbVQu9@<{{Q{VBFQnqzs4t}b8%)s)MIn{Yt<0h;Ai~D}q&3mYO9wL7VP=&qu*rJk z@h{kVH`JqeCP}+5rYrxk9Q9{!d>I8dnD*L$3AN45BZ9B{lyNk;>TVWn8nwk0+O+kb zv)L?pZo~3L{8Vy$?vvT~97M;%s8&5-t?d&hL(be9x!fOIN#@Ky=imY7zL9&i;kxTZ zRg=V&)J6P+_qFrN-LF%Lk>M(2-nVgau$0^%k{t9t;T3t&dB=@1@HnfOvjvsNO3ATv z;(VX3Z4n*7c`N#Ug>F%zRGf%NTENwT^*L)1Rz*{gal$zUNh&Vspt9zZ3`WoJCXYH} zQu9;wyPZ`-uUjPxdFx_jOU8e>Uu2m4?X5#k#C<0^<HbFs#^A1_OJmz1k|Kjz7IWq(`XsO+3m282rKjY1os>cEw6p!qVA zFLa58!=?6vd+y6ucsGani$7k(%<}8q5bJ!57aqEHwex`Y_wW1zWUpTsX{HRlw1x!# z($yul4-z1^X3slSQXG1Add7b8d8nLR>K`1l{TXZref(Ez0OT(&LAJY5l)_-|z)B+c zC6fu~EyeEEn=6+)O5)Bj3ND}0{#K`1xofPPxZuddfy=^-bLhskw#0DiUrAPLQ7U+W zCWNdP#ro?N4{-2qJ1c#5qnrQ!EQCfhJ$eT3UAl3$g zGLa60F>3Prp!|KmvzCA7T50_*zrjYr7Rv4Q5iDFEW*BWi%@{Es7=diV^QxV# zYdPFpw!t}Z2#pK$0SSCF14e6?^IEJ&6P}Y+9l}Z~Q%EqMfjO*cP_JcD&p{SZH0rfU 
zy-(g!0*VopJ!rA$=6$4LY~GO9O%^S2iMVE!$(k-G9CH3cCHpHlq_7gLZy|d+H_0ZT z4ot_#&@vIRB2?onjXj`S@_+iJlHqOVfl|!0i_8tYD*vPqJFhQPs%msY3Dwsa6&~*# zUJv_KEM(lnRBEq)@t`Yu@2~G%u>=S8)$H#D5x#L;f=^Ks?F*1?@1q82j?)E_@v zQ@iBOMQE)dEN*ukpLBa%AU{0(9pMf^QG7u9(|W2~9Ge=~?p}h=ugS zwlmMMlVeHdmOjhTGEsk0W4{x$bD8M$oJ%3id`kW~b=kFhx+Yw`M{wk|xY9R^QoRvx zcIlO4=k1)_A~lDzF5O1C4nZClut87hRy02wc$w(ew?}u%leZ^XCZNF%IqWY>Hx>+bD<{R>J7PBnM}GBX4`0Lz&4V2&wJH~95*;_K@s40aQ%(>J zA-Sr;lzLDl=TF-@om7M(( zPfh(#?&WOwADRrqUW0-#!hz_tAuJ{ic#e0{#0_yxVrFLoVeeteZWwEZxL%{IRYrv= z-+T$}_~?Cl`gPH*tBOtdqL)%f;_67WKaRJWkoJXS5$F*gHPUgK`VEbSdQ%1G4c@|g zrXSwYb+#N0;h4oum^Hy;6?1qetIt7Wil?2GNgg2O(d1h!G|NV3Y{Yx3bL8fuW<|Cd>}A*sZMvwo|CHKaQaD)N}A?wb&)xG z#=J-GGOSx7(#R76^_(IXGA{U?KSyYOPa~v>*37RQ5?DQ_hWY0L+yN-?{rm4RsEFUS zE?m*@xyn;qIrK0JHJQyfs{(&5r*;`CVmFQgP!>yGb$#%BS~^(+%d7-y(fXwWZQcSJ zG7{Qrn`^ib2!qBM&6m9O4@>#U!O8^@_*Dh<`s$Z=l#Bwb9NWSpnB)oa+!bMAzCoDBzU6=ss z!v%p0_oJI5Gs{sYP@}XSm&|LaP_${RyV5f?lBU*Ld6OP_iyA}=+fm#leRMTLTh z@^f**@!~gFvRob7n4?X&2B%fUmAJp{vM3d&3?>xQXZdPO1}H`O%4^*EG0GQl+*z$M zFskzEF|*fWoU(kqidKwrlByZK7zV;;sY4rrOR>w>y4VJ8Tz$6l#*${8j$lRQ#W@v& z6+;d~|0Khp6WT*HJrPF+#f4PP;IvE-+4*$EO(lJKYwxQWZg)DA^#x7Aq&vw6=K;_e zG;*-+1|AKgX1!yj=sqVv>)Ufv-EBUATE%NF3Gk%-%xy+bFN`temDuV%+koPZqV%B2Eq&pQw45+o0~o)Q z94mWuea-byKVMrFWjhytT6#r3XzgsJ<*UP%-#82QCosXE!GeQ>@PY*S_e2YsR4hj;qtA`7|+F*^TGxUKm;D z8$H#6>#zDk+HnmK_vuRgYa7n6n5o%ll5u!9Li_D6t0UC;pRsi3UEkUTDfg8zG}>GG zmYXvw7rFEvA`rYiHA_?oci~b*|l3(Q|~xf)TzVArfBc zwGOU1XY?l*o_9+0tnc3BSQTD1D9Rm(N)7s+P(#9`E(EL9$se7|OX$ckC`>Gwys2I~ zQRF0)$3Zn^l|yJRl=UP4dkstE*|W$OIiBURZ}_HvG11|LlN$xSD0p*;XYP6EJV(Yw zC5pBBo5!XCdzC+_=?k3_PFJjWcjdCE=fj!khRQdL?-F@8fa_#c`a*Ffu~(|?XfY5t z8ujddlGy|@*G!as9uLMn0!%Br16y#p9^=33yz|RZ+1wOf3G$p*4whs6Ma-bhXP>v& zT?+Y67wi2ZO!riKbfxh(Mxd&lXpUHNJ`+Rr2tg@PRboA6@bojbRF+>6Gq2TP_r9d7 zr5ilS+-ns3Oz+W+yL$Dx9NpVIQ0?qe_L<;Ziu!PHjoX4Wa~>v$W52$yWB3I>iG8M1 zcv2E%@^ZG{ELxVHbe3dAAX)CZJLT%bwd)NhRiEz|dpvUvBI|vX*dZU1e%4Y`HR#@E z44KK5^ylPh|?L5{~_WvsqJeC(j$8@$W? 
zEfcU9#0kb4$%A%vn@bNlxI33hq~)}RND(kqDvjUyytqRsXIv-n2Gfk7=honeGZ57Z5gmjh zdka{9ZTYWq$KEAs!Xf*+O?TBrdJa`D-NZrAF zm^f+pv0&$8PWUaryqYvK?YDpcGohd0ym9o#JVo9W0O zs5?YBs34iEn3TJ2qVpO{Ki>Ks#X@7LnR-#zqk?RiozN}P%C+_mk4Wi%=V9yUIFGT9I;+>w)9 zBztmkG%;4cfFXp)(Ss@bKGEi(=e}M`Qa$&LlVxcmsiGdPhMCV!w8bpGd9%Z$GiQH) zGc{vCEqT1A^}0*^$Z2W6 zsgP(2%mmyLmF*i#00(eaEq(EJZ0n9IFvPgx*bRxmZmD>S1~JK^4nAKCWI5TG~Ti$mJIf-7NuS&Ji?fF)+ zNvET*A@j4GPJ69@cp>dO=v*pW&m0M@(+@nD7=LePlWQ?B@^)?QldYwJh2v+h(tQYB z(Jpi{zRVP~R%~it(bJ>OSz-M0se`c*(b7Rn@`(~%xAC|xxy&GPNp>P8qp1fiJ(GA2 zYgdHk1Z>2H@?{8mdj(F1Up~hZ-B_f>e}zOfhVYbR0a>P*I!`BY_gsvM?z6PT(uZbQ zDh3~=cbGJi>80cvlfl^5tp{h^FFqd#P4}xd=WppQi&hWIP$VwYY*a5lcMI|mz=r)> zb|ag8W!qj6xNm_zG;gOLeMIcX>tir!712}~b3??+N>BvU(C4tI-ts81hO3}i=Tt=n zvrb+@)ijTVt&!atF}jDnEN)`}>&mlCaK>v6El%!@^4qUiE2+)Egkd`L-0=TJ(^*GF zy}jRG5u}uql9nzJP#n6Yq(pM)5J8ZT?vj@71_|jNLRz{Ry8Ggg(lFHT%;&eh|K7Fk z^{zYTp7(j4{n~pU1c9HSX@R6{#TEP&+VkPXYM?oIp7A0hrOq0+4IJ;U*L^Y>C72-J z!lgsfe|OB4P0{3+&}}v=S+S6YnLGUV>Gi%$wbJ9I+zeHQkzR!A{Bc|J22*FQIz<)L z5Tf?jUSPkA{@~}PyT5mf$>2eE9KEv^plK0g;iya zh^HyY(FSQ6I+i>hakA#T!eec%^AIMLneeUZn8^G%5=@;V&RA{P{lQEmedBS&Lr!`r zNx@RqH9aCiCc&TREd19Q+%>20um4ut8UUmR&%=;vM^T}X-{h)0{c&F=M_U^rHFR~h zVsBAAg%4;RPdGb*_qUv*mi+)jY?L;TFPJ|6JP`t{)o(Kn!oW!NLpI*7f*M#|`Wb^t z2A7?|uUBt)rLS3(_UwOb)tmZi0r?-`ckl+kLon)hSb0-669Rq*vg6^Ua_~EN-Rhos zZ}zDjfJeH*2qQL>DTl7m+D^e@^fP{tLPl>xdAP$s7!_*9^v@R(Z6FBIFlilXBqsYC(9u`7AKNF;nc6LWz*$eTmpfG8wk>x zN>WdSPfV(hk(1Q;qf_!(1O?1D!jR(8;*s&*1;4?7+YMAG#*34ESieXubYEw&h5)ix z`6rg|OL-sFDNyED55Pims9GBWG?L!E&oI~UKk45_5Z-4chKU!F|6M%sh!4 zn$04JC&N9AbK{@q*v_y6V8F$`13#|UmGB%akzY2F3IPIVR|WvOQAe+!o!`~cJ^i%{ ztnn);-+oO;S!k(eGCU%udZ@9}%PQdt9OkLB;5<=VBse*Le#gSLP0WLB4t9cHL4quG z(61V;_%TD6mN|R~r1J?au9Wk)w~QXXLRPG(=`RJ3e2(Jbs_uJncd@M1ueqa7@f?x) zxyM3$xvu!EhQ5M2&H|+$@xNr|Lg_wRN0v=+j{~o}a1`=f#raPb^=2%p6%o4)s0&8cqS;(mXpejk!h@V==J zt24oIjH{dooA<8p{X)U_8i`ssLwwNvcfMOCL-n0rr^!|%gAmt}f}@n7(@5|@j3Ucv zy({-Xfn#V4wjw;@Z4aOF&%!%`63)b!|7<1bU#dfmZ`N$ZokK{*q-4F+IH5-0BPG)l 
z@yx6An&M4WE`uFEr`2l42lC0IsrgeCzBxjnWq@uAzFDjCX}UzKpZuWMU)vqz3$^bi zX_;Wh4f{W9jgCy2na|Az?b0C_l{FpEQDBQ{0#+XOf`bN;l?}I2{Hiz-tXlNswVLrF z2YP8dO0)5?CnJ;tIv;S3|0Dnp7oJd}uabQMGGzL%FTC|os3JJTDbP#}Ks8e~0ElIH z9~@Zo`h!m6kt=hQA2}eqT;_5-%6oHE1GVC%>Uyu)E`JaH4Hm$n*Xsa4+T791VC>2Q zAiM%r7>ZXCQmwH|invfHzgXRTLWT7-y>F`%kQtLZ!WrQ2S@Nm3nA-U50Pwc5u=2+L zo})DG`q+9cRa+JW)wiKFt?%PLu>RkGe${yZdEBR%>}-;2poK7uU@kshLl2TR@qJ zRJ{F+w%#*gP#rhpqAc16fdAxJFq~4OGDTILGyw4G*DuBbk<3RR2H}EsdCK)&^S@5q z$nED)%##C|Yp_q|EE@Ef1BCgG(=>Ra_tp@{CbshhZ!ZDg=I%km1|A9}WNF*D*ZR<= zS3r8(+uHL$ZRi#%zk0C+RQ`hs+*x1jLpX9D2hn5Og7PyQfThJbM{~{H9lIq**+j~P zbW_>+=I+sxUz^W+E)*@qC`}gGE#FR8ylw&I@5q&K>zSL2w)+c37Uvifcx34FS+^u8<7h82Y{f=baBM;)2rfYJ;5|l8xPuD zRSFr(t#-=8=<%52 zfZnRQ1-C*&GuZc$440+dS$6vJ0KG`ZI^v{>$Z||e|M~Nj%{Ipt2)BOsWX@o6Skr-0 z=qi?=ppo$3^@ziXLfa4cy`H}~yt>niazxz&#(kpcCMSw9V_xih`CJpiQp7eBXIIKukHD$E+CU~h~`0|GKbYe!kDZcxb zj&=bvJed{ys&b0dvge_iB$ZErN>(D|#evGFawW&z(i)i1RUYyBfo@YU!#}WDxS~C9 zd!@SE=5yb?P9$*gztx}%*lQ;BZ480`A$EQG?$7Jn&bD*A%n^=d`}v`L8o(AC4Ij1_mqwxkOd^T3!#HGviTh(sZs7JAQZ_Cw1S2863EulRW8$@7+GJr`S0zrF<4YMNn;=WFmh>shzZnJ?Hm$V0+yZlZQK3D z{UQ)%4nfLag3gxM{Z$kcw2tx(789;_^>pZ$sN%C&F1n;6bty28>gPVCk%VF5QT6KgO-&HL%+e|635w3X*R-D$%$IAAU$hh3Al{)G!twdi~^alIu8`t zqwH_;*$N?x4%)_KFG8{3cpNkqg3IDcA=SU35umwCz-zX@!pCNoi@WffUHrw-jq$Mx zlb_3btaV^s=<(@=X%v#)7XEO3vIr`;XWAUn{yV&mS68-2VVgRgLN>l0*t{XTI+31= zGm6_n*O_wlGnt+unGX~6{y1DHmSsKV`MNd#FJv3NQQuK&jzN?cOV}*+y{OH=YnRd6`+s;kG}2WQ ziJnyPERoc(P>X^~Yq-u(sEY@@TTYSDW8v0D0O z)k=biy>9)X*bK6*8{ST4M~WS|&^xGEj0|uE-VHA(%t(DdzwDcPNj(rH zFAC6V8_45YYDl+vu9G5exIk&Qx901P$WL-m8Uk^u8T>Wz2~pOHiDaPDYzO4QL~H3K zr0@0z7MHe1c=c7m(3C&EN;)a#x`hqQC4MnlZx}`{zs+T&Y!NPG9Nh_j+4O5uXBa0F zxJ6-=`xG41iNLql<{uDBhEo~~h!n0H`u$i!mq1qII_?t38nlvk+EpD2l@L2xQBWzE z2{9)XC|P#pmX2zZ!qBv;T>lGRXfdpOBY$vyLw--}_rCK!j>}@?*tpk`BNMKTLYWHg>{Vn#$O%>fjOs4^VR*)OG^Mxr*tdHk^ zIL*h6Feo&rpb&ADPex?47(Nv|p*l()?;AF3rRjHv6cVNc!Av?Ga{j~v06=lOH=A*z zO%<)rR1Dpr#(TvU+*;dDYMJ;HMsc^u6b@ZWM92P3^U} zJyukxDAvs5n>+*g+^cgO2?N$iKlGVwds&Oos93+9<*?e(r3;;qP?-_2ZX|Iufj^LM 
zNPMACn<^pg`wwRWcSdrbfNVF(faNU3J6X;Rh2eGjzr*NC#vgSOpDCli&#OpJ2$`sMr$1Zc?!L{q+@k8gEAy`Bv1N8E8 z_|!HAIQ7}jBg8;r6B>5u*UiR{GXKBXOo4e@qy zGR}wG2UVY183V?U(@bglFBX;i-<$*RF5q;Gqs@niqO6N#LNhox0@&cPk7MoV?}o9t zjf%sXj48ulXpyR)9H!aD6JM#<##Vx${i`yb;~v$}r-)U#Pq;y#dxAG(iW%$@T0z|)hE47R1{k&?{&@n#;3SlF7w?3KE~Z7RdU#SjT2ryw({-gLQODDEk_1)JsrZF10%i)9t61oQ*W$Q-pR#1ArP?U zV2;H=izOj1edhO|AIGQ*hqD}e;Thp&OP8jYUP-fR_?@YcNDyZ*uURihdx2Iz$4+-8 z6Bj%5q(8u?IudTHqbA}z1}{j{HYc6sSra#{Wn%l^`$}(d)!-?kLFB(nuV25kwH1tF z?V#~#dvCWYOos-YKYH(Mg84#*+o9r;~f~>4yL_(>lxc30lw%M7vcLZ ztmF~9J+O7r9I7O?iYm2&Xl09C^!-g{4I8iP-zs?W!nhWXUbtXhfDX5dY{)2VLC$hYsC0#3P`MXx#*4U zEnY-#SN3%LA!UNpaHMpD2Zt#U6VT8qR=%ahIuTgMbariX;EiuIe65iCeOM3F%>0QkzR^Cpz#HSgE*SXMnPoL}cT zGx>4tLN)>ema&@yx=R?3t!pn{xy<`L(E*StZh?-uCxBP~6I{m)lQ%(m^?<=atH1a+ z2#XQBNQvnQbK%u|SNS6F0h{y!ME0a_>~;SGHMunO8>&@|IbC@prgoq>>S+5(q&1)P zSd4c&TaEkd<@ZL*#M>RbZ$|qSTQ&jrGM9N8mxHC#1ud;`)LJ37s%NmTAz;!!vzA*a zB#5oU+6p1sJE?z^!m`o>_D_gVQBUFobS%@6mvDB2k$D|(dFy6KeGDx--vM*E!KS;j z2w)smPA} z+vy1>20XzI(UPZFeOqH%nTjl41GQm{(p3X1SaU3}0Z1$S6gjLd$-bJ5Q_zpHVtb}WwJ zF-@iQN;d>;WAU-YkXZwegk5~DWgFG;IlSP@?S z{v?!LIyOBGrGgFfOWR`rpB+A4M5f2awu$ z2Smn-K`FO7HOp1duhZU%We(Nu5c!R}E@gQvT)er1nTo6|>0TTRSY8qHD~dMH+p%NV^D2UQKCLKW1Mo1zUMF)6}E<)Pu&HN3+y~{;x2@as`Mr zVDTYu;vwO{pAEO!X}b>Or3Ht~rVy}V86b(9YD z6^?>``z{eLON$Lp+ziQ)ZM2l zC4ynw0b?^`LIS7Ii#J?3%7@dPq}cn|z1pG9AA^Jsl8&q1BOs(bRbB}tT*vHMoY!5| zF&ZlTaKWmt)_l`(%8TBg-x@KwMJL<=0*Az&B!q2_C5V7NgmF#xWLsu#D8;&p#L@1P zDrp6?G4m=Q)iy5PfRXjE@n>8L6nIYQAH?*~bfJourI~nYke6y2yX}hHgQGf$)R^b6 z$ok@t!GZjeDhp|O0miaYHNw@y+}Jjmak`KK? 
zNUin2nx}EwDQ|x|j#d65zvkG6kO>k$`$&{qJm`qp8;Dk9-V9w$rQ#bYoy&)AehFv< zm0M^~`EDqGcN5su)r*NSB|5!pqEUvsQdOz*Chorfp{;3&jFF|W2PNBd{AJ8>^NG=zaoXs|4&=hfv05|B-VSId5=MQ-vM07C8C^^hSQH&OT*58X~l_2%i z?f}c@?LKxov~ven?j@R+#S;YT!w(K6sTx`)fff$RNuP!u>o{=)a?ua9z~n99U^aIc z=DtXbxP!46f+Go?iMHQd;?B6W4q4c@$_>0wmGcy7b9Gsx*-p`XX?H0Ey7*gM_UMT}Q=aOiksr-pOlwJpv`- zjRTB>QTC^BIXfbFYia5u{teG@$)>2yi4AruvRpy&(> zGEzAv*uB?QK}e=8a&kRJM1?$`VtV2+=A`<`Wk8KTLgl*lqp-x zcCV85QiPQSZ}Hdsa}dN}Li&~%?Z&tCzdh{MG&{kkG{Uo^*3;TxUBRV>Rz4-nVDHTZ zRL}nN(m{_x11NN7^|jf#*n-L?2A*vjFw`H1-6o2m!V3=g`9F3E^qm_ZrqHU!ISVl^ zD1z0K{`l%_L5xzW^j}t5Ve|^)b-*N5SQNAc0*T+)Vi00hOO^95w3M|QZJ4Z1umEeRzmr{>TQxOOPYPdrYFK9@O)iAeqDI`e* z|Jmy{mI`P(LSo&smv3yq>$GboT0|(zeMjJ}h`?f_R)y%@;0MEGgfYEV3Kh&U<3n}s zU+W~Av!XO`-v>WsAh8QBZ#yXRAqv{FkASvz8Q4Xfz*f;&*b zX>C(C%e3y+F2QadE2^B@bm4N^!j1Fj5|ywwqQ})0$f`)WhY9B0Jj_*P$wTda+?dQW zs?s5Q-MsoLSdfvyzr)3G(tfo&MNyN5UlC4EG)*(e@y~c(YKi%`e7IpE{$yk9uRr%` zNP&1T$jrWN5_8~VBxOssgK-AimnY@REC5((&V{Y!ur zuMh$1Z$y)iloE~RAztgIteU2gVj|d`nvqPs?}@d7>8h><^NO7>P_m)~Z7O4Km%PUEz0g`!&YG&?`zoj>)*f;rR4AWHX_s^c=${bImjO^KM=!jD68pJ^-~JS%(}>xDznx zku8&bh6ajSGXGo63VBqLm9kebrI)VO>G5TH+RMIWL;-xjb^4rbe*<~-=!}}z?AP_b zpX1AxltN!kF>Qr~9Zd?$2Evds{+(@RObhpPY>C0oI}p35QNCX&1;5`z=NTPC7zg!7 zf*s6lLEfEsnA+E z6Z~+mAJ;#5&tzbx)O#j|GnzI%`{Idzo8&S-9?=`o!u3kDBcE-1apI@Scx_p}Js?9Q z=f%+2H$@F7vVau>>29xOT?N01P%NP$cR9zBc7Y%}gGw}>vs|y+9A0WS%<+)$=N}fJ zaSw9!*M$X=Q^S^1`E81Q9@2|A??J@UOo1onAp6mUqJc8MmO40=cbGh8at2004i=;T z$shWR3{53N`gft3%|u{LvYBJyK;NF|XuaYW7v>5q0C#^h?O%w{2};}jGgXGt?zp9U zw+=dsnBqCM>?L_E>T(Qa&HcFI_YziLw(iuu@7od`>&Vsrj$uZ@mp3O_rpM}{jL}6I zT*?Ri4gcqJf8le00(r^>%_yaV(D*J;UTbq`cRFo!y>3o|LwI&97rBR^+A+$VcoBiW zOgQ(h3_f=S6o*lLw@$wUo26?Yt1KG?@0V!HOpOy%>}>hkVEHwxa;bG{8lbEsZ+oq( zfuJHX>ZsuWvofvY^|QbjjevCF>1|S-m+~o?v^XG=&BQB2MpJ~ZQDGv+gTPMaF(PDb zx&$^<9!KQWAw=FKQm;=+H}nJA)R)EPDIptSCl12v?VP_ZO`KVpc095P33 zY4EvGq7==?;y-lvwBi^Sz5+IO*!B^lMHw<}gh94|7TM)cUV>LG-5s0_bv#uFhD)Td#1pTok>0vr}E8U!6A|EgDUZQfLlk#8W2i5KXH2XU_@ 
z`8=2p)YIRooB@O$LNMsRBLMb$GMS&G!Dd=}gGklh{-ksfWs84((eE&GMyQ73i5=_F2A z39_0tiEWttncT^EOU`Q_7W7Te%@FlW2m}!Z{hGt)d5LxSH_c_a5iH0v){?obIskA# zbi4(;YV93Okl;aTc&bK)?i?zS37<~01&q5Z`hbTY(4^Kf0XwH+W1D##NCK)^O-#`n zR4d$N_PF&W5<_s4y}}-$6~5+p+d2K+&-LrPgx1*6RAR=czWW~g*Y@YQiJt+*s2y z!X;-{bLnYF%u&Wefj*aaR3?Rt$B*zJ?G@|cU~B0O5(ID<(Pn-xUQvQev-E?}G{4`s zG8zz`a_(5AL4C(hZ&~*pFOPcRfHkj5@aEs2B>;<*uZOKCo2?2wqWH6utc!l&k|~y+ z(Phhlq#U)bkhw=#{O?+46jB7v6pdpJPrs21IO~getp&>WKJg4Shvmvyt82Mm?1KlQ)0ib z70DxyrLP>Tj*Fxi!ArPL8y>5S!8;%3)Ft?-;|XWYkTb_zrI;xb{zW#g|)+` zycge|shn?!%8Mm-Tkdq7jc+qR?FG*A6m@6m8OlV}-pLJ`B_;FL5!U{>ug>AERG1{z z7tJqG{`gJ_PLJo@zp+S`BdOEeB(vb!y`L=wMW4e(80p=f<8a+uzPA+FGBCbgKHNJ2f9Ev8I4aL$*^qpY|NN8xdMfUY~fcIg#E zCif3Te3C5NFVaWl8-42`ptgP!sE!`|vgSp~*^m`J76FwGq{@!8XLSBQDx2+dued)v>ZDBPxK|(EN^I8Ty-Z(XZ`wnA6A-MU7hx|sed0t z_@oF%??&&TFWX!~H@x0*tYR5^^q!;RHEjR8Xa*nnSYf^57(ROcDN*ag#J6m@6rn_) zx;H#`2MC0iER(npuDyuQ4Pg9VLfz4}K^MgiX5w$8v7u|PVtlcE&|$4>jp`-sF0~_* zaw-GzrrmBzo=IB7Cb19Sv%77V_K40XL-i$?tE#1QfVitE93-5(S@{R=8%mu@bta%F z#aYWJoX*L-kml-va{|+W2<7^9?K1cS=BuFXsK_Ahx)F{wijIee$5nd3Yns_m(S18c zND0>W=q;JhnUGlK=5vTbmg}+QTQ~U3GWHVX_Sel*b2-Sj8Z_m{1MB1>TN<(>A)HIv zhVI2Iw~z|Q5Jl>NKD)dm7%f%5dmr=jP|a~2B`}eEonsgp9RHDVB2Jh-1!?=E^AhsQ zG^FxmVTCHAxstS=HkNJ>O`&cn&k zpC%|~kyD&Miytj6aVF{ApyLvNNs27IEAksyIE;cO(yh6Flmh6gw^z zS4*olq>`2E=8s{m(0D(KdkpuWOzGJq(Y;gmF#FS>a3mEO`AIpZ7mq%I1=HpOdwr+_YqH*5#06P_lKk<#%JPhRb)&_LkwHL>ZTwds5?8gjQ)(4L#5`b!_rPyk zaT%xj{9G(k!n=u#H{;eDlN~Yy646ZR(+jc?s_Wk8`cWx>{ipE&A!3G0S|$wk`8Rdj{JpKc73rnyo&ffJIM4 zig(W&?g@_4hGwA~KklkS5=*&glBWjnWxwk##QpEao#}=wTvp)|KF02akwRj$A9hY_ z*$k&1Hf1^=CeEU5-AIu+30b#l|0n)~wLh@i)>!8tJxBqcpoczr``PH%xki+&hSob( zV}z7v3%1^0JfTLhLJ1M1dzsQbyz*PV*VamwBi)Tjq>>eBvj2U`Gg4BX&9$tU z6DVx1M^;|8J{Byl@M~B{&!b|-L2AHN@qJrmKEJdhhFeU!>U~kc9S*dcmrK8Y!xQXj zO_2Da_Ml)=8xf7a@P)@agmwVDU3&2L!x)V*upN&5IM)a+c>4GD*7`HA)fcmuT%5$N z`lI+N0uFs|XEq2}>#1=s*rCj;+C}^~y!4v`)M$DmGgd zv&EMnGw1yeY$Ldu*#Cx13#wtctW)g6RmGd&Ex=B~EI@?|JV_25ZWg$XzfM7ZjRGuB 
zzJ6g^Q^N8VGU_b61)~6&P$8r%3)c29pKAZ$gk&T262Kms%k=<_%4*XlSSzJ2SJQ%! zsg!?XD7+ktD$;w=zLz95tZG1zvn5-bnK14NP*Dqnj=G2kKWiCwfmCZ+BDjUf6gYZ6 zdQ~G?C6d}njZh8^W5pUeuG@PM#d{Kn4Ru5@>UY;U@KDyXW)=n6@m4)Ppg3KjX83o& zD^q<$I@knHSiFbthk++(xp?Ds9Y`mP)1M&hZ_W6#e5taYho$Tz;7sw@QWWxvF^Nyg zURaN+9w$ec4dJJr=1U`Lls>s7St_NU-cl?@;pbTz{NAm*56eDNw69-T=Wy98?%3o$ z#{4^Z)a#cyw#KRek}DRv#V<5Vu>;ek%eLXa!o1rL)(M3~2uK!x9k_KRq$dK>Qggy_r7NzRouF*w7|6AMWd`NaJ<2b=sH_>G;kY?ZZ1CHMrMEMf1?%R z6khF{)${ZEMTN*tzp}0@zrS+3?tY#*4=g@rn>bjT6f5SROF(1@VWo6bvkAOgMOz-c zSW^Rat|88?b^q!YJFs)=L85ez60`}EVQhMkmk|f*SIe@h+L6<1y`b>OXB4&Nc3HQ& zs@s{K&)j9ygI+&a9lQoJzFl|WaS#$BC?kIfocL4r{;7W|OO>Z%pX!^dd zdK|@;Cxp)kWLBO9LdfZQmB5qDG=kVCY2;#4eaDCO$& z8uzF_ii6)#MjhLf_~|)7k>FtF77PmVjke%CoS!bnKiIw`*t9}+i5Ql@+yY7YB|T={ zO{Lyce4pNzdoz@=8gp=cisxR4)VXSW6oG?^HD!&nMlUpb1GTlk!{2`3By+hzd;ixP zkg8;<#Tc4A{n5+MOU6_&iH>T1SiJ5EmE=N3mAO7i5DarW03D`ZGs(BE+5jOZX!m>%lrY>z-Wm|`uL=2yG1!cPB zSa{9^hH+M{+(slga>V-FROF*=|30h_U3L;RM;G()HxV!t>1{VT)5 z{KBMVmX=DWE?=rK_2+)JdVCJq0v-2lu3nyOOsND5jH}R9+pBYqv>QgG_4-pB>`w|A z2r|gBW{n&~GpLD^3OaO|dD_bSl0EM&a;>L-Eu^>hv@xXa8Jcs4J<*_vfTdl3lW1R_ z;`?Y$Bb{dDK5RG(7loyyAh%AFvZg^(7fw~CnLTCRcuob5J$1fDF#Tu6vdh#ENJmh4 zorxKj6pIcK{LB&mmmzEL6ibe7X2$HVauTm?DZ1&iG3}4bxkL!7FJ-Dx$Qzq%+Bl|I zb$-(x$7%!aQbyy2GK$C~lTXHxS5^@VpMO4$6LH3zvc`R|l6^Zd_V8+54y- zU9gp)0nT%H=vd)92!gI$fr57=&7c$in)pUAJ>SE*dYB%t2zjShlLVq!6zO>$bq~NO zYNno$_sjPt3PV1miBuZC!c49fls3aEk#tQO`&KsBtF3f@<>5Q)JJv4HA%bh)5v4*^ zWNrVbv3L1?PhhzCX+Im(aa^Xw`;ATFg!!lo6=MRrCd4kP_o8tJgrY&fH!Sda55P+H zZ6FxUF_hO-Q#<1Mo8r6(u0$()BgfEj9A5Xrj>ZmWZa2R+vSs;s;>)zK(MpAdSGipR z2iR1$Td?nn8>HKAEJl>?@uo#qeIKpoN&+nq?U_08kJb3nAXb6nK89Nb?8n_J|3Co+ z)Y+4o&1`A`SUq%Kso|EI#QM23=*Ed2@qaY>lOm5(_g4(I&ht?r{uh3jfGySs3lwC+ zq3lZAk8|^N^NB>c$FlPea4A+J^~yjV{u)#C)Q%_@PpVn+``$c3N$57STmIiRAO|E; zEtkI-kzc=HTR7sE1l%&P+nk(%Z98-0Cr^6N#^kd!Skby%J*WA%!Fw?}d;%iTH%O7I z{ipjO_0BVk6S6@`_M(GF%0~HweQZ<;0qSYBA&Im0Tp1AM`XBoZJ~=D)KcexJcV6VY zU~vx#N6J8rdst?RRC3DJT_p|Oe}5@&g^S$SAq;M+8wSgNvtTj>b4UA1#gs)GuNyJ0eCXkKADRo!9hnWRKs3#L0 
zHC}oQ>>|-cHI<75EXL&=gao~(Wq$AOT;mXzMAbKb(xGh?#kWGPR5%^i?aR{OJD?YsQ{u#bUqt$Fas`+E?4J|JXt{0t0$=e2CQa24MGpGE8^3CV+B>#ky)Tvpp^Q;VfRX;I(^pJ!y zz6bge^5XDNK2`zG!@}taIwbD1HubOM1clQ1wZ_=GI?9>hkl2BPz|p7ZsHjGr;C5UoOVo3Q{YIuU}uSb?;r~ZnSlmq(w07 zYg>F4PTypV3=XT Py**?GqFclF=X;}?xY+ftU7S>C{m&>0prYfrt4$VrJ{6z~fa z4f-%2TchM<%f|K*-vKK#ncy?ROd?#Guuwe2NC+|-e&stp$)>*4+aJ}fB6{~KRLq%{ zr6v?Srx`8~g+G^K+K^7ivDGp4dZk*9o!5!651;i+#zxxDBsUYflOp5_l6ONM@hBOn z$~sri$13f~sW^>kWjG2rAd2vHU>`|mSd_ew#gIj4cWc3J_kO5c$bwem4%44=iHq2B zW4%mAdP^|_WAw-QXMII4_X#ISc_fab&vQF{qF=-ZPq7iu80j)rPX?2!V@RSu@_WJ- z%TU`Jd~|X_WCAaRq^wboQLTzP?bHU+d2u)5i-AOmX7F*E6|*Yr8f_c zL}ftw`%Yj7Q}TV}dM$>w7citil~^5KYVxT*Q-4@gvQ?_VLPy{s!jI3F)|deG>bbb! z5nFB8rk8J$T3ubTfz2_0V~schw&rubz==aN1P)M4Xu!HKEJGOa<=q`{Q-uC-OlxijAuSKRz~!K^_ca70wl|4oqeUAbQ&Y)2 z?aY8%=79uwU82+=Lt}9d72%^mF+R2>XN#>bei~p9?=e0|M=&{TZpNuNqe2TGMuL^R z=@Ge=O#Cy!6vn^XK>XYni$20h$Nj_pjQOSuwM<%!IxpC;Sf{=Fhv_oFR#-dpvHOg@ zF{rM|5T0SRJ2$4Ye>~FRB9`+3sO%rS<0|#3jDfA%8>1hU)|tNR5uS8J%kc%Yu-!w$861>(HH}{AHx4m0>!m^#ja2uUcT@h?cNhTyw?liztVf?dZA_kC~9AtUgpU2Js$ z{rIGoQgWSk*fW*Qz}eQlFxDJB)0n4cZxyv*&q{oX8QhT15e67v(5@i+-wnz?hG?jK z*pt%zC0aHAj-@rRnszfMqK)Z{LI4s?hMnv9OI=UrMXuR@L*l8gpz*JP2$B41SxBa` zsyi|5H^@5eHVu1Ma=TtDSY-4z4zs4CAtOpSSj$FA?WU7Ao~U&%e0-J~teFhf32v;?8Uug@0ZRp%L|L zSV?8o|8&(Oz6$VJ#gQIlFpD(AG>EI_vMSa^iSp-6eoK4NAPxbn+5dsD2L~)v`%78f z;zjF)2z2-(j#i1DCl)KTb*;f4?~v{iXH%=f?uM(*L03i?vFTB-?IO!a~dh7VAZZ7}3E95G%R|EKHHj z>zWR*4})oP#!%Zje-3q3j%~MjS8%6U%LsVixVCw*wz2h{2<2+7xwKtph-If-#Djp{ zMetMAwX-u*K;C}cO3+HYO6^K7=}f^!$3mB5MS=0qBXT&f&%f<^>iz9_q?f{+`YCEJ zx&Yi{Efg?u_@!BW=0zZKECvk6;2!gk^O>;zeGMEr^$h=zP0b}WKYmIwN%x~%RHIC5 zEeY+3&cejc8V)N%xzJW_Q3XB!o!b>p%z_| zdb%HvE0V%G2i`~27gCz5ztLo0ZYyg%A82{x7|+_lQ&RJaeP6}nbZ3q6$F&YPXV!6! 
z;GoTV1N+~NGzOe{jyVcxZzPE>1QoxNjOFF=|F-tb%1;B?7+{)kH2K0#TyPV;4KpR; zosS#;|1AfZ6*TporaHqabqOV!v+Vh?55HIs_u5K(YxU#+scls$_@@AX(i>lV0E7k@Q`%w4X$tlDsE}{C?0_tG z+!E6{tHiQZ_=QEznzAyOt@|IisGoqhM8|ip{U2|agcxSyAg=DF!CXt*e*FQ#$**foh0X{tRx}f&7h+CvP-aJ31h|@?NDOuRsSKvsZZg?B(HKj(QKbQD~dM?_g zKfW-1j?gHbG44*C2+eJZMN>6ZG)CDcuW&BuFX832RFbd{Uzg|D1pR?&Z=}6#o5&*9 zahWZ1aE?vV-lL|*A)Unl!p~m@%ZB+EhSrknK|*jQDEL+htK12u{X^AMvMuf99>2m4 zRaj3qGzn!A#_m)g@cw7_*P*WD`7yLARv10`HvB~0LuwmQ#{`sF`%og7I$WXNbZKNwvqN*0v+F);D?0Eru38Ah$Iq0h4@$Bw1Z77<|-0O z0Z-X*c;*n)Ry=i90m-?5E!{0?yr#4wo6YTp=3Hq}J})bCBM$RA+iQnJf9Y&?{B>otG6 ziP7}eEQ-4Q3B9*pA|G`x$H+1P*{ID}bP=d~HJpD}`t!97(wQi&2CDfXTFcNgB|c8u zLE1iLr>kmU0`EYZg?HbE?1Rr;&wfPf5_zF~h$`-;4QRqOihNWhOHjiC#xs_#Mj0P$o|8Z<0S%z&&)S zV$<)fCkB&v4t@UnG@8()asFcp@UH|{I|{hX(R1nV#c9%&Ihf^b2=gN3?(XNV@g9_f z?cP2b?EH|lEFmr~6Y-QIB!rrzRQr{`oJnrz8*)V=(;rnA&!OH|y9p_uGA1Q*O_Pms<|^7nbUOAGV0duB3s)i;XyzTwePzPC~(!xrD&JZeF4_tD#R+C__mzgjId5Aepjq5k7octnt@ zf3UU>!_WtW4}oK{V2Oh~8*6!QW5qZ1P^6W{ZUiIU+Lg5zrtLb2D~=`lJcl&vX`H@- zU+gA^g53x~Ze=H{b;+{d&Y1wwcDBv0J_P#ipI|R?eo;T&12_Os-0B zJV%8wYKVS*o2vZ(*m}#TD%-AWc+=eo(p}OiY+4$mySt=2q!Cc*4(Uee*dSffB@G+N zO~?i*LE87^y6*e=p6?y+KYyUZu{h2ZbImp9t|-=K%@^QcYXHpR@rWfw@Hhoum3Qaa zKDLaYTdp8?drcj3y;U|9-cDU|W$s0yQOFn&o?@^cKN#3VlN_lo`QSY-hZHja@HtWT zb7dKXy?}N2*IA^v0a$--eo`dEJs_Wr?xZyqIe^+LUcA?jPQds+=fj`s@LTLOi#zd7 zbb|Q3=>qPjr3*+ZR(_cfWk1E@ZK}b$(ALxK$pk9V;*WGMe3t7ii!MP%rzhhnOe(M7 zMBl2hSgIti+P$MaDZ90axkWZjB0i#Xk6S=r;N$uYs}L;Eg;l!`ZIfX@Md=U;p)t2< z=*Bm=Zw_&h1Pw^t4i$7ZkKcC2kXaa=j2uEO2KL;{LfMj!5!>4uk;}(5gGq-_?77Ho zV)-}^_|EUgYAG1xamR?+E=ON>Yg6!bQ=T(QPM>L#LN%_6&`$Vc)&(bZVFZZ4yS_^~z1I%AQ=eLuk>MAlD2y8+KoXQGJ>`$=K=w2{=VL*!&tFlbQ&#`zta zyG^^0=y-(w-lE-|4yy$x4NF$C?hHs2W!gey$X>c0Aq5fICPx^c8SVxDNGhssQc>Ed z*H)r}YQ9|6Knq|kN#p}7!$xF8#~5fBA!sz7X($`qL_Fb?=ag9VN~~BC=*PWC2dHM6 z$w=CmnCHhhgZDRBr3!y)J#ThxXlmkg`JWKA)vYJv=AW1Jp z-0}`p0()moOxM>l;~J+31_HH@fI!Y_ja&35&cM^HPbog)630ER9EDO!&cp7@x`qu_ zQC%BYUn2QASA}#f&l~)g)iA!(SRz+2gv?nYOFZTeh>*vE1})%t_jwUS&;G~?s~?o6 
zs--+Rz2@^0*NFF;33K;PbxSpq(VIo6r4@38iMu~;xiL9^yz4RR3dF#r8TJ+b4hh9P zl>RD##N>a~n}5++fzyPy$b=gf0(b@-wL~|Ox>f9KAmvg-wnh?2KgZjev~Vtn;v{KE zS2^8y8TzG|Cm1wbFAgO{$|zLN{S@itPWbiM-c(rW;p<`G0I`Ytb5J$r{UgCb|9jtD)psoZOLCJa}C+S0M9TBSNO0AWsJIdGMs*QoX@XV(16Jk@^*}LG!^k;PI#DzgCQF zzoyXmE;z_T)M6H)oq)zl1~^VXyrgmQh~}mLc1CY%%eFd>Zz+ZGnAi6JVjxIH100fM z*@oZoTL0L21PKJ3+q@N`xuz z$$|KstE&<+#Wpl+6MG2qYmo z^oSLTohi%Fl3Aq{pn#eS-O~wgmcNdf7_?s2g;GnCR81@NxzA7RU15CP=mS1{NPQm` z{~10ACk(mt>Fju2hTclttbgUzGJVXHpRC}PGrh2V2tNt@W_~}XJ|J{S^gF1fd z_5+wa%b+Y)cdEI({XL}8g~$(jQ?J-6$(8f3S?HkK98it}t17=|xGPxinA_ z`-|*3KOppIg!QF@QtR-_=Usewpm4}o(?npYgeLA1WpGiP*iDigHO&-nbr<(=CGKh4 zx968AK84&V=A^KaBMGh)7ncvYlr7~^~9zO0Y2u}2}D!q?5(6}X;LfX2>DkwU{; zRd^8!etu%>jFtH2lYWUOATPBf1J`jG^S>ZBo&`PcTVJrmo6?;#`Ksq{(U*bIr8@W= zGS=%eG=RRbUY#qHUR(EXr$89`CB6RA@cSa4FdDKOHVfYy>}gVvTYb8P9J0MF$G)-L z@k8L+-KoU*KEL@!IAP~OSY-LuyDzGkNXqy{m`PlVmzW8f;VFf3{o`lCGAo3}P`Zfs z-s7CD%g4G1-b3`A4&jRY5tK=CWSR=kTmmBZs{yY=15gHC#Itv-#zP{0To292Djpv%ae;!KV;*X8)L$)T~ZL1J7$a~(7kr`i2&xcEhc`?X&^cP zK`E&QNR@z6#&`Ml!MAO9b)gSVeZi7{O?!RoK@FRO3Gyvn%*F8~d_gG6d(k=U{*aMU zrBvrqeL3y`OVQW4Y!NO2(*9R{UTBL%zXYX#7Z@h;NRouFi8@o$h)na$3|07$b$_lm z82xt0tw=~QJY}`s^Y*exN-4!`E($AiEVIcj`;(4vcyJhF%Kl8G6vk>+sNdwUi2u*5 z_>qoQSYJizCm%~T8rO)xAVc$LT)4q`h_ShZzqVRe(w_@=KJAR-V6s!%NXZFzDeY*N z<-c)c1tUa>lKocM-o0VLySEe}q^lWTScfHu1&JOreTjjg)O})ybQHM-9p$(%`gi>j zNuFX3>JsNL?Pwnk?fjrLX%jCK8IF$j#s|wz+9{MOQ~d-(JF%nT6B;X62Qz`id!KIc zUL?(!jSMjs0t63(D=6=@(ELERgJ@3;UVS|^n`GRqH(|&aaU%gAVT~RF5deFDX=OB`yeSY1vZwg{2zSNs63Y0bFn+=7aVlxjqu=8? 
zn?`m$ZXFC>nfMnmE4I}x{~AgZh^BfH6*5#{F?$}~MEK4NxjqUnxAeLTiIrhX1f>UR z05~T1>q#de0B5F=!W7}}0<7+@<2S1*<8Y9B3q$`tUe)-67`wWTKYHY2no0GMSbb+D z*opbIN!cGpIC6QopkhZxgI6NmJK#k#m3s>a0HXAw)pGC0+$n(Qgt<|Z!yfPJix*X9B+_r7?!amY9kRf%b;3YDb87W%woFCE zeEuB`b|EF?7lmX<=R-{iP6(-iI!L4B5=#SV6V-fB&Sv$ATJ3B2lUNETfB5td>bqm` zS!;RYcdPVY%K{pOr7!g6gn-1lMIJ_Ee>jiF_>y}`xMvf8$oRLnJQD5uFeYysYx2#K zxI5JIn?w#R@H>%R+Qi0cdu5Rf7o+W(#D$^E3X_@an8uT=EBHz@kp2qr%w()9qEP}; z8XQYT0auZJM91bIl=F5|fg?OG!`;oGkR(BLb7-!sieDE4SR)QI{i zCEQ$lA9)OqFE{K2XH0rgveM#hXBk}C8qmP|VvdthcPr<9YweNCU;Co-!gJqIaNnLn z8;>rs{B}lJP1R=xE{6r9`CwJUqq4okJqdxq)T<5glUdwj4M#%+2ub(17Cj*pBMsb# z7F`9mp8jnxK`DwLMUyWiFwoq5GMY#j6=Rwv8ofbGw3-NE5cpPsJGKYJ$tB5e%FG4$47>cFFNm> zhAG-`25S4^*I7J8YhKSnGTk-k8yEbR@A7C5NsPgLU&&$#Ex+)f2}0A1H4Z%%^_!Z$o_F&^8G`&o5=f$TdT52VhvoI zpp6D=g=W{WGbbn2Gx3LLP`sPUV7FTzD0>g(}!K|P(yM< zS&@68`aJr@XH%pYwcKl|n``9v#t4{jQUb;6A3OIP_`dB@I)=;Hc6zi^++qA70_=-C zzqo_URdPSSU@DhVDw`GYLhbabdzoGo*;)#WGtSE)D%% z!gIC`>`KJ*;Rw%Ve_r1N8a~F*d>CCB2i%Z!D-&giQj+h>dlTe)+Qjxsil86^JMlS- z4xWG>@!hW^!Qvq_&P2(6kQnTBtwIj1F{#XgVafBf5{Vv3*hAnLvd^ojrSZVhqk(-o z^;gtOl%63r7}FHtln4Ycy4bytt{oBa#}!79!AHzko0b5m)A6dH(AvYQQ!T5wFwSk- zVJ6DyBJTS72f04+80*n;FDfafPTTjNf;*$0Lt=bYCu#Nv@QI~M(O+%*z^ii_A%{3k zVTerWXv%<=gRh1NrOtOsZOf~=_c6z2ZIjAb&bI5b@4S!_#sMR~%yFWvYwWY8D}kT|cg%sG zl=5e?SMOS|b}YH-b)Ro>dfdO&B(NN3SjJJS_n#_{s39;s1vEH~D!^m3&Yy;wsEaRu zMj}a*t#R+*JOE~zeex97f2O5Z{P8I8AhCKOhgB7yFRwfWy1rk#3k{H5KSY9b=-QE; zP@xMTbYQ4MkKU$P#VtVmYrUeMCvqKn>-t&&SUrIesZ+Sn7|k z54eyuIgRZrjNc{fd|rK#5gFJfQh{SJ4J;ZBx3149qkBXF+JWqveJIdS*Q-{G$Tm^Y zhq#Z3K@4MK>X`{F(ebyQEGU0Axrw<1XXls^g_!G3F70iA9$jMh$(iRiQ19PfuOQi&ADVi4BJV~N-{QQ&_ckAW>KVe88^bTl7l@QK?v&=&eq)k;M6^8g z#&1%HS*w)b<#n#WL~^vcz3w|U?-&0!wBzhxTlHZCwB>y{UR}hU>2|GfZMzZuhpIPe zOA_;XU?gaU7h57=LOXE-?_z#jsGH*=1T!Ebm}=zZYMWXk$wYd2?7q_8SlR5LY`n0> zfU`a_nl}b^{|y>cAMzSX{zWDFCo7WIOJ&R)f%IZ7amLnZGp(759%7#ZogwCf;Z~Z9 zO>0;8>WiYNDgW}muT4kJrX-Bv5(UERjbd(n*Uy*&65=1O>8p{Zr>&193i|>h{BS7j z40w9a7``*HTC2WFxJ@c{3$j2eFhHZ;eGr4vGHE9Gn2gWwRyYq3yJ(!iPcK3wM=r=y 
z64SyM4}&CbS+UAxA*-G~am*h3DN-c35rFJGV8t_Ew~-HA*JNfk5e(Nl1{#C#(3U$S zNKCyZ1tXBS;p|%$Q)txwh2B`0kB@I=sog_-PS$_Of20JqxL2wYxwd7cB$X;yz>E@5 zSjttWPk%eK_{=@fN|Z#{?+gLYP2c*hG-dNg;$GxOJ?0$uazaXe_glYTYb)Jr;cvmN zi4iX~J+1}DlyzG{wd|0kBWSDJPc${Ka}&}gTFj}HD7|dH{9EbG_Y4X37HHm^$5zhc zD80biwYy=HY)bCK{)Rfpuq&Pjhb>H2_|}6Naj4OASNTipA^$=~dIKnbv>x|#EeuY^ zjey?^g&JU)wtF}97>o6A7&TY^c0+yiwJ`p&TJBFde@%flgBjqtkaW z$2Vcgd3Kb@a`!h{`Yxqys33K6I>D%*CV((h_n6Xp5}vZ;#~pcKlYtxQg~%WrO5+?x z`N6Y0IPx;Yw3Mor;@0Ckmc*%4fGlTWzU{#`;d>{%;3GV6kl}paheRR9MFu%h5ECN^ zt#nqA2)odx+c|cHqSi&7`gTv49z3Sdr-6FBOmaRUgBOmqitKV)gnij$TAvWi6GiEr zlKY4*j7T9(GFOVTOwve+=ikQs`Yb7uvo)!|FPtKW%M?7`)<8P;cy}`WOQ1%9#2=6s zsX=!uZ{54X zXbf@X#Wmdza<%9sB;44vU-27Di6K@q5DqxVy$nbzi7Vh!C+nf|mS(ZpraLSa)9&H? zeJl|8iOjupVW~iAC#vaNE;W7BP;#6@vT(<_?xONKHWZO18;3$M+~W`FukZSvxL}j> zD{_as;ZCX7k&yayGx6iGG?x`z+dT#_Syeb#QI_(z0Sm~x*LS}utC5Vo4#684A?>|c zMI0zn*SXt-q7{`j7JN`zU02|vNN0|jla|&uyew52iTzt6riS$fkp`YP__*>?Ye~T- zA%qG_XKQ?DEm{f^6((J5;{1IkBmP_x#tJ`VSVFL!M?gL+adl_@aCao&*lw76dUsrutZB3@SDINN5o7UY)n|J zLYYTgXaFkZLW9|Z%NHpnLeo1Ym5l7j+#aD#1Y84(>x(M45q%=oa4m~FrUgiyGXJoV_Owi?UXi}%Ho;~yl6CZF5LYadNSV zd%H2VjWrq6i`=Pbyfic_cYZ{4?|@7e_rW?wt$JYu%t@v`n!~)Yq)Ybjd4weQkjf6kXH|HCGprsbmn?CSGxjbHoQLhjMka8__ zvUTb}itjjE-1pF(`#0>1_fham_3O+$0T=1fktDhgzg}Hdp}hG9L>oxK>{zOz znpl-U$OUZx2=2{D{bxPkqrgW|_9oDI`Wc;XZv^MqA%b<3G;nt z*cBXk&E7NejJYK*$qPKa@#h+j;P2u#)N2ZO)R{r?YVB3JI6@KL-{=$nEs!(QcfP z^6Etn^FpjGOcYM;@x3h#%DJ0814={40%}ntqatZ&Kc@P@>4fbv9s*WSA&DbK^mrij z5`nhps)Al64bpwQ)0hB=boTkW6Dn1-#wpjR8MF5)gjzby!oACLk<{LAu zbp7)J!584Y|2tHESbpX8l=iJSfx2LIY-rxhrjf#^e+w+DGi}E+DTbY37S7;$UfGSl zNj@o}%)~-m@9kC0i)VX5^mqx3!e2HGSP~r#-hxv!hs9vo_7svXSI4BYdjp{=iwTsY zFM}m` z?Ud5NM?2-qUc~qG1e%`jIUcmj2|Xxfy;9xSh8`radaf)|V)u}c_TxyciV+--!hi((>#&~SS z$Gz_xC(?p8R|{X)>BNqk*_a3*!KH6vml_#feZq7#)M z$eLI(+Qx<_mVGYmHnIgqu~r6$A-!sxOsoC0E1$=mg zM=HJ_O=YQQo}<~XT?4k*=YU=oI=us--1X!i0QyoBzts5(m^P!J{a~*`Is62~OS7=F zyCxv74k(gIW=~0>tF~fFo-^t7)1R|zOjzXkbxEc$o8|R#nNm!13IhmLEq4N}o4VT~a-*(A~GEI%|2(K5O+e 
zB9Kh_)t#)02bD*HlG&g2#w|ek!9FHIkNE1h3kLHTV^2cRUveqjut=|2h1G_xuVcPc zUyqwiN~4ijb5jdEK^QZfcRPVd)8;Wr`@-}tI{-sg_=3FP?;%$f7m_eR}Ip%n3|CtArnw@?~boANXgwD z00+O6*^X!My@8>V0bJCDI`f|{;rI7JBEaz?pzFxS%kohMTA^Oxx}@;BkFn++@M+-! z7BC90S{KL0#u{T*%z@J+UlmU0f(l>?nf|v4x$S)P`SnlpXZ+Os^+bb_TBI7E-mgs; z->5hj(2rl=I z)YUf}gDf!~LNTU2rH+yDyoMU;GHNSt`x^n-BMd_dl!T<4E8Jn<6W3Uno9ek?xg&#f zJ)=Zr>0+Regvi$?QXJQCOUyhFxEjTWEN?K~%^H&yFyc1abJs@(FeqWh z(SLSZS)ltUmr~JFwy@xpiIy*AJAhVrhDfEjp@wdj4 zkC)ce&pMf5bl=EdDO1Pn*?f17kX2jYOWe40U(2yd z{IT%FE86mUgk6N3LYzbVsO{dAysIEW=Pu+zM)ofsu))_)R)YLxhD~;QnIUBrz}KG( z=x6e|tTty60f1mh(EZu)Y^&QwmglIBNa$nG*ZcGF#%8#^>?)>u@F08 z24`4&ymKlkhKOeK!?>uuBuw)PYQYq~+>=}%@Uq*KsW;{suM5LNI*J|rCXzJX68-t{ zZG0G0H3H4xqsThXO(DnrIPs;k!oxh)J{O#oMv1{bB@7Kp_swlKk#3jaXQ$z??_C?EAMXqgnhqr3rMv8vhirr)b+g z)h=7k^7DG&A^Wu9?oFGsHg(@Ff-}#qL_bXJ5&o_B7-}a+WFpvi38;SuPSQ%4GC%K) zX#!91VjC9F!dvZ42iB-&&)n3Nn{b>2KE9K$vio?DQhb$XGnq{~It4kd7Fw08%vl;# zAFzpNsZ*=}uNHt<8bF;pZf9Bha9fDmt$H7LVlfX~NHoeBVXmy)T2N+qegtA-5@{m>#nA|xsuY+q+ z$pR1=DN|0;v|SBXzEFsF;4#XIOYXNok`HcGhx;B? 
zMHJh>2F!V=m`;Q}7&%bSZr-1BAXMoOCVZp!O4nL{bO zRv(gG6Xv_+MDqfKah&0W%zL_0h5Rb)b`5*0|+5mK7P=19e;M}P+zYbUJ|1m88_kH?U z0&Uk}|6$ce1@JcAakd;xo9yy}exFtJDxSXR^r-KbrugszgMChOoLDtmK^cwvnsyQ8 zg5&PIo8vHY@b>8E-{dQPiKP3Kmxqy_Mz!52Adbydm<4<7j(mpZ2pm)of{!4Oaq+!V z3!xD!5~~y3)nvxwL#9cxrvhJ%Z#Ln@CAZVXfbn--`hsqxFczEP!{N+na)-Z#nk_Jg zzY(`*DSX6(6~zXWrdd<-&ESY6KryqqPgf*i2(C%@e7j@iYAG%n!!>OhJ7cwOxo`U+ zaf}ECjUf+#6lb$t#(pC3YJQvEu&$M^az~qR5wE=wz>$YhC6g;&ggtjSoU=M5prZyr zCq~Kab2V!?As|#snnU1vRJ(pMBL!%fG=hOQRxG7-sh|Aje?I9axFIRzse5{;b#$5d zcV}IQC+uQBas67a+Qj9}=5>@87%`=H-zlgrqrX^5LwcWGOW4;?e%%kVDc_}9>tc62 z3C%ERBii;9+;vslY{FwKP3TmX^S6Hmj?#5In$t8i_DeqAIi^#nZHo5qY7;i~jb0^; z-G34zL4&kTGCayc+$ZH^K7&I?yO(?~kV$I$&j(-HP#00t4AVWVIiyM?o`{`D5G**) zmOLBrc!0N<6Sl&|n#i}>h8IH0^#YacW@bx@AoCSzFK*J@iE~&8?JzHh5h2k&i}o|n zi{k!X?+j@AKsyRJw@@+K=XsPjI~2vV8Aq0XUt%c;vchxSXm-?|Fl=#wE=Dk-QF&{; z{@VU|aOK6*acL(|R(xIbeJQWwX_Zl9ytdtanSi=Z+wgClAFQo}EoL}AsEpLxH7KgV_%=$XP zr#c2Zw}%#7^x%sV=hb>~fv1v+<8-+-XjFC8?M@a8&Gu56Dxekp)XL!{j)A(FgP;T2 zYTvp&zeTBIx4DO(v`SMCUP_7g%`1I%n@{HuoSO<~l8;1t-F7CGBZWCU(%`}7<-`TT zn)&%VDv}`=X&5zc9)1h7spkhV@EoB?j**ro!&E=}WDSlwe_M zc#T>Z15-PC?)84E^N)62h}#BNo~U0^s+yQIDjxL_YT<^XbvQu3nTx;zeY4W+@I8hB zXHltumK<${LEZwwS{tB&P+DJKKWpu<*46-Ay^EX!3vH}k+NXdP+5vDRP}v2W8sEq} zVR;D|dwKn}v5K&b`D#AK`^O-eCFfLve?oj~lSFklC1GXud#6of-&$mFrZH`S>vSf~ zIc7~lf686EJI>g@Py$e}fPYrvssFn5U-%+vkD~n9Z9v)SdfEAF%j}V{o5?VwhDH@p z9-p@1RmMqfpIW&ywVJ806R^(b9)CSraq&E;nV|sJ=2V(&Z{~_?dX0rl6r$1~@lLp8 z{+r*I#m4j8*s``@1$h1N<+k8}yZev(MoHPz0MmdUn)3NBSrIhz-}r1;_y0I9GbPh3 zgV`IYGRSnv0+u8QeYS^^EN^2$(KEZ;{29r`;;U`lnq1>D8rPJMdm#(VWb2f;yCVh{W(o)BgYEXKU2 zvA10%g%C-rurz~P+cKo6^JcM>kei!wG?}yYxkv=vcRG_r5lRoMJ2jKBNQHp%c_KYh zA)m35+O0*^+2d?98$Zi#GWD$H^EY#dI7ZEB>Go!fQF$FJu3OD$XE|p%kuhTLm#6~H zlPczD0tlf**cN1L7iqSKhr*~A(S>B%$U(i&$7+=zGKX;QK>8jO^Msoewsf+NI)=(r z+tPZOWFYjT(lRIGRG@cCQuKmJp&ljy8u_Z(p@O{?kb&4yIiqABKSpGjZ65`QGEpEz3Lz| zv_dTp2Eh2O$pU8(O_8U^di!ruIjk$rSqM9B2 zEb_Re`hTUS+o#XscNauZgdc!9XWQx8CI23=8#X#;9}P{u-E=1Wh=xEUZ2J>mK3~mf 
zg#Tn*D#NF`6rt)6CCI|MWp@cO+8GJ2?VOw|}8wXB^Hr62IoMpM1 zA)fj!@a7XM5=Tx5MMhjCcsVm$jqS!bP&1LN=I2Yoa1_gfO-ibRt+APJ;KL{)va9Vy zBWD_sPAW~IU$lM)FF07!thCo5E}vF>onn5kG_~gPzA2^NHVaJG5=AWWPzPNB zqTD53$63ao!1fxC;nG&PLxq%eKQ5bH&9+*ldG_p+Iun4pJ>@5$`uaXq-(xPvn%6+}HR5f8GfGOD zuiq>Ydo`U!^{TcI?0x5fcwsK^cWz+(PV z9+5(?5H`9@Q-GHtg+`PA(7ABr@^km^9&MtBX2n}jVq7*YhE@d{fpBu*X#@zv+hd$_ zgg)^Kq~rXCdiLUClJ1wWkV=!Cs_ka?-H06NgN}Pbq}ChbO~4^66FBDpcwn(Faz_9P z3Jj11&Dfj#q|a*n2@xQ)W&lpUD-IR@cP>BLTf_j363nDQC!Ta|tofvWah~W5y;t*g zpA-{&e*a>OG!{%fX7*$x*^=NI2(rz3E9e&bTgl1G1WJt42H&dv)cvcc{$@h}NI{s$@<9?*U;Bvp zHbwTGVL=4oP&RY<%z({aW4&tY!*SY;-dwra;}4Rt2foJAbBwxQi&mnYZdP{63WL$$ zzyI`M;iicRAChrKz^8or1Iw#rvS|^Z(iP^@5+eW*~Vq_MPA(l-R{Yw6&bU zmSyj+kT7WkQ+X8`X^12j6K#KB+SkSGSM0a$)9b%@eA7hiK~xDE`x2~3lnByzk{n3S z5v0*zSw>RdPBAfKu{Bb-0r$@&@v8&2fkY~11sb0;mlE?Tr|_rq50DvYHfz!GGc|y1 zc6L6mt*%re9}lD-RRr1AT#q3#U#B+z+i%x^8MQKn9ARST*gsnjr+;wEs+DJYKF4i4 zq5y}xbGf1kmh+a*Z$sB7Sl4A`S+>#QQ(YC**Ps`M<%J$}&F-XoEDe2HQ~mcf$34Af zvO4ASL?z%gx63at!tz@Ga*C=@wodU<{>)7P*g=AfvmQ!$+I=5UFgle{qcXj)c?oOzGmMkpH5EN7rnML8 zqKT@}PYlOwq%EJf&tA_oPZS#`_IOh{9!^=y)p&aEd%(iLP(oZCR$WK518Y|xNA{1{ z@wWYa5`qL0D8vhr`p9g30a9Mhkiz(#y8lJBG{>LGAK!*%t zl{N>^PXxBAxc~Oup4EfrB@Q;fzEBGpyXJj|M8S0MRkgZ(wng6a^+8Mu$O~arKE^hS z35dpOLF%9X*W?{;^E7XdxtTouIgyiQA*!Z}hOZ>t-& zixBa1?<)Z09J{#;p|a_a@ahp*uh;u?7R9R-K7q7R>q}+3l4U;P+7v}p$5O{Djr;nW zxf?Pe?a9Rh+g*_4$vE=GcUVM_zHc)5$c}Zi)f_5Q4_?CLLOK=8{?ACd=U98Y~vCdmV#z9h{&652^KVx+L!&I(_EKZCrAcwhvy4&%+UCkUbyK(Ar3|2jmd4stub~ zWmA7AJ)q(OnxNCZW0#`49QC&2B*4*0Zs+Bq@v{VRetvE0(0h+;i?9Zp5&#$;2Sg9- zmYn3Q2lOnT+>v$x>%l5T9r!=s4&QKs931L5SPHB6_WPne= zyu9d?BGLo9(xtHNTA|i``c?dA>WtS}?0ME`=LxuUXo(r2dnbdD;!$dUoNAlwWXieTX&dyq>6qoY6R&aLsYl zP*E*xpG)QC&M6Fv=neZsV(b5q9MK>4%sk^f$(Ewr(z6m8C1{2ik>-MCQNjU245*l{ zkv@=Jne21^WDz3z{*YFRu*wf4Un(Bx*+W(_9nNaI%fKf=WAb}TA*C>X6YVzwr3YK< zwrBfurR`PGUSxniya$6VA7|fQ{m`E3*);jtHB4Qb98!^nVmFlwU>1J)9EOdh{T@3> z7l7M=E`U*L6*;?Ly)KY2eL6VkSY$t>&}4jCYuMP@>N0-51ioP@DuaL8ze9_aZwtH2 
zS2eJFaqvh6BMN=v>#P&23uhsZmB5<`*=*MT{n7siinn)}lm|Zg`rp-t8zT`>u{B=l z_GALiSp?63YX%Y;d~Fy;Zm*Jxg!X)-0q&SAdt|}GM%Cy8EB`rHp3qP=hji38?~UZe5A4R9CHpiQrb zJ$9nJ%J#oZR|qO1vXi$8mAeWV_lG(v+Qk;&4Cm4t|Dn(Ui_l=R87h~7Fy7Up`sT#w zSF6<0oPR~t99p5Jtraw8Wg|icEPtBOmPihlqXQ0XDdu_A^A!7rv!q;2aABa4=ajLM zt|q1?j-@lw;vlXWJlj4$zF!ua`g?Bd=XV1J!J<+L07NNQ|Bg?QOx3E7aL!Fyx`=A{ zYM z;!^!2l#VBHEd@Ao*Z()PUH^vmm%ilkVNgZ@4^cc3JDDcT#@vjaEy88U?fi|WaK zYKDJ7KSyNH;1S^73hKplmDYX5X@!&bPIu{^F4|F+lrYoj$0~2|4Y8vGBw^IFO(R3> zD(uLhOD7I4rfWyABupXT#HBLbZh$H3-FR+-q}Xt^xr~M&bylLhM((M}?7UM)b&Bo) z!gfE$J(Z$H4u~K~lFkQLrZ&z?=jnz2kqZc;p0S)?;q4G}uohREu!Nt}Q-s z6s%=kL&JWJoWx#*i-qAXz3e5-L@Q`#Q+dcDlBiSwtb?K)2y<0uAuh&0XUkGee0J-~ z8bs9P*#tRGw_<28!xDk5zOJxEm_Oj#mI-**_Ec#=YLW8;_Q-B-rbDTl-crs>0H8dD z4GjpU>f4WAL!=T zqMZhpZ+_SOad{I{cDAb9$X`go_ftxB)i+IMJ^O{>v1nTMl&kgJjo${7N`9z7O)p_? zsJ6{sr}qT69Jyel*O1SC%YF_fA~A@z9yuvR1GTUpvPl$#er}*3p%uYgDU%)CG{H-2 zD>@&}n%GH^N}*tr+s7BGos5Cs9EMxc&>T83?l}Q5L{{PvVwtWaWI@i~=};sH#YNUC z#0t0?76NHtVohEdm%FYsmhoBiuJaviM#AQ`FL|8bBBb17Ege4{Dj2(UEto++MnlV0 zDQ<=(AL&?f-CxNS`xxQj+W>f{=I{(~DPdS|5xyDp>>pf7@Lw2Y6r$BM@vswmb;u?D zNkI9t%$oHI8&o!Z)8Y@ylIy|+xiy(G{Qv8^UOjbPQCxb2IJ)5}RlR-$9KyBT{XDQd!*j3}?RMGD4Bama1Q2(%?r z?0yvI5?CDl{+8usdr_@E%U(MucHlxIthk6RxsWs7vo~_ri@anpswLo*-1{z8LR!2; zopr#e7sASq_dpv#dqiTlemCEyLz$0PTVlOGe=Tk9UHj@~ z3+#*{BB47)fD_St1Y_6RKM4w*Q#S7P_rRKosO9hLreGR+C4lIVSjlHp1Ux~;f`4)Z z~|=W&jr$-2wShlVw)~x5rI#`YH`;3gJRJPm;R_2ZUuF-MG0m z;6MnXEJT-VYOf{!Apa#7KdC z9&z;F=|Bmx5D+?z>lp|oVIxvSyxY?F<0JRYb9PO)QE8e@3Bpvi^Dl?$sh3E9Vi=}R z=$%loX=ur)Sd6CF@;NOoX~9Hvej3PShEUsr-z1|IO{4{kpB@@ar-A8cGVJeusjqY5 zv^om}C#9PGblvF{JBs%!TadUL%w4W6rq2#o{m^KAwwC3|rti3CzSbVk@pY4L#nh7; zbxDS2*XV2?o$OB^*xQM~HmL{rKRT=ARigQC!gAFhf8fjco*WEiop`-c34`6NDk5X` zT}YzzJy?iLART~gsaAUwAeDsTfcQ#Wd!z~H`dT(D#Wdwp$3T#0*>rv|?1*1s6mS}M zfgb9g8(N%gf8%pq6TUeK{Y$a5iBI+$kSzfe-AbqQ-bgGz&xHY!#;_Qg5cI}>fS1y+ ze;;CYhGXW0{JGW!*KC&ugO`N$p_s$wBHwr?y4PN(ZlIbHA15nS?Eka^pP#lGpDjBB z*K~jnyY1RNW9s@E%do{ReRB;R?6jgg^8rgDuQtt+V!zD?kK92)WRFdq>h(k5;VaWG 
ziRDTai5sd=&er-wy$gxE5rWi>0dMP64`=SsUEA!37^hk8&x6i%^*$Sswv)V5$?!T6 z@aqPMJ4o=o6S8O8<&7Te>F)RzLhAHDUz6%gT;uC1%!de4SW`aRA+K4f^FSIO2Y-~< zdlwN2;4Cfh5IM0B1J@00BXd9emor*koKg6ZS~IV)i)9CA1QU?%(trKvP>R8&g5`Xv zlOk1CatH^CyB4!rE-SQkhokDFBS2L;|I_o=wA}};*WjL_801vEf3*?%x3VN#z%eE6 zIWPU2F1@E=^}o-td_kn$MdfRmf@q&oz+ANWHHGxzk^A5&oHOX>Nb29JbCx6|-Oe#w`gUw_sU z7L~bUwvql zraBrn%P{>F{B5p)578%NL=ZQ&44G|KCTjD`%mdDHaVHgO6VZfMr z1E7wavrjDadyj!#08aNqnmH+L=sn*Z-vy2%lL1Mtm$kf>sz1EvR6Q>yUuGi4c?iBn zECLqmCR{!D=QGIyjfaK-m$M_jWAhJ1O+W)0CEpweJ!cp;{y@IN!zK97Vz>OitwV zzL7I6h=7%ifY)UVf>c1j8nQ7n>hbE?-R|>cg?cy0#V{(cn9gdX8naAhTz>OK8Yzr3 zj>}_bOBfv>jCT}ahYgmA+Rl~7&ojMdZ+diI1}NT$ zdh!u$M#exQ6Frb9&!C$5$2X!#tPx_8tUI7Tz^(jBn@7p-qZMt`UoSPLBwuzPFhi?C z7AjEy+?5Hiq^klHmq|VT@cSMayd8P#>+8GblA`JkuyoIXVibc52ZpZNH1SvItWN>0 zj1M`sWsDjTKLvRIj8se-vu*5=Alp^Q{m<$F7_6SQ>!qvD zR1(f!Xr%PQ!)p9Ot}P+|hpM*Fy3`q*J;(q`SL8x&;xC2I($= zO-gr3cL+%TbKlSV9sI|fY}Q-V)CQ|VXIw7~+UOJ8oJf2E?$M#}3i7X8`@UdyBX*y>U-J?)ltc)gweyY}`l!wM;N zJtn(A!_I6Y-5`Pk^3&!e%plXBX$Q-(_Zyi(-A%;E2@??s-P+VopSBi*QRfI$2G5qU z8*Y{ok+copBXGi0228w-;8Xs~f;(&>tTA!_Du}%#NN6n4GiG3$BD{e=S9M1!F`xMW z_H~e3;&rPX0=k-ze5q}2XI9~3F)7T8ePXyFZt^^ z6-7?^Q%v9K2$&3k*F~?>r(K}aPe)*ruJHf0`JmimLD!v>9Xv7u9Eh@59-n8c)@bs4 z*nknpu;iDCg{aECaw-f3&5Mjld=&hxj5|-r;;%puqkA=K?oTiHbQRALPmO8iyPn9l zSELCy8&POk*}Z6WI>g=iF3l~x#pD`|jos{cV_@(*e0=rc(1pO?ZiDDS=XTh$09Ab2 zxi^bWMC2G^3rLzJ@m{3ZsW;oC!E10H5j?Pa-GkWEMlvRY@;Pnyb_p27gYF@97$nO9 zsJrl8mT3#-C|-|)ShEzNLVFtyGY4?b!eCD+m`-MeY`&NXpW`{T$Xjy%%?9kBBCH3@(d5;m2FPuLlrln~T zsL~$)0q(acvlTjNsgI~`bLm39E#tBt#13JST2dCy?f68&&=cQj-c>&dnoZoPSazZN5_U2#Nr!y0_T^i-}oU=c!gDV^XsNO8ALX2d7!=P84f z??X|Jw|a+T$$h~9abjrvN=}C|RX8Cd0lJvl5R;W!E!taKvpIY=s&(W!LX{moSIQn_ zpK>$Xs`%)w@cQ_ z!i@*Ul#y$xEws^ZLX!FPD3QCc!Saqlr@=On%WhGc8(Y~a_j3b9?T1n~#)PaadXR4) zzYOS^K|XC9*3&etGa6u}x@nj6*9mGzS}*Z4@Opnr3YSS2uj11!bRc>4KXB99I)jVz z#-lyU6k^0!Yic=cUR%Zctvwc2lx-yE;^f7S-{A7QjY@}lPnoTx<}%aD=3R246&`Wo zlQq3h?`}8Wz{$PK;5BKY80DoB-obI=%lsaJc)+iO)g}+4aMF4zmP;^3&I5;@b 
zw|3{7R2Lb%eS!?>eeRo+=MZ~eq;i7+pK8XXTUyiod5KpAtGS9R6le{$9A=#oncJhu z-Ugq8k31KuesIzPA!w z?kX%@auHK<1~-0dI|3wvq5FO9wnj}X&4N+D76oqp|z3{Pv%_x>Aj>g!|atm4&I z>I`s7!S_d*i&*8AfmkC%I{AVi%@Eo%qvXP;0H`H!3#v^SaurlD;{XuYy6nd21+D7p zm*XXOEh##pwhn8aa}oIriv`7u;+Q<>3Z`1|BGH^Nm$_Zvo2-4Dnzg~wuz(`&=`X*P zX-{UEc`cj=XtA2u^R*dXWDnbRLT#L9Cpw=Nx=plK00repZ#?B^Go&*S454iL;c1dm zZ1|`*1Sv4`)!={-KRP;Rn_X|6Ocq6FdqHGs*Zkn+2F9K^EAUYaZ34?GTV#a*cf5(e ztEC0daJPY{^lRJ51O*P)12x{;tuM!);<%V?Gpd2o%5nyx30R>j|I_az4HkKc)KNRh z-CVPkcw*DC+)E$D4r!zgRj_*vZYglwu71Aw5ATY-!bv2NS5peED`3zi1a-OBo^VHV zM=Fhtt{7oFpR9FuW_v9TO;W`+I2L?NV$^GPDuS}0gSfXmd^hp`fItM(#)WsLV+k_c zce}#Rv?jk|YS#72n~kbYuoZa7QX~FT4*usP1@4q-HbL6f=H=DGoS)nl@qGlaK`&!? zn{S<-f@YGi5fQBH?B7=HCo}H4TbrC#xu-5GQduuY&Ia=`4l*aJ?p7piQj&l%Jon%Z zsKNie0?kkCMX~T>sc|2wiK(cBhO+ zu45bQQ#;0&v#x(r^7K7U_$wqGJ>C?1icRTxde)x(4li2kTML{0(p;7=VxH~Pw#BG8 z<$GUOYdREZF?4P3Jx>e&a^qU{C#(t_IdBt;>GJw>XUbnL7(lF>ryC83pMxfcbU~Em zg$%Cb{d`h_Leu+m6pBJBH=jwrjTaNGrOz(5I99`>DGw#DXJ-O*r6YYRD z%VEiVe^NV+*ejXPb^5iHEjpk24}26-FC=fKXMmkoPV-Vm5H|W( zxX#AUG2UH5PmoHz%3s8DfAwo!CMp6NC=i@UF#+I1ti+sNOwM27zEPf@*pUpy(e=~-vD)G%3R zq1Y-1lc)YiodZByo?s&TgQNK$H7N|rpg<$w`8WesQlluXP}S0U@4lp*Y1Y?#@wBl| zF8u;3enAh0yu`lAU&k7zsAXEU^Q4t)g+dLJW~eSnr+ng{ocfY6jBVI3eCQ4QKklq} z1ruIg31ssSs}qN}F#GSHvh6mkeYa4H^ps;MKJBQT{;*+K@p?W$v^q@pRW-#$wcG@P zz)pqX;9FN7L9;-#Syn2gpw9tPLJ-^dES}@ zaJ?eAe7dW);zqJW(pI--5xqkty`PF$=ft}^60#f?>bVWF;?&=G; z2@PIz>S2;W;CG|{jZ5Ox8$Iq9=P^07oxg}KvNAIIx=!`2`fcLVMOx?JxZic1%EAXT zV^}H}ComakR#X~V2yh>Pmy%0m3jpR7N^77${-&5Ggd^#uSt1SPbOeTghAAl|y=Ijb zD6cZp9@=ld4^ulUW$+`xj;&t2+EAk+8^`b{zf=_l?tQC@rO^uJ(}8n`Uv(s2UC6Jd zDO3p*$Xwt35uChiyEIWiT^|@xrl7QVx?x)%3;(vHZ84C{yxQ>biF-?AK|6s&l}Vl6 zozsELSxPZsv;2FJ>iV5g)T?Cy3sVj@8yYy4WG)u`AZi4q^wgBd0IKQOHzcds6yuyE zBdTcz7{tSVxsMc~_MI5lp>7WWQi8y3vvWj%T;`0QnM(oT+X>5!-p=bz<4f9PFgy*4 zA`z1o=X%xpT5im9R&*dbz63EILbPBJrddSd^#T=-6hq)lWYM1e3Yh5+dhX`D)inL+ z=GMTVRV}jE=wR>|UGNV2g#K?Mrshs|Vn!~Yf4Vk9xVS13^}}(`>$?@VaHB}&rzS~1 zjzqlk3DKSW8+tSz=a7YyvczVL@;!!R4;N~c7a~J@yu)VpF*F$;u8Ui+Oaj#qWX}YC 
zmnd%*Ow?2yP{a!u8>~i|%{Y$>Hp>3)2TlnM*>0b1J6ru94-JZJ<|>^4-6Z7@H*uY3 zTJX2UpBWA%tn5Kx&$k8LK!MQ_sSJ{g+5C7=R+YM8B?^U42mpMd!tJJbr4Ni1e&K+G zk-E0~RgO!caO#a|szF45tC z?4e!9q@40;|LIn@+-xp4T_;O3{xfojaFs`#i9LLPWV*K0H*PUBtC5ddfSG~YB01Z* z0eiUOeq*|ZQ>DxsQFi(205oJiz`KdHI|=ZL^oz4HSAV}mL)+9~ zV0`bg&QD9G`{TyEn0LHAlBqn$cY5k5Z_?|6a{cbCyyqz!B7dNg$v@Q&GkpP?umz!N z_3w5+1~&YSYIOe6ck%9CZu%ELvC zhBxW}U+XjoK#4%T{j;flF;{VmWuPnbp;rk$J?3yyBhG$GEE7-gHe!?W#-RS=cN;6~ z6X!ggcIBrg^Rgp{G4)%fPAv@As$&%8{--xIk$C8sgw8u7tm8C{NkQf+(JDmtH~1K zfGbg3NNX$DSLpz+sgf)7)LoZmc2j{_!TEs$poMR|cIUD7-DR2iO)GCf|8MXz7;KWN zN5B&O;4t4OLD6$shYkIY{>>T#%*)Blm_Lt$?!u|&f?mI}cNCnzcl%>JM=nUxj}PJV zyApd=qPE?4q#wKC@7}!K&81!PlYZ!SX63Ief~<^Y4lmaY$Qae7nxlOx@78fhltx7j z?*iNQDlHqK@u3gspYEEbp?i(FNi!&e66%ea!|M z(wdQ4t4-v1zkih5U*?(m5S*3vDMsPazd}jXS%0{o4UX6OgpQVjy{S$|2Y>!Y^K!fO z7oR3Z&1$MNl?!MWTsyY|FoM)b?Dwf=UPZqPrG`Op{pM$53)r@h6%+sK1q-KnBc=l4 z@DdCF@~L4Kusi|jZrH4PsJ15fQBn8josY*3%Hgrc%|c)*E4lh_(LEgpZTpD$i*=Zx zhaaVo_mucqqax)+Q!q0bkA5%$aZX=JG>>7<6u!W8@F_M?^Ew4a0h2+Tf*yoZk4MU@ z{kHM%s@rfn#r^wm2pMylu=H4CEbEVQpFgg*8^7owgd@3$GxBjyn^IPms{-+|-;WQ5 zzHGJHFf&eZ{qXsrZg^iI9-kO{Pi!eQCXMF*PRmWp8wUHed5pju{u~D^+F9c6r%+-@ z&;VzgfZ5B;eDprHi>KT7N5hI_@M0vCPY*A@Rr`PY2Q<*)3x4t|V_1U8&YkoROhh7Z zytBNkOm*8Di7Rd>n5g%Uruo?}JvDc~gqS6%RADHAwP7}*bSkrf{?IHrPdEb15LjGm z?KhN8tb>@tB&dgI2MDN>MSJTlZNhBzAI-w|e_i*C_DTdzbDN3HZ4NKivkaD3%_V}7#sqnz*O*Znccq=zO7 z116p7Pq^uEj@Erec3DFo+gqeNGN#LyeVTo@C2tqA2MUt2p+)4#RkZJA1H{@k_rB*P z>8pa|KV&>lHAQN8D_(~cTuB73|E-X6J*nm8pU&g`=izt7>C3vxTb=`= z@82ZnrOWc2L0E~7>o*MviEqgpyCuRBna53gc^B@7hqA1q+7l9>!@MN=?>Xe1kMm+@t$P* zOP3S}awHcFNg^eaL|>$*_#~u-vC%OCtk+gdyM!K(0~bZJl=HrC;C3~bf$+07(D4vm zi!$lb@d5F0hZ_oA+YBNL^GeAN803b!UY!sq*w&KqVHdieK=>b!6iB69N`CTJ!G1;e zX%g_7)y`v*48!Nc%1UA7iM2WuPFAV~we`4Yy;=Q@i4vtV~ zt%>n3HjNNp8?NH3It>9Xc}aXkS%^UjfO(k?QZ)gi&2af9YFy~P960-T_#lNj_hCv6 zf;2hn!`iyK<5X2`ZS8H-b7Qr5V!@Hw?L)|ndZ=<5cuzqY2#~=uLNSDqmW4u_E6}T@ zrQZL?b)|?5PUUh8KD*h%Mary^Y%w!Ymt#Q^@Gl-J6v(Lun*5yp5pibMfL-jXzn%|F 
ziOYl(yK~<)h3Z_rdA-aL9W)5wx6*^7EwZ`FeLh66>eUI}4JWD!MOfhx9tb^m-<$5U zV`NE{{w>_xVKlqmlQXq2H-Xn}suOsb&O!%&h?Yo9$;4mDe4nq~)0er$7vkd>E9t^x zQ;@r4hv=G%Iyr1d@fo~T+4E-5*Hcsve^0gMC-5#%eaS#>|| zuVKcf^{s)z+b+;m8ik_+1I_sScOU7iVMI!SlZ>)`=cAi;t*H#+R`H91{&cqJQUw*2K#G6myS*HDE{%F2RY^(lt zu5+eCTu6>mR3=kxoMqd8Oh(WWhKh+Y;BRnINoaXjyi{~TV}nNMo4*kDHwWIJgw&Fjl0;Z&dc@5~rs9FgyD;YhWA zS}GS+Ot!i^pd{!5$BUqnNS8uDe^{2B>wC2w&t(B4>EVqQ-Ucj2fb@Ofe=Xq3%OBRrJl|2BI~zGVm7dyMD&#rC3tC$lXQ`?Cy$?q}3Nwx}(ofEs3)gVH z_&daQQ-r`ZvXVz?NIfoKQbEr}8rgihWPEbikPa)c+`2s=o=>?1L)A}p6P3_|Yl3na zz4aZ3V5{!<=PaAf>+4`peN!Y6pqqTy9QGe8TX3Imly(W1xIRmK1||^%FZV*Pay6x9f%`R z&sDNYBt^{dm#eFKwrIY0Cv8P~Nh#(|t^bWRN?*TK z`^2D5efE|+JaRApg=@BT!)nW#ZM&JmLEUc}KTYi&s$85MzcUEc;kGljQGwbGwC+D} z{sXZ9+xfxhbOuH|Fv5=Cr6oe@WE>@B@L=Bnbio~>wxLhN{P+`8Gt|6LRRvxNYQ@|U zJw%IYN80(sWwIMWNigwJiTxB z^xSd7=YC9d7F&1R7ET`gvKMisI(r+Zb&;+@V%AlB$z77?s<(F6%8_#;-b~_LGolM78l0`iNt<9rJLV!MAgMC$to zN>!%Dr4R92ZYR{vk}>FrvejdF$ANG9>UT-nh_iVDu_b-}UBSqmT^i4!)d+}!k+hyqeavhT?$XpOEug+2#~ebSc30(FWW;?MMz(Z1t9qOVfA3#A|s!v%8-hPp(Kcq zhM;p95-c?>t*?G&o^RsTu8n%m=H|OM*K93opg*oWp)AX2c0SGV+#T(0y=lwXpOKpd zJA3ka^=!@{9Gny0cs1xx*s`M-ZprEdkqS<~C2*b{+|{wD{GTrKwkumtoS5lL1OS!` z+x<-jQwiY*E-aPo58P+dW4*{Ew&mt=Asspc+eKrB*QWhTXNjE~>}UOk3@t`oxMq~o zE6ZhX9*uM?Tj`61O#iufA3SgUjhq(DL$E}(x6a<87fQv;YG`O^!O@lnzJmT6_AhCavA8W&7i+Mn zirU)evCMXkJe`hcf^=TK45mRQ=_<Dp{xQvvn*z%7-ihKDvPaqI-HTxZ# zX|;Sd%j#CJz{J?x5zVNv8|@-wqof?8cWH zbPBNu4?EkDN;{$tK}jIQ&h^R&Z$My|NF)}OtTAsz)vz@w#!owQ^bg!?IV0``9#q+S zDg$lRb-&AdJkE#VFH*_m(fevTYxR*j0v~F8Q*~$CK9`Q!S!L_1`kAA5Y|3YP-P=EnGB#?&HPZ;N(qEVf&c|lPlIR^ zFlW9rUg)Ymh7twZQ)Y|^!Z6(F>}9LeG5-dv!>yAyuCRczh}BarpP`SJZF4cs~S z38dW|P2+RfJW$mxrQy_?#{ueICyz&lab$$iSgoWUcdjKQcHMtF%&`NBo1(rPTRW=U(o z^W;oGbdlO^8*gz!d+ zK*B)(a)iCr_N?2WWl8&oovWFr*Eb7w7T!xX8zO^_a;JfIR~Gq;|1>PwM>h$I zxL`!i-TfkP7=$|ApT@F}WJP`Z6-zJ-k0FqcAR{57rXxWr0Y1AAkx5#L$M;JdDOs7^ z9BWy|$IVT`KF57@>*727v&ryM+ZB%kriHhwuH{&FOhWJIt!#h&otll^UQGQc_t^R> zyfu%8hw-h$Je7_Uwri~XH+lu4W-O|w_BIgSgt%lNAxkR4=d$;@bm5DMce(qj=lgPW 
zjpz@2_aQH!DC9O+Bbjm14BUq9bqeTQPAZ#WQO*B4qITBUgwjR+tzgk^+?YN;J(`VX zawe*&Re}Rfi3q@=N_pw_%C+c&46Xgn`H^jguzTW>aP=C>Vhi=l)fQN-pUtihCi@Vr zF!U?k3<6Y1{=;dblSQ#v$5m9T)^+SUj<#3++J>Z`WqA}$H+K)eg?ZLdeWGJoG94xU zt@z4GQBw7CY2?wyZ8nr`k6H1ymZeCnuJ|JJ-jC9t<3972H;u1!!e^g<-lE!;NJz+0 zIbFxCC_G2*gs{(tnidZJr)XF<$?OU*q*;FLXCcP)?_X|%+EVzDzU+JQBnj+aQTg}Q zQ5X*gzhX?nBOoU!U%DuNC$1b_aT>m>U9#)8@bk-EIONumj3=gT_r6LV`Z9if77%}s zTs!KIecmt)Osj~mKGr6&SOM8hWA-z2#liP`>=oURa?sCCKoVHxLhzVk7nn4vfM4vd2f{|{Etv`$`m<=djsEf=y+&I2GvQUpPH)zIN& zlhx42N@t_NHQrPRZnxBNAp^{p)jc7ipI&%mP*g+u*6GbqXA9!uZN!Nn3F0KkW|Rlj zN3%;lXD!n@tpc1FPTXm0ceRH_r@fu$juHO`3|kH>xJ|DVPw1v_wXOE+%^n`MRmwzz zBPub~g=@X)8Cr~;WHG6DCLT?*u50g>hvA*ykp{4c^em&~M1OTo<2&&QiE+IvAbKQ| zD$p+>xyvWYMY_ol!M8h$CnOm(L{bTENeCw0NY&SvyOtwJq9c{D9Os&!G;q5{wo0AR z1+FEw9SHuz<d5dZ^ej}d6T}&2HPoq;uM>l} z(sQdE;q#H-$zm7L$e~MM{_|H%i^O?sFm`ACy6h7NYbV(^U5lTF%L5}fX3#za`mr+z?j&3dozATcl z@6>nvhNmOx=;5M7GN65jykn^oCOGnB7MwGUvjT~{q1<-wkLxo@r2jU%0~jD&XVyzf zl1{-t?fDnAq!hgGLSh09G}Dd~SbyVQ#f?8HPhya;ETPtLhB^hGr5ngTnp`dWe*xIfAy!k7XkFQ)6NL%Co^lk-p+D`-u<$W zbN=yA@*=~@PeXiPk9$hzw{a&*_@~qk{qfgKjSdU+r^G_aUKBGi_;5qg$?KcI2k8?v zDjOTyw?NNsj6C3p?4ctVODyDVqpM;0YHkMG%pJ!rZ2GrjkJo{zYj*gk&0-Z0|&Q3gv=tA+4d?1t_S> zDuzK7l(Ib;AsB6oRr`LVTutc0sFbSV9V;R=hgrviWW~O71O8OT=oyl)d1h34JH$=< z2u>wC8JstYg!%-hzua~wFVwcj;`0d>7ZH90N<8 z1(N?X!(bX~rXmQ3CtPN~sU#H5V^W5zr5evx{9-@mvOiZvZZT<3Bh=bFuIYaTt z+cw+F_EQhPuY@dXGw9nm>2+dfw8<2bF9i3vxt5GM+4916S}mhvJ|(5-%N-D?Q1rI3 zgvil#z3s=N{MQ$F(m8S5XdGa^phrosta44y*x3(4PTSHtWIrLsuECPB_E6al;KV*S z+EtSyo+nE)!)_4bln2o&%DyQ;Si#1ft$<3aY3l+U%A4|fok4v|*lZ6z8ba&ca^wuIj zy?}cp@%{8E$6xd>Stf~$4e)H?*Y{^V+tGFjPSC(UB6(OYOXBpBmlk*AJpK1MgWs=d zE05>hazbDI)XqcN%$KT!qwk|_WnF|OaKF2FrNsK|iivlqHSLt*FLRy7EXGXk%+L*f z2F5sT;UOLSUi1n%SV{$DdnIhYwew!jl@V)SU#>I^^v9y|s8k9U9<0vX>Wj3!4E~z- z(|!ixgWrM3q{3tr`3Txv=6FKw23zgWN{6%c?$v`tV6Dn@Ks2zM@5PamV zPzFXEaqRJB-1dW}zGu;Ttk98vIdr&v6L|5zkA6Fc->imUEk90R@@vM=acSl&*j=-8 z1O1hPssQ4^pp_O7sxy(c&WWG2&V2kT=o{)}%R$0MZvfNb zv}iRNtr|K$d^!npBO{8O-NpFZ3jF|9* 
zg%)s?nCKF!U%oFUX+*1abea=hRCX5zP^*B{Ur{217yLrOPwWG`lXo>Aq)QabiR%z# z0_WbMf0jA`+(c9oE-h+#ywe%g4Gh2hwx_#8a8$tI6|tIeyndsCm7%{A99&!D4CGJY z4_iRLrtt?N2vgwax|%z3A`sc1Zk@Gj3tFU$PGB0p`qW=@Rx0!c7J6$#dvb`^D=$;? zLB&Om#F`zShIgEs;v8zT^?hY|d|@;hPJ1EC4XzGtElQorpg>Zx@f^-oycwt;$ZqT@j()Wq;z7Qr*oAQPEib%!G zDl#-bm!E#t5s+f7<+lG%EaH(20r7;bYOg?w3G!d;;5)={vSPGy$-m=v7jjet1B zYQSr;!i$`dH806~AZ?#xHdu;r5!!9Jm`{X!2hfqd$=OEs4-xz5_vWfEyk8TY1*VXe zPpx8m@LhMgU-t_cMzcj?l}qz_vJJEyPfPN-*)ntcA4)n$c>A`PWS>-FTn&cnU%aw!F9kgxlouedq)ygPmIS9{|tolGy(i{fi7ZTew&C*9} z3n&uRGadr90$=57D$*v?tI`*|1t@9zrDBgKG_Day8U%Dz)$ki0dfYc1t)PNiPCz^f?d9E*V^zO%w|Y3l|$w?38na?<}!{ zI{Vjh#YAwBn{~n%I9P_=xbsm2DH~WDRi2r*B=l~9Y~+q$R=SwBN>(ANf~GW4L-p6V z0dYXC)vw%1nLMn3w;Q zPwwD`jM}u@$%Fb2z6uixI3T|nO!y~%fr>r70Z)nSL^vO7*LZhx_&!ZPGg}m2cL417 z1e~^FxmP2G+*JN42hUJ|m1q6B;m`*Ks9pl75H-HbXKWI^i+uhyKw+eg=Jz3prv>=@ z!s$%*7cyU!g2xpFeOk8;PPOC9LauCEX*{QDD+GDbja*8-H}?G+Zi`zAX2wiPJ+*Bwi5r zw+pcLFdx1gCvOOcb}1q$r8ej?V~v$dkV&KxhnnkwiGVcgK6Y4IWm@q6ZcN~ zI*ak|($3Hx2M*ff1Z5=$h^rM)_Iw1lOa%isz3d26qefv^XDG%;ynHlG_a(d#GbP++ z0zz~PjEpjPf)1*9EjGXS34`GJ;i5lM4&MlsC;!IOnGQNLW93HNveqSgJoQ_op4wu~ zm34Enm(|DQwDzUoGG~$Abu84G?z>l3dF1uCYqorw@X=nLa?ZCU@d^Wv0*8+2b9Z1M z%x6~`cW3t#u`#|!vH@pUVO!^`z34|J5~o3Oe-W<$N>(IizP_)=!`D(eibD^gIpZbX z0sN+2W#${+)yW>RU>;|a@mxV1V)mDquPI2SA(z*h zp-4)yNdH7Hp~7;YB&V6T&lm6}Nvoxgr+fz--`BgmvWE)(SNI@e|4lt~HkF~*s&hUh zZA~(Q&8_MtR%Yl@NZQ6v1{K&E$;)k?@jfn;Ld+&=HjZ+i&u3C-y4{qt5a9}7>ABA` zxE*o{`Q6IjlpvNF60Dy8>2+X@fnU!9IS7WqDzmkq`&z?DoIYopY6Jun=~1KY-w{~l zZqEm0lQ}pcH5)8$3A~_DLtlCu;<0}OXz9%lcXOR_C532F0ptl<0RKwTx}ngPEc_r7 z!hf~1bShJcoN$5C>HB--88%?ux@nzp`D-k1aP0_Lc+S$vizx9cAA?+(_4u+mm-FID z^HmP#bnk-)zU~y%$APR#7Z2LhjZ9`n8{#Gdx6~Wb8<-#84K->|9}eXveI5`rKb6O@ zEAQm6D>!v7>XkDtAdt#lU+ppLgvFwa=;^}mQtiBhWH%;LAZ3U&d#`+DyE9bKNnrXn zIrOv#?H2G`sDE^1YO9om~V^{_G`ok{vFFgv_);nxZpKj>OGyjgk8@idywe$ z5m?64XzIH*bFdn!-edzo){=w!-0}}YU?v_$AnLdYLI9w4M5CC1 zM%g#-^tA3XCuMa?UHFF6e$EuG{wNwFf8eTvW2?#q%n+k`b~=IUj$jX^ZQ*}-@mFYd z{c0~Yi7FYH#L{RzoLW(9xPis&7)444S2CEf%w}5h2L|d{kMofC%#UcsJoi 
zalmOo{^z>psiN&9MXGUplWK{J88)5zw3<5HWWV*D#T&ne#PX)YohZ9-vlG`TWo)y- zwP(!w)2W@&9L8}W#;m8avZ$-K^=S|*1`Zwb(@I}Si9t!N%?VH?yD;C zOAel4R1blIxdfEgp=`U{OADQ8S!6j($I;E z(yLZ)<7z0m9L}nb;j#Zh_$B2zTyK;ojckg2J=*U{aoYLZVQA&Upt96px1DKI2@DZB zJC@~+O<3cUNC4DwA)Od6{4~!`=mcQ7K+ktS2+H_&Ou#B}8zekI@p(-?_Y*?;xdW;H zx=|;J|3=X5Tme?DKBLp;nU=`xnUczBwvO{w%22<|Qpmfr4e^RpK9E#2``vLjkk3MiidrkzzCZ3F?x@ShWtaLT z7Uc-PW*i%qndSQoINmEgqJyw6pY^A3dGkmXm64oM&*6j1q%EVld^~WG^Mn^C8kNug zmc$nwN>>LP*tX7e`0``FIi)mcHLQJ0^PYI33=*=vHLvk%Yfy^Bml6qsIA+MP+TavN zQ+Feg0+6RlF4(yo_xM&%?f{=#Km`GbCaCL2Ob9&V=hc@DQ$21lk$&iMW`+g6-~$se z2Aw^?2qHNsrbgX+pIq6uW^v7D%pO}*m@6dfc{&lw0DJssGPc$1SSHsYL)!hjSBykt z(#4uZ+ff+Kg4T?(I`YyEm;)N8R|bM&b zxjRRbo;*pRL2mw>)nJ=6o?4zPmhGU_i-Fn-tMNkJ>R@&ShRJR|&X5_t(-ydlE!dl- zl8VCMIbHe}zSVEMeNQ4?w~eA2Mk6Cv#c08_M9K-dbwkteW@y5Yz8!nb8t&`f za>cZNZ6LhKh@SN}myfFK>0W<<|7!G`zPi)mYv57t_NCrr3^_A8J&jU`1u1cv7s55D zOpPzfd-U~m%wey^RS2V!(CkhOADQc+OIR9}`e4z`MlL9X>#GpyZXKe*bTD%{W#9-z z0yv=2!6Tt2IFDseULDSX42oP6+j(cazL!EYCd)O_85tQt$A#5STR|vHXXrdJp=-5w zAS@@mQ3WA_2l`HuDwP0bgS{}Pw~-G7ts^=SEz zY|woM+;n*c5D*}x1vuSB9Ex=Hk~>9(Qa6N2Qf?%DUj84|LqJQjjYPcmTtnCXV+z>t zBJO@mrtXiX5nTgD^GVyoX>vc0!<W=3>+#_ zvUF*W0G2MFOLJY!^NpEk$WAwlRO|HLL9|7dVrC(NN+L+IIEE{A(%XbUQbU3vl!Nni zHv#6d5!gZ4l>p?qm@2Isw=WWo)@*Bt-mujS6%=Bc`ndj5}E!+MYv%7xVhI z0Go3Hk^S{tB%{!?xB9KbWUH(ADg9Euno5(7=s|xiff5D4 zXjzR%zw2avc(Bnat2_||I6qYvwEdWZPWaG3A34q#4=gIYuC1Xj(4d;YwXYj5Le*J- z7XiD)r(D#zayIDM8-uei-6Tl(wnDGGx!gp=?>4mTd5L+0hM>A|grs@wErxnVF6HFh z80$7cOI(815Q?svs52fWYTQ6(L1JlRu93{4_!Wn$a5azWs@2t*xzWcAezRKwW&4s~ zG))M_b^kG4eC7W67T?NqtXbI(s&5`7*ZVg#@I^fpid2HhifKYDuGD#3f z@e6O5!(~q?p0Im?0<=_dMTgZLB0L+T)%#P$-erc4O%J((-fhQdz3=*zUmAbYYjs;C zJK{+vb{Mx(K>+g@X~zFXY18!1f3xXk9S*q;gGs|#(`oh@`zmt}S+*VZ8)XKQ-xqh@ zacyNMc{iSZNm*!?@g^!|bEJ}mxWBKlHr^e}Ny^P7o`oIT={<$hL?$cidfOynt$*xgi=C>sD0N+6m@ZIv4c1%j%y$zy+B=G@h_$dCT`##`jK4 z(B?XP^5W8y_H(*-F!7`5Rx1fsT>4fs62@TA5H{7RCH|NrOA{BdR~YL3Zkh5xfL(`M z`#FjBWt}&q!-?w0F2v?9y7TF@>TG=a>%BLZuY|2x|A?C9n;$mV<{{k6d$de&lZ>hI 
zh$&?{6S{t%uz+)uPH@&&4d)_dnj8_DlP2XzKg*tY;TuRPn)6tI>ic)&P@7=-Hc+T> zSqy(m&fMItF7sPm3?Zi&^~dbjjlsAWBti^4$%JK+XR}yi>k+O8i}hvhV=o$J55U&4 ze8u}IV5!aXvdspzyUwWhMf~eh2zPmP3a+1R4loBG=5<;1-0k09oGAq*g9c6=iUESM zF|$nX&*sH?`|p&(u*NXSd)X6BAUGJ z)6*aQHY*45MfFnkt;AbBN~rPWJQoffxFlzwK-i2?-iRQ;HeGHS>{ppkaG6r4^chl( z);Hyk4N5f58XT=TKoF{ ze`I}SSXN!PwSF(%mH`ARyh{-JR0XjR?{u0+P}p-5uY$^?A>E&ih^b z@Vf5ppS{;wbIm#C7-L=r)HQEo3S-m~1oUn-E>OYa4xd3k6xv6KEJ`S(Q#xr$2u#v1 z#aLyhi7Xxtb8S{~dc#c6#!n<(Y%S?oA7lngC$hdhY zn(9VHYF~+q6N|S8W0SEI)IybKLXaw9@r7Rlt`*ubN%2cVKf`^lx1EQ$!O*a$AFKm^ zt&Xp7_DGtk6fkNxxeH(^sM1u$EQ5V*0+c|%%ttf0YC!fbGCU))Zaf8lx6CL`G|2C; z-mlSe;l=2)0CDgu*KU-rwwVsaQWbc#4?+$hv8KL=wDZXCA!cKGDwaw8C!v1|%Ah=O z)5x}_IE5a@nX{Gd#!`^(m`j(PrTq$*C_W-YeLVG!`&K>{#{IQxzkb;K<~!Dj01s-v z?J09~QI8;K@SiNgXMFx%rGG^iMC`uD)C}O8p)F27?2am9q+gQe0=A1Ssz-6S-fLp- zNx_ol9KI3>LePWmM$(AIR9jzmS!2S&9sh$c`-9yT>c|jhNhY0;(i-R;An@=ph=AXt z$Kw?!2GawVk#|~r)fO}wZT+kGslVpXQu@rx<2gQbk9`3K=g_wLbx4U_9bJQC1*gN8 zWbp9gl=jk9KNTrF^up-Mku`YqTs7zPT|8$irx&&=yuIbwAC|^s#F8c%VTEq(e$n$C zvNGG$g8wIs`q7T(|jl2FVuSK0pW{UFW<7RWUDA6Xt)~9s%!QR32U5emg zIonJx-n^cx%1~^|UXHB~U-VEA0ep;;r=-XKxxK8>xwJ9EyV;gx*Q^bjkp+&d}4Q(cVW zEplkfIJ4f6svK>{)Aiq;66+QIeNO|Tih6{=n%69a1CuFt1{<-O9>Wa{C5+v(VV1Fe zU`37jovvfvxZbl)MN<^9_Xr4G1|RRn9!2q^ASSA@U;qqDX%su;_-+7jTxw%Alct?o z$>?DHV9|a3NAu}09x}VdE}S*qdnXwGH!MeC17C7z=SlME24!QqI<3Oly~e~-@Vbm? zPdhz+FxV_IO)phTw|HD-7f?vQFC?_qqP0_hKxWLJM$I-H zH}jU#e;)&@NzY+K%waCW5Q8l>J@z}JOfQ2LhAIljVH2VkLz98Whjv&SJ{#!Kc{h}# z3Qss0l-hReO`L~duCSW)1=sE0D-!w5UW1=xc-B|ua^bn=HH+KpKeM5e+487)RNJ1v z5%R0K)tNP)P(Ht@ae7QOTAZ7OSy1X_*9SRie21soPzZ)dyG?p|a@->k??*GkyIM0?DC_T- zAQZN&b=)uynePDPlotS#P^l|{Kxu;Ye_2Cs?N`my4)Zx-JMV``! 
zy+}hP=qXj4}59t=8z*#CssR!3qt)P^O|#8Y+Q=*T3!iG8@gl_0?M(gsPz^ex>B zB`JK61mwI0NkGBd75UWBQa&Z42fld7^fSG~FO2wP;?=CIK1g`{vTxRiMadd!<=qD@ zw>HmKnowA4QJ`-`Q@xam4qN6sVzLg~UsJ|k$?&MzMP$~hyfbI(E)Bh{}R`cz{_xzgWua0EqhojqOn~IlSW<1qu`mKzmdP269Z`yym zah+Y?y{%9S-6@a5S1A6Ogd)=0pG?^5&gWQG@xJ@ZoygY(V?3u(8IEEir&N)s0B*-5 zn?)?#`sT6Z#5Gr+j{7E4nFIsm@y}f6cU!p59!2F009+f?@bqhwOIM?yT2iOy^scD` z+{Wxrp5^5+khS_kz~i{L!vxxL-dCT<9Z$9qPeN)|0>R~GjRYdh0^K)q|jrsI?) zl<8jkyrEwAIiU!J)>ts?Y=8deyznlVOPN5#=a$f`;`A5oc7~AY_+r%dDMX*?zmFb9 z#xqn)L;j?$-IDH*ex@~1t(G>)zT|(Tia-_yOcb9|a4g;U#ZcW0;*mLa^3SRV^pJ`o zGZ=LYg=R1=ojPX>#s~H}Zj6ZZt)F7KX}F{NAZh^E$C{%M#PVMk&5^VhX5Y(t zotNnc+Jpz9cjmvwV7khjDwM&UZLrbqsf6qXlIj*XhOaA^8U=N3=kM&w=n15&{;)oY zKnyB0{`6dSgGvvdQ>&q_1fVvsbHoiQyfFEC|wg4tFA>%WpP> zyuz#(5rqX}0xd5*R*4O-P^VGuSG~2EQ4iw1s(8s~rDPLW@z|7tI*t@@^Qt$#w(+L& z<#n!`-`;vd$96>DY0l|5i)o}w3TJ57oCII?XHh?fwwWefV^3Lpd5`xnmma(D@#KeD zcwzgG;(@T%4(wTV6*SeKALnA^%7PUVVD`D6!ufIqcmHU%!QNtVaN)0sB5-6|z?Ilm zMk8!$nMoVW21b)|#9gqV^OR_gTWUJh!aUj72_ui~+8@a3fVrn_k0T>bFM5#GeN^@?GarZ*y4lN=1(hN{mbpJ!0VsdYbiojlCF zX4kstfZ0r4a-R*-<+AR8Ip9oWNOw3fnHtd1v^#+@<59cos9BO$hVNEs2btlZvpGZx z0^vh^eYxU-8S*Rx7qjZDoA^QTkMY=tM_>dtaDOTB3%a!?6cZVA#fTF@6ZICOFCr)# znChPl=|3?4=YD|Hz#GdM@7h(|^VWYTYz@%Yo+rtP>F15$!f;gY!fEDiO)H5D$KZrI zZB~|l#EV-NR zJ*w|A?pqN1!jYiaC6vIsK9s9dgCk>HLBdg`(4$ao4^TN*kEl+hjbYBomSi8xr4~M;7z@bP_W6696aKO zS(fk|M_l>{;2Poszh_;r+rO*HDGMaD@9 znpAZBi0(WG~^6I_R{E(r%eg zCkAfmy>d-;3fm-6^PFi8UC8V^xLb2FATlz2k_E4hyKix-9x*c@MXW}AI#($wlo=4Z zi_af3et=1x@!0=82v)VzIvW@BO(p*ah0MWN&{XPQ5eZ&DitRRY>Nn(irxudIX}^Ha z@qvcc)XdC{mR2xqJ5FBhTmrcEU~4k8*f6vU5cUDiV-&jB;sT{i#zC8W)Hp5KpJw__0eB&Q4bnF9!vs zI%s&;WtFJ&vqG2IU)-+)-99w#Je7qcv~zYA))2K^w@)_=`ls_ zVDw|oz!b(+{GNi@>sZ=-%}@J2t=YR-e?L)yGK!JoTcz6(K346Ax#8l48x&+Ehr;KR zC6=4Gn@Iv{llu$ZI_Fy~CK>)ztxg*GQR}0cJ6}*e-&-;8bn-dn6Sr;P7N{-hQ~P0) z$7XR=QW^4`WMsqxf}~&o0k_FXcd)EiKRO1bb@5H34ua1@SGvJstIH)qtW-S3Kma0E z>3W}DGreHspG#R1>QCq;ds_5a&+j!l7Azp!G5vj%*QVQsp$y-lg0nS z8X=IQ+Jn2Ka;|3+8s~Y7+JE;nysfce(T8COc{B~Znu}l@^LfH6i6=N*ex0#{77{f9 
zlJB3SVTJKG8};nKW4*Q&urSZ2s*N5cZlgGFvEzJts^q%4`uylg zon-eA_YbE27^sHcXLb_~C$8k5;Of`PW1Y|*gq=H1tQlV!-<`W&DOo!H-VnOUO%y); z=CHx%GNArr39SK5RW|14_e)EMHcc5#-Y$V~e-W6UzP|$q904++*p~q_;M;Qz&t;l1 zRBey-AqF(rcqfo6b;wt-sHYkBzr#1quP&IQ67uS6ta{vCF&)ZAiNKH0V2A|y5O8^1 zVdPw_bKQTfB8;h?|1@}c^}UV_|(^?!@qsZqgw-M@4->c#di zb}e+;4U%Bma`($WZ497(XSUWZ57dyW^}KbqV{;>0nrU!ZY;pRQRu)IqHDtsY(E--` zTdx?DGs2hIa8P*V{b>%Dm_Sa5$rt5<)^{=q5)!-u0WibNL&?cfP$P;cT zgyUD_LH7*NoptD&=9P(p8LzT&52c4vM>I_q3+v^UFrvUO9^JSa!%Tv{?849|(2mtX zue6)EVdXvCWl98zO%Tpvv;LW!4%04@}SPZI)x!vPZ_4WC9G zwiF+7#~ef{WbuL--I*_y@U#sfLz6M)jTeuAw5mVa`))17_VKJ(U;xeQ$lqvRRVu-N z@egSRaFD-50>L)>SrsdyYt4V_WBr`=$G3;MN?mXmW-j}T^u$R1igAH zohh45>3k4Fo~%#eDuL!|#`bQSS=&u6`DU?2%*kJ55m(P4%f#;Xb*bKK)+~0iesTY? z3)_3>?k<&{JF?Hdg4`zbv%c)fR4c3-j=s)hJm&XYk=&c!wzg3&sPO%09s^II9L}~e z?8FdEDHY7%)v{pWiUS>9`Ec}A?-83w0+^JK85X{DEk?Wzqwr5N9^kKBe=m;pjArh4 zVk8&f9K7Y0wcqShRoBHDPudJSnIy`G_-RIMkKM(@|{^vZvu|9Ngf-1{8KNqO38UrkXenn* z4D$1qi=^+wvTGaSqbL?Kl`Sf$6%jysZ!#n(Zs}Lj$Sc;$iBNOk|tRTJS{A2vZOL<7G-6-O4bUPM4Hp$Zf`3f>phV zDI>A|FP$A^BfXIfQfwuJTS?Y8L&h~544sJ)ZYZXAd5Lek%X>nSPKNRD9DC7{{TAZC zPnNdP3Dian^X)PV*={A0pJjQhj{ORYrXkKj{c>w3YU3D$rWU|z_iMpDzZ954{lYzf zh>5_A!}f~W=QMw?sOP>r!G8hePR%Oya)bbhWeDy8>}41|j57*37j9d2E0N}9K=5s^ z#X9!nXI&u(2QXM**qRyxVm`#H#P2E6>a;gmA4FHn`*?R6o3`-{S3dzrESO3NVMUt~ zk2O&IRnHQ|WEX(<{0S&Oo<`EuIRLAkA$^eeD1M%Xh|xwN^|QH;@}y_|YiU7QFi%z& zNY~!^echD%Xtze^Zmt{WXUY*=p)HZ>^brA!b+gj({E6lnxEb59yqbzOet0o0P%wI6 zvw7q?iODlns;mzL(QQ4`gzYXNw=&+511sB{jRik!1Y!;@AIA6d)@z!V;Bn{@HkLZ{ zT9zJZExkwTm294ktgP;O9sBHg+?P7xWtuBvar&PHDten+%`q zeMaiD#m&dJ$ivuf`J-q840HKC`6<5fWr6qO!>_QI_X1C77Cv+T6LlpK|0W{j6g7GQ ze>d#kbdpINAbBrfwB#04Y4g?-5cSrqOk754x|TFN&|lDLd#eiw=M7irK9`rjiH7aECed(w-&jOhQ4Yc?-N{&-7_ z=q-h=wtU)c)xsdJ<#?)z=9^!C*6Iw5Y**+xY2Orr&R2*h!b+GS!@M;tv#zqwo%M1Hoo0;pUbJw3_oYq5oEiaCxiFNWU%Lyb z=S&m~zExVHNZLQk^%lZS25sGu)C%=NNDceX`w4yn>snxRrK-OBP9$L+g!$b^*PrMJ z8giJj22D^#3D;c(@71Gnv z&z&r*ZQ`H5XZ(;w!WBb&HcUIw_8h*{{BXe#^(8SjM;zIc&{w@J4?jUOp<)R!#7MPY 
zz)NQ{i%aVyiYOv+DCMf_noOcrtN)G(qy6OSKVAUwFb|BEF^t(+o}1Jn2?WpTEMIY9 zeDUUae<4McEBrdpA;TXs|EkL{{y#4p2mkkFvt~<4RsZHme>QB+3FKu@s^tr_N>P+* zJ|3H&d@G`(cxEjb{PLlhs410%wsXS0Bl=pUugV|K{cG;O z6|X8JJmERDWIE=>J6iu?%HK#f_ib76QqKD!-`e#?0kOVx{X>OTP4n%O>%2anyk=8G zuF;>~?I&@2tfGlUJAqFcU|PE3^x+@8^C_=pzB&tgPb^t%AM_MI zA_2-`rj~)G7VRb=pE_rV19g92|J?9qqYE``EVm(DF+ zkvf6mk!LxYq&HFzp)ZJ5i7G_uQg2gcYy2ZT%u5T>VVaENhWWUft%`^F%#kS+Wp#SL zN*RjVqzc^VaQ)(>Wh7p_4(4afX5>H`#xb&}b?p4I`-|0eOxf46kF_t1Zr*_YNBJDL zO^C?4bBA%|OD7iZT(r)!$cwf%<+m0gd}s0I4Ux2r9Ve%em7lh7*WJ%~NohqF*Pk94 z&&|SRSNrnwf0Wl~IPd9N^h;Rqw!(|601TdQ{m(I}$uxJS%90mwOY?!K4BS|Ver%d~WHLa)8}ihW;AGa1}t;xF!5$w((R7{o)%FaG~rhg%$eEN{U7yrIStN;=M-xsn#VJQKj`) zX-9?`bU7AIRbpD<%rE%hCI*h+rk~{r=VVLsJi=!&ms;b78>GxAvd}P1EvQHMamm%TZhT^<0s@N?%zQLX_zL*S{_bR*Q!X5h3 zm)#R1BO4B~+Nko7rB?^!s5#;&zpra03hvK25=yka8QoTVO`CBp z%bs<%%HjAh%5%xR=TYAyMimRqHzP=?lX{8`o1MPkrC5YE?AYLId^YsJvKvQbQEbOg zdZ0livP2>sW8jY_g&SeN`~_;F!b-h)r*w#%l(hQ??LKh~J(+3@F~5ff_&h2*W557T zdR}#KXNC8948>JI994{|scGR}{chl&Rxvv5-z-ASQR@DGBQjvpN`u@fPTHmRq>_9i zG_Fuay}!i?ADI>dna2Nf40DJyo+I3QCG(?18!yzaFa_dACn%kZ$;xK_;}?;xjovN+ zlVXC&q0G~Z5Eyn z8xxmk-p^v7$x}qF>H9~Mp5kU1gZYXtm5OU#J{t*37H2L9f&6z8V%l=jgPY z3S#kplB_AnNd$nv!I+O;=fBgg6>xOP2xw?%5q%RVq=Ed%|FT%Rk_!eZ6iS7oTH2Ki z&%ew9ZR$9~&8x!hCU0S6g_6}r*1J--dYDp5CsWdtMU+L{>W58l>x9LRmVj3>JI;Ib zpuriMo18K&a7H`(@HIYnK`qqTUWu_sxgC|yCf$31vGzsFv>Pu82UMhXhbJfpiAqk3H zYAprt-c6UMegK@hTflauQw%E4oqa3!&xiU;;`)Sos#B%(pCbrPqA#efM5Qz_ls!pe zCPW9qu**zF=FY{|eILzu?L0}w->La8drQUy-sYePNzZBM-qo`stWJzd-wWDVES_Uj zqCmalc(T7^jGQZEaV7XZ#d#*F)vgmOkDww+oU?fbrW`Ac=*iv?J=5KWehK(L_xI;u$!wfAIo~8HW2l6 zRNz%8_|FWEo%_XQv+wBm`1ocS6#D*R{7?SuHAGoBx__U|90XX%UV9RVGw35Yh)J#? 
z2jnL|$mXqGO}>^;#Dq1jjGR1~49olE^}e2}<=x!Em&*BrISt~g-CLSZ5gj6)A5p(v z87RKIS=rm}YwWAPhBA*Te6&)Kx%uAxt6sU0=0q#i$#nH3vT(=kaFV)a@3v88020&2 z%j>r9$B7H~P1O@hUdrc=6FlkI^gf7ZSaPkGRvSv0C2Kx^**}bk`;5JE_4LbUhmOxt z1kC8(_7_{#f%-~y59W2?39ucZpwp>;H-#BPPA0#g(|961 zH(v?^&?h#Ka{w5mg7``M$N!D|K&K1oI>8#K5McWzkV~tTYZ$A=m)Aq79+3n|3g#pT zspNfqLc8-uV$k2l8U;4ce)p;A;>CN@{^j&g)6I^BC5Is!V?0+2gIUj3Vy~R|`cYs~ zcS$;c$R!YXucG$r0@8$FAbO+fqUsgmj+o!mOm0(kQ*`#QQPu9Q=;!vn7V#IcR)=ho zj-yf{I(ZV*B#604>IL&0n=&%YP4M%7ok(TMEN$! zEG>oz*xwjilj$u7+no6lcukCir4jM9NGXWK$nu-V8Ep6#)x7fQ-jzyYbP0XlU(Nlr z%q5Px-ez9yCXFTG=6n~o|Mu*+u^LID_fWe@ix*)Th+Jfpf4)^*28+5x#lO>W4+!Sg zaaB}$nLs8D#3bu$(dPZfSd_A`P1`odFWDunU`oIF*EDq@SJn(?bg50IW?WTK=2hUU zIPWV)O>f?w_rc+Lj^j8{Ja1maX(Y?bN=_F%iNSq%6X!j+{k_z|@dJWI$!0RkC%>)2OzQ-u;)=}13s+Hm0k2jDm}Vh&4ws?oHARxX zdQCjF7!7w`Vk!=n;3gNKrVx+DBR7vAqSi3Xp}s&^CNgju1X%`TTv{t;qK`UE5*$aZdB^OoQqV2#Ufm)qqey zrc~p0?xtDqR*%IfCO8A9Z$FD%leY(_N}8B^IM*bk$zh>Kzew!dbLHzvEPq^&Tq+P|i;nVR3)*0f#57(H<6&n%C=>KERf zG#WjTI&7?-Y`W9Z@yegyV*-^cn;f8B(gftfg6C8E>k&MO0T30MyR$YQj*9mMB6SUmbl3F6(SVS|J`COwc1q> z+2eoecHa|7$$tI0Y&3~lB7ubJWhJ?=?b)OBh{zjq@dGm;FdL zlJBTOv*R^ZSd5HMca+R_5%!8w*#$@<9ACZ9f`;kX>%OU<9(UQF7{+^9XDGln;uG9< zL`ltR_e0&8S)J5B>n6C4)A4Pnu6uA38d!dFVp=7R*yrz3ZYM9%6r{>dM;WH79WD>! zo3Vmq488O|of#J<=<_I>MR7~Wr-}TI<;-SOP;}n4a2earL0zX}`HnhFFiU5N8zPOLqe}(Ln9uO zk3_=r)D)sl^hk=Nm-$Z}%Jr_!76CD&wh;vm-Y!U;2My zN0;G(si@2hZHL3$lRl9%y~?Y^e{x~^kfc#bzo%(FyBUK^=`t>!UCBR0Hw~Ckr^Qn8 z-;=~*`dAF#_&**OO)}3Drb0>V(;W62Uf&g!#U+#3SJHQ9Xipr5ZWa8lUU5c@v!rDV zR`X^vZYjV2uA3+y_k6mP8lUGP&S~++^|U*a^acCo3(SrFSVncJ0>L1(Ee5CDKCVIM zCV!;lH@T?$mhfCAXTh!`$3QPJwZ^&sAun?m$?Z;q=QhJf#d5MF7E$~%9_s$K-vpTG z7VU(70cK#(J&2OeV<^E8&Etm4)r_)dCZNRMs^Erlg}e&ugcIaWfm6*{E5oqdC7@2Pe4phc*)e0fHu? 
zxgJ`MSGK2}aah8qiOtxm1Ei%XaFslywH=eY(=yLcq#TTP;ReP8RDb_O*mK-ri@`I` zV7L~8MW~E8jaA39J!rPEMytMapK|anQ?;V!vtF|E3<_bU~tTVCQt7 z`t5ovG%OWuCs z$_nTAyyg0dQ)RHmk3TN)j02v!xV>r(E67?T3hl#k_JB=rgi}IArD}tzOq?JkOKsFEDK{CA z`mgV*>*Lk6zp!SbhK@z&bUU-~;nW1U`*N+(w`aa>HOi1?--HQpE^q)Y?ctb|iJtIv z0%9agkhF~HFg!~7|7;c#d&uwhhjOaXZ^%R9l#5SHe@|@GSe|Y^$;uVdgOy!Gd%oyq zfw#|#58I_xN7~#x5ARpIP)el&Z-e~$2fTWqVlpr#>~f&3b;w@NL3NzVJ%~ba+BwVM z6HBw1C2#l$Z&&rJ_$f= z67ZphM?ky&V*w)2>I@~h&buVeTXG@0U+n$HpfJyLN&;n6>IB#m4j+{ud}m2#*ism! z3G4OlxY#L%S-Zq2{r~$-pwKy;3=;SBKk7qA6c~w&vq;kf>67gy0Cn!+5`*IwT|)dU z)rvd#5pk6+<2{hBIKh9(P$)}KBd zmpe`S7$idGIoHhPL@hJKLr(f|u1?MB#a2iU{j{FN^ro7HeBQb}Fu+wX57sf)g61XH zW?MAOj%19`7^T`B_c>Q_Isb$BG`Dt@7fC`ggsm@ZlycKa54ZaK5 z_^XWaUFtL+eMLf{=3Gd>HB3~6kcO!P1)WHw-*N0SXyy}1f4VKg&Hq=n58yI31OkmC zky*~zB z!^}wAg3tj!;6mL^5dK6Fzh5^lzzrF>)#*acv+mVrfg6`w1N`%nH9VYq-}2wnk<138 zDvN$YPbmTii$|ai%399i8MrUbDhD%hNrV|28Mk6)#Ql1(+#Hhp9Jhlh)O*c?xh>Y< znYA}hvD;c;ZK}6UeJV*#_wN$lRw^{q+M$P$yf6$CsR>5Kd&B4UArWX*`%eqKpZg99 zKUqf%!HS~{$DxY@?kV5eZy{D_*IU^P`nPjGO-|Maq|ti`?e_tbjT^F;pg#yDv2J^1 zR=)Yv@~MdL-_4hpNrdMn-sHjRylI$Zh_(TkiP$)bnzj=Qf-K*R&L6-gd8j1&OYkUK9&6+3QLyHoGa>iw|y5+P6%X8Plb>n#i7Q}~< zhPhm56me;#g+s9$y5OxB&7Uy4j0zd ztRuzYIu6G_3xJR~OCJPa6>?~BzGAOWx6)baaXu24^AC)UM$N~(ORWv-jUuE?*7s_@ zv;=wS!p+-02&#IGpn?lJ7qL@B|1NbPh)$0fNIwpFxFn&TqNBP1|0 zSxBd9qtSK4F;{L1rgmxWOfWdTlK6@vfc(N8`ZU<4q!nuQsLOO09?GhW*xK0i!AXn3Nh*xE6mziD~s`QT*G=apk60EVksn7`z5 z2&l99A>MHux%+|kwO^jA@T(ZR2kkb84e4m!R34W~Fs@nK`CtJrnQ65dkI$`6$8{so z;TyKPB$zC}IuXqcnmWFNBH+abyS3c^=;8jA3;mH}tU6N0rBMm_{D#!ex!n@7%g?ty z&U`7ww{RNrL5ETw)M)Tk6L_MdXrS(i?yyq}u#y=TgYb;do9U{32D*x(?`ZtEJq?b- zNxMQhFA!UD*~iV7=ibfQ!`>cO!59nUV+|6p45D0kQg`!vi8u&XCf}q{;zAdXq_4@` zASV;j&0*s=@t|=sq$uM#A|X2V$YnTI^4|qcQ#}=q(TZddX9p6QP z87Q_8pLn3SB&VgN-SjLG0TDVx0FqD>8}AGKpTG=4H!I}A9wU@`m$we24MTOmvp@DT zT7)lL@lfldlxjK&fNzS| zvG4O?_H6A4@daX`^VuNoxjA}$vPE^ew>7XnWR9h&6!UY=@nawyp}%OmY}+bP?v>fN zGtAuYPqi51cIdzIrN3`#iP6+;-6z;&wu$*MY76{iM{WuTTsb`l@0=g;#PM&QhK5a% 
zjii|k*eo@R0e7{w&)dXeBzPDztu9r=X>2k@3fc5Oisa&fpr`MQ`N(4#{R(gtl<1q5 zp}U-QA-zBnE=Z2Ybr2r~gcAQ#0RO2u$RVEyQ*KH9Ef%?Qk=GlED1u|?a9|mF(bju0WbhXM?dC!GTr>{kNkZ)oYyX;(1-rS;u)-FgR^CyK;L+Os1<@U>x!Md+%M;L*LUY z;cyF-L}#21XBtoTE#!LNd~o!xm-Ib09^0_D*@ovQ>8tBj5szZ`nh7pD94yZgz?thZ zhY&X!H*U`kEK%#TkXhuy1P4i~O^0xz8KLa`#(+%3*x7~?;h`wqHjd|@Nd)*-JZI_L zkp%C*3ekcccd51mrO^L$w;>$Z&1TGTM zSvYe#yszxEXbLfk!56lpPwdI?=9+0>@8d{(`Qskh+flBVt0VYzm#W1aOiM)M@vLKn zZK#b=J(Kv{mGO?JW#VEzRfc8m)8Y`I7AlpUc8B`9DF;5C%|XTaI{GF1aho38een>@ zWZoe~qbw$x&*^LaJ%2?jNfn-8{7{R+uX*uo6YbUze_pBY{Q2{B_fi5F9iI{)m2Iqh zx2V1s6`VwW8Un1+cz zBj-R1Mlb`CzKuR9bN@G73E4PMkO%x6R!Ak}NTxbsiG2ECrIGkfdeCa} zP`b#HRRr;vy>iyzY%IH%nRmy7htC7KyzoeAd zVMA^y&S-4w?8mtJun{YXId^no*xihRH?OOoaLfF7ZG^vflq)dGGWeT=HJA@`-2+Dv zZE+Ck;b_5NKIePBxb!Iw-MTi1CroMDbKiIVkTMB4$nzgb#z*tt^lLk5eh&xA(W<+3 zN`d21=1;(XM{My^JHR-4&;bH&gizG--vKrb7CcyW%#Q=Czz^Fove)m^bR`_-Vvxmx zn_%+BNoaSFtMf9U^arhZv^=zvzW08qq7}hI+_$aic(WqJ<&>f<|2D`8kYJRPJ+(V8 zxJ$+|T5O-B){HOI+k0wXTYhr6%0bRPDP8|vgDIRiehpd!sXtr__H5=}GmfravW-7a z85&1S=F}e-7qG=+zPuK~w}}Z(@19(_6SZ~vZiKZ{xnz)?w2Wxm$gktR+Z{&<;IYI5 z-PXaU`aEOWn!gErUR;X1vS@}J}LXElTMx3pax=#7kYI=m@ z-g{f6RM;gs;gjIceYn^p`L)-Z@$KGxRmWg)wW0`kxJ!e&c7LYX-s5}PyF}gSQYR`3^E(3xfg4mOSfdp#h z^*~&`7{G5+m(S$JE`3{i^AuNyB%1Fc=k4O*P+oo!g>(}|g_eOf_5=vzfQiq8 z3uGA2^N}Mt;QQh5cz@|`p0eyg0)eL#n;{(7=8sl(|Ghu{^nW}Kgx_M2-vUHU!EBgW zG`RD1Ks95^pdhVdT`g#2=xATg{|;v_|AE?!FYA1c&g$Iw^w|_05vO?P!w%MsAE7dE zzf|t4H+Hu;fXn@K2seSmgqk6<3OksaTz`***CIH8X%LHmNGp}(ynk3hX5Z(3JK{Rv z8kE7jEgXFMHL;d1Bf>KWrNP5iW9zO^|DucAlcjGGbt;#BW73qEz~2Uqg12IrU*gJ6 z7+kT1E{9nY?e4hOy(k>xDiCK3DspHfT>3M$kUKD{jsBD-9y0nJ3?HKbweuTOyFcda zpCQ_9jhvEytPcMt8f zJ{hKm4?#3IQ@X_C-N9w@oWz!4%`*D^TsU*zlk4-bv-x<;c50?7^7knCX%9Gagk6P% zazE^i3(YlHk>ct*S=VJeVIf(CQs zQ&H;XeBQv+T01VlL4U1ta>OTTAVXX)2|o4Z_=PuQ=9BE%!XEQE%8JCko8PjVz=2}B z8z*$XUsF6iIGn46Z^R&k)|}_|EJ9>&mvDwM->k#f+CFn~ysZYEN_H%!xjlF&Sxy`F 
zpm3h!kajD~onf)Y%K!GDA$PckODHbDsgo~-EH`5;WB2D=P9hbjx!SoR^D(`NQFQX9rWLI&O>#&Anq!0jBuc_W-goxbi-%s6x0iFfw+@0t2E!3kGyJBVUilbHQ|_!Ao~Ejhb4RKas3M@XsoPa4iyjg{{r-F9mA&-!|p zJp-ds_lFH72wf8hS@>EtxSd&<{nEO z_C1n=)z8?}`>_}ir(O8XIClltF_P}!&`WeKr*WKjyK~iqTw5&jy2KfWWM<==qae!a zrwkGVr{|9lqV3I~Wn)+!jw#gr2)fabo5&M$gz##Cx1soG8mrz}IV?%B6h;UNbl-^g zqrJr~@TqtM+_q@HQ#W+og1M(3Or=64Kl!~@vGeC>xnn^YahUcDS1Oqu_=b{tWH(Xp_r0h} zag=J2N@uUwsF7h{+Nf-ByoV6~n8XYKnCSqt0 z)A)d#9{Y)c+<)T*WPkC3rUDXa96GJn>FjnWu(}w$(Om>vQ zxczI7wmR6x*tfy8{e*e&OpH%kB3v%SvU?ci1gP0`V?%%9xV4X)8|lwg7wOi?P-IDIMh+udi&4K{7h8<&xyO2={T63w{>s0A-8H zA&E=>(-uG4r2ToW1lQo14Bn1x##VxTN8pb_hV>bTDE9vAT+}{E0kwmwH$sG=ZZG0) z@4k5Z!<}zD&hnr=83=oKh-*E%S2HA@q?<(W{Havl>o7_5=VCD=LQpVyXhPYD*s=;l zaQqPnDIdf9e3A1^poGu|Xj^aObnI(YFTil0IzKY#SS4^&&`r0c6bP* zA46jlVO@B@0gkb33t|1g{*b?bXHxF;TAd6D5PO!DnxU#^V5pT!gb(={_A_#W-?UKL zWGdlbrT8i&7u@(T>-!UJZeT0P!^10T)2>&}6YSLd)YsGuzmGFyFOsCES-E;_>ru#4 zsB-Itok}axx-lxO*S!<;tYQ#YcYm(18S!A0a&qKpSiAd(t`^#mB-JlIaL5-S7-r8K zz24iL9^hUya?ph9B-iEBDIs?H6CLmOINW4%o6>pnikF6JfyL5|K|?Yy9JPF@%#NOb zW{MnLQ4MR#@7W&HWhYcS7Ce9xh;K3?A3`f-*5kkM6M6oy+H=MY=r9_zpIP*RPZjY6 zjBCQY0Cw;10ffU!xB&zSkxz#g(*{kdZ}YdKAoDv)qy5MpIuC4&1OoQV*S>EE zx1VU#>`;oYYL?xpbj-dvGm&xLUJHIkQA5kO)QAxl(4-(3uh%E@d!XZtuLvcjy#D+E zYj!-Sr((RJ>0pBKdMSEqiu~DIwBvCNSXZOHfcGQf+JWZcQ9Di9DoE1Kof&Sd zLS>uvtNJc9#e_t8qwm=^iO?A-E??)|mXxAY+;N>c7pxsB)T%$Hz9(p_tdOUEMm7VC zJvCBzTx>PWf6W>G)OA6%s$j!+2qFN3Q20ZhXbTg47#CWJU>UXGoD;{ldHzV)+5Sp^ z#R1j@B0B4BxGB0R;&`It3AikVZmkK)OSuq)T!n1SDn%DQO(K8UnSTdR`CLkj}jE&x0?pi}0{(28@_*T+c9B?>4qPdm~F#gzgUNYIHBNNYzGvw7m?7 z#bwI&N;?u`rUmy9SUlLn@VhXQJx{V%BzSf|9UVF1Dt?p%ur0@}JVv)-+Gakj2Mie9 z9UZI*?o+&$7M4O?mIWDKGbG^pMhL(>UxNHAuPh??C#-vh!coVUF87>V(UZ2JIEf+p ze>~jks9UyM=X_hQ?mTJegIjZRdKUZO4EtSR8?!}&?ayb?@ajsfE-g3Q_Z2cv`8KZv z_`04(tb7zgE0L}^35p9e`TYdc3pL}1Z69P}RaZ#adN*yUWy3)ib|dG(9Q zX63Tyc$FEDWMzJ7l<34dEdn!V(=WrSyvF&&3?W=<4bX+kl7yNPL}*4CRiCZRLCQ+nBT;zkdnitY_J&8dKFO zR{npFO&qa~0?55;H}ovbrdd)yb4$cLOPk$SB}0fR+A2AAAW0EsT905C zjIJK6er$#@HR`%5GhGKU;!yeR 
zU5vesWSTVpN54l@R%^HC0V*FGz~Q>}jO>LFSEi(J4s`pU)$UW}2MN^zc%Rku*$8j> zf4dTE&;351lomBT{5|QaN!MRJ>V5*svHR}Ch>PjsYeOJw%lhNu=*NH<1b7ip+bO2GE;7Eg zeZ7`w{?a2U5x7I;P*en34Ktn?{2#HYhmhjyf1*>hw>}HtNPyK|J^AP9)I>YHP@DTX z9-~4SV_|jNS5gA@e|cJ^>#?HIxXZ$-k+aQw+Z9!DIX~{y$xQC zE8_`h(}Lcb&%9sk5~2>Yl+t}<_6S2p3g+k`QnFM}R+Qq>l(&F)-S*JAe%`2ijAF=# zvjbzck59&dF)Y?SU#Gv0#k)3zv$v0_eh&Qp5yr%BRTq_J8(^>S3;{%Sw;7x6s$`r? zJXFMi(fO4@SJ@VcGcKf8TZ3twExH>`FS^l!kIV7W_afPr*A$W_W;&7EcVjWMveW8e-O&*3#Tu@ABRO?}pl;g< z-hYy6SXoJ?%o44>*5Gp#{_eSl0PO^)x~cT-%VIBV+D`lj$)y;&=dnvsco4sYz`fOI zLSx}A|5G7&$*z#CHWo|NO^QXUeht2{k_S=CC_h z&F5aDY7DyFYSb+6V$l{sEUR8-p4_`M; zg<9?VN`(C1!pHQ&uxcc7ENG4RCyo_;x5TMQ;}Q~9JKXD*Ny(MPqPZcDh-N(*@a7?? zih+vG`rki54hI#~jbe2KV_B!a=gB@?RG4u5dRUE4P^gS%sLk{M{6@_w_XBt|?6s+! z`p0W0OS=`gjA>fS0@>Q!`8oHCK?jX`)k^Ny+)h-q3uN7Kbh!B?q#sl2RNX&x^hA2e zI%zmbCr)oSe+x&W*x&La^yl``xz+Uf(^saMo zIiBsaF75HkEb_2bMmjDGCh{;8zj82gAFwlehM4}qR+c=AdWYGbqeH=hGuAm0zjASa zdI!_zykQ}5vgPhnYv*5%*f2#MTCiWA?f)z?gwQcw|0^@XCiihbL*Qfk+6(wXaHkW0 zh#DfExNylsd1>Mtn-W`x^vPe!0d9e~$7E_ycok#efRV-p%SC0Ja=zpjwY^>=N`~F; z@y3yt;iM ze-K}Zqu*)1(@pQC_xHaVfGRn&M#1H8(aOipPu2#;$py zZ}%kPeA8#C$@zXI2{1pQxtzQRN&d(o+xZfQC$R;GK0B@=ilfolK8tm&`H#F>rO6(_ zX`Nu9i}a+`SDqFB$?>UrahS2Q38~Uw462{4Q^A8px;*BvYXj>WU^WwUI;qcP-+0(% zIwba4(Em~lI5U$#ITK)%W4bq2x0n@7YrD}0d;;yL7;tb_)6$fs`~lTxRnhtxu-J%c z>+u5r<-b2B<C>so+5;@cCcUlqqE(Tj_}6g1m!_&)@8GpQ<6Xxmqf>(*I!8DxbF zvAdL5lV52!W+rx3$KgRGVD1KT5{*CU)NX1gSv0AdaR~@ zTVd1k-Eg_f7*9?u$j$js`R_kO{7Dvc*sVRCs453sWrKx2eJ~gJS7D2Xv;v}_ByQ7| z36VqY3^kFWM?H>h*vtWdI-wvD@&r|Alf3|nX^w}iVgIweU3rXG`JV>Yy5(i;+$$gN z+P!*e5u!@vY36t1=OMZmCrzoR941R4T-3B|PPnr3?m?jS)2vMeuZ-VXL=x` zM`7&;2X_%3dr91zL=s!rKc;ohp(${!;Ejt5(XQO;>z#}Qk@kaSfN_q4LwxowQ}lv|9=U+ zLjURTCd+U@R`*+}trO1_TjPnV0whF1<_gWDWMpZLWkf%R%R!WMy^=;qPonlytPe`G zu$@eQm~ThvN*^NJm{_?oZFckx8TV{_Kt5NC4YSG46w3{Z#iE@SHe?!ew)g4xM=ap% zsi`U1$?ug}hg;^YQvyD@HVgoPiS(A&5L&i8dw>R|Je~7tTPa9?TqJ(#o_Nx)X=*Pz zYjH9PGL=7xL&x!~2!hWj6lg1d11EZblttpiS%S(XJC?AisfP$CfpZoU5EXK(KA5^t 
zSR8R8ZvV-!OM|yhcWmnV6_TTcOMxM+-e!CxTm5;2#AoMm$MG$%is%VzBzAd@*0bS8 z!>cL6*5g~}*$^h7!(X<2`B$`dcb1f={LNk88toHl_5&CCKcDUJxA*f~1*FZ_nooId zbErHAO8u3`Cq<7B#HzY!y#kJh#9NeFbcN0Vt3PfUpPNHdfb(eBYXw|y8ow*lC5DMQ zbN!Dv0)!&VuM(WGRqHR$QzVfK`*qAWn^)r9E1V(5N*gb2ws;^-kFg=3U>WAi_f=sl zc3o#-%6|vlvDe z=v0x&YqpuqZ9ff{zgDAYc5%|>|F}-(;LHhO&z8;z!haGLgikQk-PWCL+TM;^!E$uc z!()2*_1j{ShI!jDgKVJ8VgEPY3#Xz|Iwy6!LKlAVLkl&E`W+mvX|iQPtE@C~M5~Q{>wj#i_8eW+a)gLX^FIKb(392cx;9+;%RPA-mo3 zSwGJ3sb`uc?qaEX;6R(sNPs1SER*WQ8gAK+e*)Lc#HilO4U!N?1yTgnbZqYBTZ32I z_QdJXr}5CZ;*~{@%mwSn9%TH=?{fir@68X#C>XQOFS7@Idqr>iYiohXR!rhakgTI3Pd|5Vp8wSNcS_ zv0iliO1hX7Bu&gjU$jzRU>XL_v?#f13!z+)Ka!NfXF1+9#}Sxf?uoNVP-QSU1FW0* zOm+vlSn*wg*~Mmniqhz%kQv|1(K8_lL@}f}r7XT55eHJ>YeFKB`g6UItE1q=xL>or z>+8B}zsOaltQ)B%Cf(bXJZ96GpQYHHo^}M$jh$C$<@NEGy^M}^-NdNm@(F)G+y@iK zT8Q_iyyl4}w>CHheHAfZ`2Vy3bn=W3lDOmiXx@XWiyAJbj@y>9=_s!7;~LWH;vuOcyp#+n2w& z033^`@L&J+Kq9xs`oCYo+BDU~ z)8LEA1@IEE9!?Y(+g^=p=8>@*qf$ly*X_&aNH&qBu`+bXY2HI#UL$SDG+W;fE^iOK zkr;!MbD8yE7ljbHsO8qvPh`ddxW>x(&AviK>Mw*2H;dFwb}cKS`KvT3rZ+eL3I-(W$^i$sKwvXGu$@2zui_sPfWd+;IPY#g1z4iA-&`Iyteyhq z1MR`!ldOZg^A>UCWWFEz^?I-WD_$1k=?D$+{I6h|2}bvj0phG@QB4~z=zQ+y^P5dI zLA@nc(u`OHf~dk$xuT638sVT|nvofoPC&s&z-TWM(@0d+W+a0&vFPkhpPARbeI0)@ zPLW<9(A|kgE9svWpt6c@ zJciUt5K~a^W}mEWhoHF6{?Cg_w0L&Nv~*-*G(p1taBNTh6B2WB$nx)V{&e?zeG=l8 z9B$|(h~1FO+|73SFxmGKQb9~deMFkP@3qhv?$S3~7q?mg(mNhU5J^Qghcfhhmyq=u zpYhudWj#txIUkyp8*E_hGb}3*I1ftrMaPvQTwTLUAm&AS?zfVq zG`|zl?j8Y_GLl8?&QpM)?k2M(hs0fhYIo<|IKSq4fdYymn(*Q{zy!rf*cGAg1 zfj=%|@3^M{!__w1qLK+>Vm)R#lVMahfH{!YPmKIusGjw|GRygUJ*ZX^7lca=rs8b4 z>0K#TE0%md{nCKZk^^nqW8>R%BSdkt`;z6~qKt6iYw+c6w^TPdwfq5`PtFJdHI?KW zXg?WXj$pk$qI|aU@Z^-J$-ay|EVV1XG{PKyq>l~zo}8edYYZ`UAfBnTR1yirIEgzz zAPm{W*DH;C(1z=@C*rwC7<)xi$h~_$zAbEe!+h5v$4ko~Yy9{2kS7k(iS3b7VTMFZ z!k>s+O%vxkN`?b{@`j!0Ve65~a$lsyUnR_9uLyxkIJxYQ|4Jq@vRcJQ^l>1_(V=cyKpzGc@xv+l+x?N@ zodh#UThH6t)(`ZWBozJerxh~&tws~uF9>>=9T}PxW<>6|jn^LZ$Tb)Vc-y(*EUf07 z^unm??hGH%m%j4lRBt9b(gcSY^9NawgjKSitujC|?d70U?bH*r7;g0cALps?xA6we}Vw-3JC 
zzfsQ{5)Qsn9SQ(wxpXp40HHNPiQc)66)~y!JAAO}V9CGh^ko zdc!*;mbk<6OoI!b+SL&u_RR@jHBhS4!rs7Uao!x%uAw8XcX-p9yP}J6s4fE2U7Av7 zDzRi$c@j8=f|-RJ#`=aogoYNSe9Yc_HKte4{M5`Xl%sP+&2Jl*Ay9O(3wmI_@oB}z zwolH!58ODeo$_?T;FFu~=d|i}03djA_zyP3)O(l?EU!2oA(?|?RE1gD4Qd_hJ_tf) zstzV-%!Y&)m(|x@{=(qn~{){r!Gmd`;omu@Dk6M7exT1^7wQc1| z{JEe;+OIhzA;k*ebMZDrBd|w>CmMbV=q-z&rn&ym2@g*;0I@`2jy{1Tit=Y~89go* z0}sDCW59D1iLNbv%+ThTvb|(~v+H5YAk`qxq)(I@hd$g*Ano@-jrfaFQ< zBZ>=Od!FV?%?=fCk-Kl>rJezuH9B%khuR=A&mA0OBzsc%3@|pSVD0X`UWB-dXr1t? zxaBf?j~f6O_t8;|_bV0vO)quYCEK!|-HI3t6pHzLZVcfLQxh}fDGJHm&N2mQOv^H2I72(+U0H!{zSJI9*GfVeP94 z#`7UYOb22Ey{+>Pyw%{0Ucg?~*f#JP;|z)4Lmd)JfBu??B3aW%Gm~JyLf4IJ0+>gF z&q5Oc_za?A=^2AuKr8dP-M$`>!UGCt$Y0Y4(kJ!|S1}nkxiQpZ1!@jT6}i*vJ19^d zVEcV$Hop2&j$}#+pnUbgg{rV@e@qqMLb4^S6*Y=QVIn7g`ovxVR+ij=uaU&aNITGq zv;lNl3cw3&?W7(!j5ffi?AuX@QEAPLA*}oMFv>^yp7wOYUCRwIV_U>++7_T%jB?sR zX%(hzXa(prZE5}Z`>u1_j;aBF&SgvmCbr{C!G%JrlCjrG z3L$5UtXO>%-r+Bd4#hHX{Pk!NaIeuu5w^FR%RGgh2Knf}RRe~@`8qntobztEBaSd~-YX{6SFuhdbb#26G^$vVSN$ z@OnJn0{r>5Rbu)qw+;6_buNJZtJy%nQz|J~`V0sm!kU1>qGKX}qIEAsxc0B^@Li`K zHf#?#cpp1Q4!=4DetFL56^`~Nf^$z1z$ecubgonR*d9RTof1N%H2o|9`x&wH+ngvc z)gZ92>tgg`!LJE>g^NW%;0xW&Uo0E^h>NB-K3AIs?Eu+FIH?{`b;aoH(f0KzVpdC^ z3$>oLy!h+|z@rd4tLOOtCf&Adx$4fRbY)M@|6EI~ieD+t!x?|rO8JxD?|(-h!wNU5 zg7v)i`odT&QJIZodO@XAX@V@`z-yWhir@lMM1w1<9=V#oiGr%-lyrcFmynTNf-xU- zcm?u7gZ#MzWzDVh9u6@TRG|6_ea1D2@}{T$hxQBa!K2P&8{Gc=7ef1+-UbeuyloeL zYDay2k{I@>*}BgSO)reJg@M_z#|W&YxUBi+Sh#W>7RXg!qyLd1M>1m_8Q)bcL|$MH4037y<0W5r1(473(^XlI5!C0ZO*CtU}myepiE%1y61xwj1p+@Hv6pCE8d4l z_;AL)yYyL&(+L8IDwV4R_C@wC1?rNLH@+D7-+9N1_z+ppzkXPw@k0Z6>ypS&uGoXm zL~5SLe2u|$w;OVcnf~}kxkh&Dk_u&kywqF%j{z<+dpe*`y$E)ZWOA2Ss;1ADv!VgvV`HcT_3kkK(?679l`~@b117@yE?eRW%uF?#aw;JfqZMN zm>lZBhl@FDga${uMjVE6)N~vRS6xG z;3w3{0Qy{YmFxHy?K;?XGEJu}(d*x2%J3mjP$Z|fft(zH8S&ZH1js(6azJ4AVJK6h zdwu-=4$ykuilN^8g|gw^^#uvGSjWJN^Hm_9$STI4T8Llo0Xf&?@cC1H6$EkQ>!q&; z_je2T+s%!7 z!c#t8Db-#(od1TGGRlTAU&W(_< zPCn2#_VHnd3jgEI^11W+L&09IW5}Hnh(WWcvI#vVAr;7 
zbY6er*~w-!vZ`6v#P5cPVHi$BrT5R-ahBIF&@n31vun$P#TIV+vqXl|B(dFuM7C2u z4Q>uT{S)7GwN<1qybS`H2WZxcX2#V!&-e4j7I(7QnP(luYVOWu)xf(lDVg-7niU<` zWyUE!bHD5}p3f%~PO)rgYdW77IruEA1_-5NBlKAB)6W|V3flR``vT$lirGRSo?gwx}S)+JpsiIbAl zF!ndchKv(>GM$`)#fX?TwFmOD;&7UIu!*MLw+n8sCwr!-8R0487r>K9t>Dt0rc*R! z-Pu4Vhv?wws=A23?g`09sC7_BoO?#&PeBnvvF6M|BNZ>+*Oh=P(UDgkA=-$|>(K-i z7mLa5uzlC2;vIQ8*yfL@<~@bEb*coO%Ir&?M#z4!J@CaFF-$|=jf>``H5)ijzZY81 zvzw38zH){D|D6*zXa_MnxXJ2Z{SRw!epvXT+~n66$F(G@{HCCy^q%6; zP0k4B83bSh|BlZzrBtW+D9-Pd8ZUq~Yyh7YGPD`p^C}_C*_~S^g>OZem5nvU6+cnIOR&%W!X~1$gGPrw3hDNKNE#F4J z3wv_BKBU&U1N^uWIjNfWEWa)WMfPbp>o>A#BgT|j$rY1jtQ-$fW%K0EAsi5+r9;c{ zp5wN$L)5E^T=o!?i~!5f9##*We1c&_Ge|Ql!J{&cBsEfPh8zL11WV|AnI@Xh{(WGc zNiS6W%RVZPF1wOTf6Kp#)2*{zHcr%K(_L@o(5IVCTb^YkvNSU_Qvi01MVV3tF=t9U za%VDM&+GZjwR{@=sH0m)UQ330rJVVCG!86~Ns8FHYA63Hw=hfi$GUTiLQO|s*Bm=x zlS)$;dK#G37i8V+bs;yvgK*KVwCu!ntQj+(ieyzNMuNH?a`Frh(FjWFS?D0bnD|n+~Q{u+C~sE%>AD0cF@#frc^=2*!ek)Ppekz1XX{SZ200>t zU9!~c2{pBp@KARuvM^8VOz{B zT}l@rb=wjUYGe#t$uo&RUKaNV=nCXA>_9GGNoQhgAkU>u*w4la52MG}IE($JdCKrT z;Rr$N51IneEP>P9ma+CWLQ)zO~6HjtDWeUaeot6sjHZzlWHVzag1zO+t@q-7qD}_rLn+ZryQX` zPbPgUU6OE&i01cTgZGe6#}{`I##=(y=Hs3@0#c*|q>3FeY3M03uy}yJaj7XzAg-tb!kl&Xl>;6C+U7n@fKnd8!TY1*zh~gLN0(DC=Z{6h8ZMC}dZuMmT zg2rmd9nkzK66D8j)`{1?D4+M-&O7$hMgAc@n7#Xyacyvw%7QhbY}FG@Z&UqOL+%+V z0w7p_$PVdl!RT??cg3pLWcjrR*tF52Mjv~4BYE}>xS-;T#l@;-^ud!YrtrhCHtejY3o~ac4e;tEe>4!pQV!dM zYab>dcQ^cjeL9ms-pJ*2@y&9ek)Dp*a(H{;rVWhE)KbbpPONX-uT0pOY6@i ziFQvOSvG9>^ZOW!`bO@fBa4P8!a<&t4CE?|t$2`ffLUBto78CNqZUIzhVg1c2UBa) z12zyL-GZDTcO&9yM*5tpOmxK%WGj&M*5S^^ zcRk|N-D?cbxSbv~He1{_tzO<4ec;SDo*J2I>k<^QOM%z0mRi#+0a7jD<+ALtJC_rG zmgziF6d0HPGxwpD^QD^tBm_L(cd0E}{QB}p3^88#Vj&RNbo7>64A5d0ux&WZw$<<3{6hSPL1rXri;(Mjjz5Oxk z;i}>1Km4e6($y`#U~Eqk`QEOCbPtF+t4=)IXKzs`O#-?e?-+9+QK8SIv5&d?$~QUl ztPXg;V($_GB%@Y~y7*zCT9uav$Wo8vxgNL6G^@o9_NDU+3Xf2-K1F>zji(3V>W$)4 zB$-h(A)kD~l~g)>rWd}@Ts8fp6B&#Y4*1d3npQ`r{d=A^_ew1iX65|yA8};E)xDAe zlxjy87}lI7qza9xD|NE(3F+@UiQ`qS<~d=RFENKJ3V1*mU6{`s?!dmXKo-WfXuoC_ 
zk0xwkFKh17rt>Hj(XDTT^N81Ry4?nJVuh`_-s)C8qUt+d^bn6egk4l};APZ}>CmW@ zf{hON<6SsDkm(Y2Jds$;Vt<4>I2RaK*AT zLhp^7I!XOXq6&RIqN{EJTA5a7Y0hb`dRvRg?v(fTtBgx!6o9;SV>dx*A4qi9e7Wo3 zLp>xkS2A7I{tJta#v5?66^Pr!Wm2<@(LHTNNO|p6CorFSHnfl_dcv}&|N4fs{wPSF zS?$Fc{4Bgv-rj~9<&sZ}EapHs79EuP7AEnnN)W)Lfhr%P zT^2RI7d} zyiL1H12c#r5}hwV6Ef%K0!O6v5|DB_IjHGSjD6CYWm1vEWV{w{Uwp+?XZqZDYSaFP zyWv&14k!0T#ke;6ig6sLCUbzd@`&<$R}KfIq22VOFg1NxIrISR(Y&z{mL5KSRWBzR zB~Sq`T}P@8;88OMB;!K%rG72kUtyjiUGiWt&UDAj(j*xpv_ zWZwR}cGO}tcYKMyH)pBZ5Jax=_TjNU3gt#-8ZkiE^ z2C#X$-wE_!11SE_Kz57=8&J{bsY}|UwmS}hxl5yI;(J(cjb2tj(>gV&`D?!PTyKZA z-I@hj0f2BAuY!+|#9r@%p5)L$gzxu?$!u+{g_5X0)|%D=ZI`f~Y2ZI!8r73|Mj)R# z$iA9=KmWLK`Pu&C7QpH>CE1h|DF~>BAc;gw2mmC-9O`u*@k0) zhQbHtm6Da84;c>wk!BLgj8C5z$k@o6G$2=O3H$9u~pJpun~ZhDlTA zbw|-kZ)JW#wxR~h_XMT2u@s`sM|r|}AKkWzTYzzmv3O9zBn|r$nsw47BW-N6%^ptJ z8~P|8M0KLx+f-q4_2KcU%CWEc7O&0iz(=^$mclU5nx&?>lYR8m<#)U& zpM>YcPcY#2%vN-oN$>6{B~{ zS#_B!RhqP2Lrqd|AY9dSD1ej#t-0pn&i3K`vG_gl-F3@-1EE*lCUd&G*_ajspw%wV z_Dg7`iEYCs>tTa~N1y(vz%Rpy5mIq-#rn|Uzl#87<5g-oiwnBZ1>b*B5>6j{=S-%I zt_#bTqvK8oWrlgvE16YCbKr0&)a7S{67OMu2Dw_QRW@^_Oj(gW()Cq)z^k~UeF{nl zSWdar=*Jc2ok!GQmkGS<&`uo=#UHDd7J)p_i(s{PIi9T1q8Ygm&|dz6Sr2B~vbzgc z`PxX(^Z4fV@Jhx;_K3r~fH$%gaIWD8(`eV*c5_S5#E|i)p>@BFiMhC*S=%b9HHRcT zv~~EpO;-5Bt=!VsbDs8&h~O|6Al7+WHc!UT@U5I2@mAHz`BrkACIv@TZ{)|kiz)5$ zSDU@)w0aLi`U#!5&s6=`Y*oQGT8Ywm;2Nsh(?fQ+NH9p~l!pZPYV84s%Vvuf$`sVI zfXpx6j#IDQ#_%FfJW|;kwLAjw(=;AVB_x_*2NL736u-8yWQ(VLGVx^;0KD2UsqLc& z#TW5NW*T)v3if-*`Ypb_Ap&4FN=9<$rz+;pNfjSoqx#GP01|IJ!hvqXn&?L>T)0zkKQ5e^79_na^G0?36qm|=&(dH*x1#| zo3%6RNjO()yd_V1DQ3Nt7vyye7IChOS<_+bPkgeY`-MVpI1+Ul>*G{z2jCbXZw;Ot{CetXa$dk zPB@HgmExOk1dU)zM#o2@Wo-4`-l|4)l)l?KxSF=q1A9e`>WBF}mb2SmnJCMGP(%(P3mQdqqi%sE@0RK|QUTx)q|vQ{a9Ee9(F3CmAEpV=6}PLb8S)l7M&j7oAv zGPzho8zyYoK zM_|T-g$UThsRuZu(BpwcAi_B1Q4&qGTn;$A^!Nj+6#g*Z=;3O6lkx}xxVO8yxME6W z1zXkY@wrZeY4w1@{z0Z|ld-C9S}m&Fwvb`ELOBV%^_Q>QGB&d&;cr!*Vgf#rju zsV2o))8J=qD4W$F0sY%jdup%M+dmIA4;etw}Z5(@m zTjW)04n!JYDB;kK%f+t9#`doEFBUZy=yd$NlhE4Nz3ov6c$&>)`Pj%8-lD#>+D|}K 
zs+ixdV@#IIz%F7m!R)g9+@=|h(FX=5vi4I&;JQ-D5aiV>hOPAn3)dRyRpgd8li}bvXb^_} z4*9w|S((1ieU6xRg_>`gn6ob=5}<`GMNywQrVD-2zR59_4hBPwXhl`bJt@sT+$M|) z@k=f1@uz-jqXXqE6Eoy2yBC##dB`+n^C0%lhaGZ)HK=uudTKQMI7S7jMd{8MEL!)> zh7U6@hCVMA>;2rqVM+BtS!Bo3{AI1j!w^JOpN?gJOirPhKn%GG#rRU>+q5lyl0~3Z zv1M&Yst7)>8_HYR{z}NOb+U0Xai(7!Ytm^>sf&G?JlUAPI5t(nbl>ZCt(1{ixdixA zBW$-uQ3M>$Aqi{S%!DLSdLICZcB||~n4pjjW={v=E4!?Kk}ch5?}7l7LV(7x70uGw zFylA~F=ZAeOyzTabh)AcL&J6&{PG-gM|K#`{c#XP7qAFDaRdYrTy9`9$_4$)uX-^2 zEHm9bfw@E+lH=U?ynZqe&`_v;o6eEC2M7bGHfpL0<(X=rn`)0#;FL1ACW(;1s@bHc zWykqfJpTZTFqR{cci7CSPr+vuQLZPme=dN0Nr5kV&>GaPUX}Iy4_-2AGxBG`FlrD4Zowi2X=bob0TY$`qpbd6dRcHRo(GKlo= zf=2;^&QsWqjThyDY_TYMV6O7(Wlo~PBzq*HHITy)BVlXEvc&y)1>B5 zs0ov*0SDNUCue!SwRw#x)^I^QiZr7N%nW zBg&{vC#ktN1L4TT8i-c7ub!j2>~tAl@!s1);&he|DRFlXp@K&iv#CO#+UDv$i8Qg4 z9~dTOodeUT3X-w=-NEO2)*qoRxpdyN4|5PIc7-9HUH>isYsu-4Jj2hs zI|vpxSjV)=Eor{mI;0;X-`d#SIGL2hZb>N&HDVmA0brv1tlHIh{ZSzzOZvOCJnL2r z4-)b71)uv$c_kR_SD~XG1`O=`y$2bGg{``snCu{a`?Rsy~dNKqipY8x4KltZ=J{vQ{G^rM3p8oYG z5V+Y@`nG(r9OWSVQ6~`oxoO*Q^XI~j$rVtyzlQvp9OI}3y6{a9)qkP$_I6n-$>K(I zdrjxfu|1$9r8am1e7{+ZmyCrR4CJ2`T#;-)wCQp;*inMT14JtlAB1<#2gPbyGv`gT zpP(tSHkTV7iA3s8lX67p{8U1p92;O(ayBn|%^(@CsVX?2;`yi>z5=YJO`96|Zou=n z-sjW-|AyeF7G!8vKq!o0Irer~?0J0x*K8J-F^-WF4EMbT^jqX!hV$HQ?dUm}Rp0>H z@RM$IFT9D?z-8BSR*Z#?5sPdi<67l{p%@cjtP-Vf5ut=F;-g-%1`2O53)UQB^HWiFNvamvM`@LN8M!8a$ zMF2fR`l@|tnkm&)as*J$R4_GTPc0@RP#^3hmU;aT@7a^}-A@CQB(d(It>`Sc%TS~4 zXA8gnR8N2O2xUx#@mdY0s>;OHRg||WO2E1ivkAXG+^2i(DXfz@_8eP@Ur)Eb%t%Lp z$Bedm1!<@Y3jZ`H#QZE%pOkozwC?X0+(KQ~Ds3DPU=gZzF+Jq1T~MH^g|5-4&Argh zNX+`t>#v}wK28PX$?i*v9yUc3^)7H9x-OzMwv`Ex!aPiYAhgjV?!v9@xC{ueDYA(* zQ5SOqTnajl8D!U1psoo6Ln!uBnlg*h*+SMjodUwY*w zXUok|)l8W!jdiifmW>9$te}!{Js9~{&Q+BJ!ha6lLH^Pa$Fsl0HVjm#dg;gBOjPvu zOHDCD(sL@)H<;AuAeYM#7OzJ`53}s>+!b@L4@Ch^43~XaK94kU6T4D|6XQNC-iPFM zrra?$BZEGiIG&>mQ@NCd$)nySAg>5a0gK9Ee#RFREpDVEoa_T21FtAcn-s8iPJeS= zbe@?S=n%mDg3`|woZ;ULH?lpFc#UDE7JU%yZzj0)Sik`Oka7LmHmA%@Z*Z)MU>LN0 znkBfzDyvoT`}HHv4*4Kt$O@GRpplc)i84x%+C@AerB?avp1u%^>d@ItfNmlsm7hp*Rz 
zK3P1#d25exlr#ioI@mY)3?MJ{#3k&rSTVWI4Wu|MBr0AwqGE$*5|)3VkMMEVYmQ9VV#~#di`)TMLhW=nxr~A(y|jbh znrg1YS5oExcVhD5U3G(>o{)XeyB?xhH%6cB3iZ-9LYJ}#}dsAAi|H_ zuJyA5K5gr+&h7kl))q-~8CCG6dNK=WC7PoMCn9%XF5IJr_#t|y(&j(@#3`GeGrl`Y5ER?~4on%r$G&(KHfPjHT3-jWAMhu8amMn6_W#ZK*H+r- z&bbOXagr*Y65ay!kQ@WmK1|E{Q!Vf7@5;2 z$1j%RDyGf)lLzEYJnNN{*)q`RVss@HGQv!GG!h4eL)gr^x;&IA#0BEXA&>Ag5<`f! z`#MR!r2bPgSbcn8*iPGXgO*2*)pgua-uzi#V1nQw>(}eHkQozFZNTO@`Ho{7Yws)s z*B;~Tyjz$Bnwp3AQA~#Nqwm@@K;NgG9cu}&g#ehU?R*s2YZ zkks$i>;wBjc(+UBE#IM%SJPG*5iR}&RqtYdIDaju7C-PKa4~!4+(W-<|Hzx1AYT&u z-p$3;U8Iu`{`1ct$?uv^XC+?W9JIH8Zn^%W8ZS6y;txLlZ22kH@#p8G_ zFK#O0VmDim&4qgnAb~y8eKYnrdGnJjw!Q=QRBwr%lgnaTpbc&*a|ZP7OO8cgy3MNW zFUf+MqfyRkvv{10bhqzwHN0RCD^j>H&8M@vk;;>1#g?BLmN)u>20~+8k6ByNka+&h z#*^_&iSFN+^{G+ua4K@D=d-qU&<(|0TlTA}ieE}t??y0N;orbhHHSLEKKf=}@<&3H z-2hK2q8;!=?gr@;=z1Kcb`m!!Ld3w z(Q1ML=7VqJqW$@x3`>7HQE%nyMGSAeoZl^BT*jUJ;-S)Y8$BV+$=oL&)Yrxm#LBGB zN~RboFN_w(^0qJ2*VdVu0EKe5aE|1Yz>3A?&CO(62E&0OLb};X!4Lf#RV97pz;e@E zZA4f)&a(F6RXoH*|Lm7h5Lmqn{$t4_xvMbAFRpbqCcN9JW1|2@p4obi!L`;ArPdYo zhuiGpt1e&CQ`yb}ckL*BkBV{K9J&GlXYcl$t@IF6GArN(DERxa zg(InYt%<`8K!q$0{S$(Kg$2Ta3TxNzp&jW?-Fo^aP0UN9S5z$^A>y!oWdhMA6}03p zXbWXm`g485wAT6#6_)#}PJPKGz@EHlTqM}1-I1&vkk?*#v#l0Ucx*cr6w?xTg`=G> zsgRR64=@ThX$bDb*3L;<`Ar&_wX+WDY##0;qUr*dbQuQ9k!$m;hYR3aVDm!E!Jp&% z{6&|bEmd6NE4lwe)ma8b{r2&ikS+;nL8Kd`8$mi2q`RfN8>G7%mhNs)O1fKGy1P5h zZ~xCZFV5^cXBeE}-a9^VeHWIhFp0g+|Cn=tK4f1B5`M^EWU6`2QZ>ofVei1xdl*(z zP2aP^9Iv)`_m4I))^zN90D0U&lBg> zuH-#BVq}0_KZrsBi>n;P2(E(rG94!rkC|+3Jaxqzk14LoJmoauCctcT)n}ua`ja{5 zU=L+t`O^B}>kP38LK^_j*n~dCp4sqtE>RezB-hf{^&22uo-q@p_EGV&3YA|TnTlCq z54QS@GPzfn3V@5oMm|NOvKvOdkckHO=MC1de6jh*;dNiLpvS?WQAbfhC`3;lf&mhO z7{3daGTrUAtyz#Z(>;0D9U4MFx!opf$oE4&iyEQw} zl0~m2?5JOG(M#Q)W`3;!7AaPEj@5yRbB*JJ)K;Bd)T5QB;}B(vlVL&^1p)fGmQxL2 z{qeMA1uXM)wF|+(y#gxiX91R-m#r1VaqRA$ zM1^M3aUR56z{o~~Jc|!l$Rgvxj870dI9N$Pi9WlP&aB~!be&FCEp+EpasUpG>#{Xh z+M(m!$wDLH9p5jgeS3E@u1X2OWmM8~oi3aWq zzyUy*JR43yfzIe|CiP7&L}>K}fghj1Uyl+-&SA&cLOFtehTixP1w}fzJ0;SN=K(2; 
zG=Wx;?m;uYNxJiV-YP#N^TFJDmb`QvG(^xY*>4!byU<47>) z+f!Pq&-?L6=7loO$KLB_km5pF|I-Q*$AmSz|D;cxGwchxv#!SYxGzoWqY=E296`Fu z>4=?L@o;#s>A7stb9FoX@+Icyp9&4j{v8KZ)H@`8FuA80>_B!^K_yy5SDj z7>HW&)!iH40M^mwoKAi`s^DYFv1#W2?u9fk6pR|)c^_!34*C+xAz7cN9X0Q4jyewY zeXrujO(~U(pW)06lM*pMn^~-Ny_j9`+~Si?Xf&ap^~(`%QJJaLfxlG};*YeBNMSw{ zN-w>OYge_dGbV4<#kY38ayNiHdZ%U+F>5t?qQ#J?pRec;tHO|(dk!QcxQNQI&w#5S znpLd|JeE7fHB?7FpavEs^(_CEHcZ}{oqw5cKXpDpy1Z=pCl{D}^LtrO16b}7ka(al zQ0iaz3{d%CrKAFfqYZQj#;(t`KIk>P0vV;wR(m)!DUV%ExZbc2Zv0ML{??o7&Qtqd zXg-#8p96qX8il!yr47&|tBiej==G4Y?|Z4~+`YdREJ%}8Aafu40Jr3FLrP6o6iOOr zXC~ykrG6sY%7FSM8b}692Nrdi&Dc0j`dIR zs;VLPHi+7K`hLZ}T|-OCKk@Y0Q=gf<^Ba9lrD`lVTzmR#nA3$_jhX&w(*T1M$~TLD ztvNy8ONn8Bq-beS?I;yA2(4RPmK{j9+D5WP6KtY5Bo+K%UUi8*-z=kL`V$W(#~)t8e(6hUy+7gzf*e7kOYR#r=eYY0??gYjQ4cfQCT1iV}Sz zF}<=fyP`u=xgbKS%U$eu)sdQyc*_oo70?d3#)Ta>k2~AT8Z_p6fe*4I&a;XKvN5O8 zGr+D>u0r^6d}K}=5;)f+0iTgznc*l2C^#7`Gd5YiyFcNb1fXoIZds_){@cMjaf zmIn$t8#E5S%R|)JvL>y3hq`Q?p?0_ zvEfyl=Fbqs+B~=eHs~xopnBEpG#Y~NT-Evdcpk3SmKU%~1bIVeiz3sGK@n-$w3-wm zb~<+e|1Z_c$Dfd_MDvC;p$}Uodt) zkx5B3APOiCn7Ef(GbU<3hS2xC+^ymz_gg2?pvDfWd{K+0ml~Xtsi63jh5{UFM>J_N zSW4DFoB=!^f-c;XymwJZ@st<9NBNaJ>U(gzgf@L*D)Eq;3@=_2eq4c7WA^#Ep3yYI zEMC8$zRx^f%&?#|0#Gc@^K@U*#`Yr%ciG#nzBXyL!X`2ycy44JH9f<)^ka@yUwf7s z?PY%Sv6`6A{08f(aUm8kp>1gpRmbT&L%$S%yOHY6i zd}?BTB$&Xw!Hc@Yy&a0+1c5-i$ z3rh+ZF%acTywEW`eWmU=!=a?NWrn&xJrkobgv>P^2fh!7rnmEogBN6TW3A$z!JmA0 z!^-e%@Vbi72XZ1MU2^fB;u^tG2qoyy51Ef8>*=l`D=!e4*=GgJHhUwbO{?A#Or)6b zRPW4;m#?&)Kz9b>vz3HH`I<4RE9iV{1?U`Cq)aMbk6;Pkd@dWdfa(ijvTn0u59UDI zDF&K5z(AU;LPYHqLF$9Y%FDc&!rqt94P5&XIRGnavM>J`^`E#{$|d;r`Nzk>pm?Cx zQcyf!ESI5*t|5_a-RXXrz3J-k90KSK@MnV3>!^h06sSnRD zXTBIm74`Ck_~io&(KcexLAijiRP5kJ@@2Y*P6bAQHc`JgiLphKu?ToD|4kyu1hS;& z6VBF_xyJU|pKp%peX+qlyNmZ*3{>Vk#T4VBQX<)z#vanE7G-~>zFi+pfP_0T&8|7U zmPa`9sY&yjSt-q%j4r3Yx7`>S1h~`KGd@j9rE9LmuU|`f{e&|o2EES@HmNT23k<#Q zV9dCu{AGw1BLa=2wneFIi8o}kNYf(rlN5aXXP!G8ntf@9LdXgF>VmUv?28Vs{>657 ze~})%ZT+YcX`^Q9VHJwk;%w@q*KaF5?`^2+2~uk53QwLPN!?ok`F;!A_!KmQ12u#E 
zzj%mK36<#w(GN$u*gEQD$>5rM{lz}tuNu*>3$;d@2{tp+b4oxz#D=#M7VUa^#d5eV z{`jxe&$gGcCNup;)>2MfsR2g-v3jo_t=%!VY`Io$MHT3m5q?W3)uRD zlEAX;7_i#ZHORZj{!rj9#(%AL$PU!cfA?>guz59|%Qw>3>uk#4Evq;rx7JQKSeu4c zg5vIfF>Vp<1^FEyl`UH?@Vv%e*;@gmJCA`ngb?zX&)sjnwA0gDEu3$wH&w3Ab)xYx z#a(c)fY~s-abh)cS;W;&D-c)7b0;xMmkmh4En8=p4=ta^l{FTDM8T#9U=5eWx}uN* z0n~%_D>}+?{zo88sM&XjIS6z7|5yMyXn`U_1yTv!z&nVLMp_lR^MZ55uBYapD5J5n}S;%W{f?u&inUlQ{uY z^(DhB)4mT^8z(s9DK+0(uZ9-Gianx};e?^#ijS{uc>N$s1mimhI z(}f9cNb8OLHzjr)`2h`hT!Y0Ut|3JrX!wp2WnyMTDDZ$jH!RcEQ(&$CM?3g|PADpd zaT__Kj*)a$VpX)&@7=^xkxzSc-_-KA5N?>uK96MKIpS@QdvrWw4B9^{ydk}zuKGMl zdlqhX3_=XTR~R3wMVnq3WR8!Mhb+Y#JOES6}1#gZmYOa;;vupu)+ zI37>upl>T+^UpI($Cy_XH}w2_uZg>bF`;XSdl%^zDUo6S?izZ7j@M)Vll1bV3vQHQ zkE!OT^vCs*X{d!UNLb}ZA&@j;%n1wMzw)F zlOzNYlqPhA2efE11GoK8`>v$ z;MQF2WZ;fW*$e(4`=rRAmwES!hui4PAmxmA;KrS2p~WNn8Pv)|&n|QZnY& zaq_epyg%qNI1`NN&rEv}7-e+ZclRnXgS{pgwqzhgsBePiK+@(tl+L(JB4vb>PtVhM zkRk7#TkJ!8BsFKWXdcMXRI}Yin&qDC)~v|anZGQ#CL@FXCq$Mu1h2SB@ay|C45_(_Y;YfD(|UU zpEiEg(vOsuWn|W^+Q#$`p7#N%#ssxLh_|iVQl?3Vi);^*>HM>Ss?Wu?c3OV`cVFk($vH5yI+M*? 
znfoF2rss%@89Y836z}hhh7i#V+pf^8TdpXuT_|DE0Ll=`K+Y*76~SbijU#QBMOo`Q;Eh<_!T{E~>=N~Fe_05(TsSviW#tkS7#mfZ zc$O^j{eVrKrqwB(FWLJV=>fmrIyBd4F+fI1R2E~>?x_(B}j^fma5;+LKLVQtKnQ4+IM<&mH1vxY_ zMTlatAn1@rp@=#L^ecxNK|ii!hz{OAsXF{1=gGWRWZ3*XcW!AQy)^{ijL=8av{C2T zmwDorhc61updH-w!6g6DxGSoUWToA{VldW3K%k4k`L5RES+7XLV;4jrq>GOH=;_k}LAb%f}Y@ zimL5N7YwWAUW<^9V8H3=7X8hiRMnaNi|FWw)Rn3#8(M(~62`dVCmZL#{)X;A$RcP0 zDEl2Y1@bWe0B9^_iG?}$Kq0dteje{zxFCexzN4TGx=e}EcSoUIcOU^YS!;XZn78656T#KRqwkbHXoyqW>yG$@C6T;wKtEh!9E`T}aeDU$!}1 zd~^H~S$#p+r(R&FoXJM7K-G74zMZer(Hj z45z_T+q0{i-dI-g4<1?P`O-C+H%{d!;AmA*9BOcp$jJ(dUv-<#Dt6YpN}mseK%-7> zRvAbra%1m}#E4yAuK;*uHgD@Mek~E_FLP+!!^#kb^wZW_R{d|xERQLG zgTTDvefRICS2N}>-rDmn7rpyubYNKvUOoNvOv;m@xBIK2R@ie&P#QKr5Z++s#c^F^BSlS5cfdxYB=-aTkE9sDCIf-fbjimCvZxX z1&=ofLLU1K)a%}wG#5(xK}bRc4?Wi%Pb7%0tIiVLAi;-fNnfUyDIS}Zd#B_-Evb?9 z1$$cMecuU42F1 z@jn{@AG;B_Nxw5f2&G_Bp=QvScLMK@L7njXWsN3TNBD)MF4F1RXmK zzK{GCde!A{Z0U7`%gUs8(<^aJPf&!^2J*d zKmP`@I>l#)dsSWmrt^^UEE93-j0EQXSrAr`|C-a&S*PzhYUB^3KSnVEoOY>>Y!FQ- zL`L%R3QCdS*HS>olm;Ha>fq}3eVO-78Y^6@em&D-o?KH+1fsY6)@$`ZIoplvXvu32 z%dyFvkmme9%YU{V!Kb+X1ed<>kq~g9hOC?aC8Lv_nMdyRUD3P(@n7XKjD8U$Y=Og) z{O&MnusV+^Xj_&-^-$ov3l_ZcJ>vs;R1<;Kj%Rb>R}Y%rWD5u&i@uRozwIK*wqpD1 zaTE~q-yP4&Xj(TEGbV>H$Y9@e6M-7{FeX9U3uHTP)G-Lc68aeL)P~EfOYBR-PXZi4 z89g6eS3lm_cvCPQYzB;fb@n=lHQouK6uM2+?!~pHg!>~cXhD22{8C5aV`fRBS`QUe zMTyJ!V4gfI#5)GwQIngGPnnKm?1yg47PQYwy+Gq}x@Tqn_&7b|wqy~~OARnv`;;zC zJ|1V-uu7cer{HC^T6nTwl`kO64Y^-So|frvCR7qC-Nt^H?5E*G}Kk9-mNj#qjwHh2MOq3o6 zrJ8nqj+bB5l2x$&9ZfS$z@=B=0UZ_f>PgJ3iIW1U3S6QOMHRK;shrMHX1%A@2W;{@ zxQ8g@<(w^wx$nSL9D=DDz5>opCAa`*8tE$%ai~U|CimiyijPDFl{1@^6%4Mmik2WSl23DqTUsBWMm77 z9^YyH%(-@4Wu152wO1!-7`BK%SB@rsRRPLhcm-_A!)<^@z+3ZCh2UO5`!z%5%S+{d zQbZ+jPUXW5p7zZ(KSGOR>{GZOaG8J<7=wW&+caRHO8_(jDj$BYy)b3*O*7byIPO0} zGMCYUkNeVBnV@bSN_u2@prl}T{s%z~{S5##azpB_N&KPhDl~so>wltoHD#KhR5{dg z;7Ny$4?~y%;G-3Q#28i7ujCG7wd6L+%B#W<{^;QWkH%XThSRLhxnmv!pe_dj>7zzz zbuq}mMYX7?u1tqVQVk8HewQ8MePK~HjH^u)J~m5OkKjEXs@^930waTi8>TA5KRu9K 
zV3y#=4in0`h1>AifYIa6)vv4_c4;!(FX<^*EBD(G6!9s<-cwzZX?h>BS#xD1#4xaC zU?|Q0A^^c+D|(H}j%Fk@nkQ&EpT+wMD*eV7bhGCBr`9}j;ihovwXSqFUe4gDHs<&g zouR3_#||`5#|&cw&{Wjsg~?rdKO66z*h}M; zQ7NOw8lIdFvP|%WdlcaZ6pD*d9%SgNO>C^!1Azm-oA{0z<5Vu|oUW#DasGbnYTc^I zt7i{_2-s(w-{~M)VJU$Qv8RfnoDsg$eYVu`Z(;neGA>)on=L*Ib!=)5Gxy2)l*D9htMzV{+M!}hlJ+d@>TXlM>}&qavz-s=7-~ZI3(`%9QWwK{eO{9!<#UW*JrN-89fVp&Up7RaMh|{mCgRR z!6{)$nr?oN2M>L8!1_x7$3ljXug|AkEx2dh8QY#OokAZVrs@l%>qPoE88SSxKmYPq zRg005-Tl)r>>V-vJGFNeKyjui*aKiyEN`(?M+Ra2?6U}}LRU5=5)Jng@P|-MHkwQ@ z1n>2i#A~QsJVp0__B|?*mBNdio?7X)-%v?^@ov$ib% zw_ulnp)x+h5qO>NJ1Jh@`@9@EcFWbe)!A)`+9wSy)QIzIyWMI!c8{jjZ{|vGeXk`y zC=4vpP}oEt#SmkGQq7BN(mnr1JyhJ}WO3-kCoJ~vqRkp=9+h zZcrrv9i#v4BfGQ;eoPNIlhMQ1Wz~JrUPq#cVPI8i<0l30Un2M{VpNlN9D8UmOB|~- zi1d#K(IRUNekock%?`o3l-A9#+Owo4>b?|n;h^4c82Lo_3{3YcsBEU{oj9ntcmI1x zu?SomyMMWkc=dhYt?T`qht8rIB}{CQ?Y(%D+T<0bD0$0^+6T|yKeH#%L9PVmtfj2- zQw>wV$0tM~UpV(I+01IHpEjkXom*uGKf{U|5ml&c4l_FKTQbED=y|4*?Zap(8hV*c z=SD@JqR~64Pxb9{CR_OzqUc?HCTZw1`Vjtb`F-F)_PkX!@(;1!#>Gl{t^Lk=CQp6O zHNni*a6(i!;YF8>p`1b30*A*E8^NIM|2pIVr*rH!`3E6jMQYK-S~dPvl`~C(gufHg zfI$ENj-xw`6tEajowazX_m@zU>x;xUztX%b*_Hw7N;0dH+W4i{;pM6NM8Y1$hTmR`hB+{D&%3skVtouZewJF1|k4)+F_{`1w_KdRS<0TYR zE_;dy5_-~J&xG|^0j75{R&^?@Z>q! z$>F1|WYkT`q^6szSzkdDtz3TeI^w7OL{BuusWNh9{ zia+SekTk&1prF4-vNW{30VSW+QJwshy#EgLHJG~@ih25`kqht85(i%1%zk2Yhq&=e zgYP|sQe*!t$bGu0ALX_C3QjQT3+c6?{A`fhB&>B-6la@>&yeca!#R$5k?D{E{Z+Y{TS_ru)%=HBP2m~B!Kq&YS8RdDV3RqA0geQ^8l%*t;@7aKb~@Ey3#4;zpOI+We^9(m`>Mxl+**USAk`PclG=9@gr5x)` z*}{N~w>h#n1ai?DpPqH{9j2iCv9@)9O0%;71!2(IFF;E^q`DUXve2-t_=g|7Cn~vI z*r6yZDQpo2KfCk6kH)DAUZqXE>kcY}6Qx@H+5`7!oI}x0IJPW7uP{xchFMGyS&RU* z6(1(J(3s(3hVg*x%Q=ueRR5QJG(J8KB-r4elzC8CBE zU@0p^L&ZfnIO#IDY;WId?Z5jUP|PyXIKZR8a? 
z^_<5it9$P6g#*IQc&(S5t_0H*pyf9ch6e4nJ{PqP&#cAM!x|5Iovfmg$b1G^pve*-1T>vbDph%QsQ z8cBlH-+0U;n81X#Yaey(VCiKZ)`$5qXf9i8de&jMt* zv}EezHL-y&v;;H32;wHN!;iM7w-kB*@>t|M{IO(+Ep~29&9~+S_vp9_b+*GV{ zL8U)e?=TXtpID5%Da(Vi&s1)P?O=i2Opj85#-)oxV@@#;=4xEUjP_VsYP9qW!nELc zHvEmA;vYZ9vCNdSE%dr2)D0B2pXa!~Z(8C0vS1OGV#~>C2(oBaEa{>X4d^!&TIR+Z z1c+J0Ol{}g@NCNx{q<23*B*sQEH)=l()R+ZYU8QbY?QE+H&v^(QX|5EPj(cFAU$lM z(P$Cnz4%z`z47ec;=1S{({`u8z2HbAo5lGz+br9lzk&Al+vPiyi)nKZ-4#V0s^4JV z2^a-m;Gx>kynv2~iy**Vq9xtqMr3( zL#4n|;4tx2Z?R9}B7R#O>g`yH-JwI=C=LB#zN$|p2~YTSDbJujdi4)W4lS*^*xaV= zB4-WpCD6oYg81T&`;oTuu;iw`>^T_D;XCbNQLW2iXW|H9$%k~rZ?s3;NsR8t_56BA zhbxtVIL4- z8&6{!@k?BUPoJpstVt+sHAaJcP=CWq!h0MProe*{DTQEyQ~eWB6H&iey36x%uzdHA z{}F6--RBy5dV2b+tfz!CmY5Q8>-SGy#3czU8}Js5&fb3`R*(O_>=4=H z1hN{BRR0!Jj94d;UYhC_4V_dEyNf3KfHmBoJA7~^kY8&a=9Z>-EdJ_3oXfj9?AFb#<{(4A82yz@@Z2<0f6UqSou=vD zJDXB-2r&^M{3im(Br5gV+Rg}LGYc6)lcciulU*FGbG10}p?TibD1Fa&WWKpDagHr*`EQ2#RFE7P?eys0`UQ!J!ZE z>7z)`@3|kg3kmq`D?hfsRp@oD@{4!w>D?pM-%9viOKbqS1uOTS6`8k}wYL+Ym3vgT zZF16s4MajD^(HF~2#lhv=UL&oeeFpA!KJ+SE4y9P08OLonQlt=9-7eeh0lrY29io7 z0seyI@yHz!*ljm#3-B-2dU_j(h`0H0b=%$U9Fkid_kv9Yjx=EB@HUjsfcoXHvi15@ zPd^TbWRN^B%(WuA+-!UI@X{V~X!8RxlmfYL#9=B=EFeDFKmKbeGsq?YUYt(hG7qnP zuxD&m2`Sq?@X1PJ_y-e*#)-Y+4lR|i zq<%XX$j!tTAh8ocvSNwN0)~&aZaqZZz!39S-6tJjnETARKsyyGD;X?hC4J%@-WtYH z5+pB)h8-W#IW_a5rsv)0Yl@}kdnBGsG)RC5v?z=_)Z!7$KEZWu4S{?d#%yOhw$B%5 zX+YSHKm+0|xLDxC+&he*&b~YJ0!pU6&iBpd4gHt%(VRz*iso-WEZqGgHta5BJKSaJ z+cAd?u5rK;RH2$bv0Z8yBIlkZ+HS`06mlT&K{L()aB%CX@Y3ItKJq$sy>?Y10~-l5 zBP&p#7P#H8e|tIVd42hZ+h5q@TXEUI*DCi8tS{U*IEW&eCC$$AJ8S0cy?)M?s4hI; z{j|%mgb}y6WO!r~{)?GXDmkw;!8;VftIc16xnDw>w$0?nTI3AjjEjIvTfhF4N-=*X zkwXj9-puwqTdb?;f}#2Rr)-hF3g}YX&Z&o zlhO(7EoiB@(tG*`bDNvqz>;4&T@;HZ{IsM!mU^5<47kQkiNB02&sRrp)!QZI0>sdy zTBhe7UU22Sd&r;B({j}xgO&o#m}UAeyaPGrQ?}X^nS-;f8JhQa7<1)cT;V)H4$Ff9 zt~!cp#L_-Zdm+O7AN=DnBos730=>2PE+{0(UO4@3eK+LQ{S+u2+`12=Rilk42J=sW zEOTu?`_)MZ9I~Cpj=#J&$rE^}rvZYNU1;aqU(<~H7qp|%(e(B2*Aax&|Ba&Pp8q`W 
zWA>?CIxAP?gn}j>TvrB1u)dUnye-m>gO4F4>r9Ws()RvG(REs>$m^ZJ6gR<2?bfi28Fe$BleKRseSKAaE^5O*`QUKN#-rz$Y^1L8U>CPtV$R}x~mGPGLT{CMF4FRiKepzn_* za(BVkq78r4dX;tNRZJGj@;>L?R}a z55J7fRNc{ADWFKF-kPkndi(SK$vSr>j9q-du9*3e+1gM#B@o2<#FQPxI#eJ>o6Y>) zbH=%yV_ee@^}t6-qv1w+9y7+GpC2bFm+?4?w0(BJ*~?o@=HZ&0=Y|kdPdY(+hiYoI zgj}fcmRJZow|HG*nBEr4?HLtG5{7Y@fnPm!gk*Ei`kiWxxv)WpbdZw&`5nXQTen*m6W!8G9&zefXB46?PAuLD9UjT zTdlZ{1I(GbhQ)+}3mDT!K79@hDp5lm`Gc)LhD<`izjKQnv9p@pZMw4d@^6g;ZDg9% zee(sts}Q_CR=;Wfvxs7?jZ>3MDwWSqdp)XAlX64y2ChXm`)RBTo+ZB~6_*BuhZH>w zqPMK0zU2g-z4Nj$cweaoNgvzA`IAK<8wEg1$D-(W7!Sqk%Fx6>XPX=Zz#206_J@Jz zoztq_HdHPC zfuRJ7>Ybi-DQXmvSZfCG-|t_3nu-ckLvP>U(fk?lI~$GSkLv`S`iB78LHKoOm`7>>ycKP@IYhaC0Zt@ph9|uj8*s?&Ut58WdpDCzO}Wxs!@TIuZQM| zyq6R;vYqYZU@Hwb*r+=&%tqT;w9>mac>}H0obMJT=bO05g<^3~E1iYX)sDdMUo!*L zS#I(Jy!FH?mqapA59=|)OBa7oA)wD}pfpD&pNE6TE2%L0`yyYm3GC z^NO8+`3`-x(QGiC`n%_qSNLo(^1j@s{nsrj%wG#fGkw&#bWPBccu~KyW!9><8+n%s z=WGvsR`J#6b{4{7BTeecC$@17Dp~w9Ve(c1#OglJFueH;K@z#kb3gHH_xxk$F4P?X zfv1;_-Bm%ib%lLSj2h9oU4(&cspJ^LLmb;H!NwWB_ZwGo$rjtwMJ2bQX)1)u&$SAXSkl? zCy6$7(`Vh=0?y~Qf=+H#xAQ5}Hr+V%QreXew`{vL7tN9=C3#J(*W(E$`^kdUXIaZ^ z_+j`Q7Z}Q&#%U#cTXwd*!nT_wVA{IQJ#giFfo$Ug1hrX2ALe$$X8*Lb&U=9Y%QmTWZ7e8?!sW(7AcX#M%1rP<8zdu2W? z#}XauyI_<#+WjzlRH5^5%H@o>YvMJM%Mzi?O~$bD?`RbrT%L0ttj=5Ob$M8wAuwvE?BvzF*JCr4Z7!xST4>v6>;_ZlS>79VfZaA<^d=+j z{LBs(ufabmyxN^f3$9Yj?@&B*_2`*z#gw8V6V;qnja~)ClVLv`2TWwU>EO@s|Lxy! 
zoPJ?V{)tsl+*;=BJki;G7r}ZHjyOMYu&9%}`tnN;TPA@* zpixBUgr)UBxg4<4i1vLk^BzcF9>`Gjzb+&J<57l4AEA`Vm{EdD8e+ZTNmWG^ACX0` zhSfU0K!nk=`R^t#j!Zt>N^ZVJ3hTMJ@kCbvjoK__n5z}q!wzf;-{%xgP0>&*;l&9R zM6RL0h{EjQ}9t=xIXY9uOpGEKGHp+>%^Q%!cu1D1DciK z*^qNk@wy~4BxU6W@V#^#QNbb3 zkG=cC9*rpJ6^asg8q;MA<4snYz6#9SkDUlYy&t#g@^C&mYYro}-LBOSx-C8LuTS_~Tl{U?T4!Ad%tQ>=nb_VX zlsv`~td9B|z;hEEs0TOcdKnas5gw$hO7^s9{br5KiBw!Ku1s%woHSm+5pq-y$>Xs| zafA{E?ft3#xzZr1I$Z6oP_I)|@9JhAA)a;lSL+gHuV4&{Z?2{RlvGl;V2AxQVy0M@4=-ruk zP%ZN1pa?NFrC->Mr8nMcniyRE2xJ?xPC07xoUdYe44g8U-VOuXMXiXsRU^(yI`;{R z6=!)QO&0H4ML}V?QRG)B?KO7u!NAB?3i9BU=>)nGT$Lyjl zdqK}|UQ-60n5J78TMzFBQg15O=sk=a+-&N|2x_>m6hc+rgS*%Fgy!m1Ev9qy4hg`8 zb3l72+JVj%#>lT~QqLsox8JbPsLa64;wKN@ew_V}xKuW;&#fDzuNQT8)A5uaq*{5g z2=Oc8JXwU1?t)Lb#>2HGw!T2vFBKm?8@X< zZOifA7?Y0t!W~P!r}RA&Df5@OjBz5`lVim%->YUX>L=pfC7v6z!#{NJKSCseB0M`9 z;Jy55McMb#0GitiNQ~M1;(~whOnx1cq9&Hg`Q``u<3W z+eTvAoIq!&2X~gnzknB^%E94q5BZi@0^btoL+8ahlZ`TFy$@9nuNd8Pj2S)hY~#^H z%Lbs((QEES{-o{G&|8UjTP(f|)1S9coOGXSF)^$O5JrqPF+cJ&m`sK_qMQe@j!iv& zIaM0z8c8)SlZ)~insd_t&jn_8x4fM7MQ@t=7i)jk)r{Tn&K@KFMiIbYU!ZmS~XJa;hME(1fTI@3D&ZQT%)Wnr3#H%%=0e#B|uOy*H|P3 z*D{OE1MX`8NDSUOOXf&P>G7~9zqKs4R5JQ;HZ!0lEi0KNL&155(dplUy^+YoD=fI& zcx>sfK~hlba=U5sPGIUb(ie9^kMW8{gG_?L7h2z|pZWqKvL|)aJaY?p`5)+$CA#`h z57iJkox@_ywW*T=tL2f< z<1o_}(YuZ>&bGYSBXIa(L$e!78xrhSjjBt*2)z8iiuaEk12wm-vbRv)O~qW%U@&6^ z&C;!k4u@dP)86{aBG*ltYqOn_|7{-sc!z!SnXu6f|MrbZLJ~HB!vO=H#(gS67bg@AqojEHc%aH{_y(Z zbW{3ZKNj<#D&t$t(LZJim~9-J7b|3o-kOi}Tv>crO9V8){P~0XrL8IS#Pd7wdnU-w z!3wiu+_2OCH=_blfB(U-krx43r6+nuUX~Vd{vBeRTow=8PDE56UK5SAZ1-^G< z=UvM5`mLN`e=PIOg;F-i_b$ypo@|nygc@G}kTqzI?ksxtBPEd*$ZEK3;TMig|2lJwAYm}EI+_5 z(YRaZ+Y}Gje?^i%*d8L2OxV=e4;z?$ro34;=G~;v#)#1B6}hyF7Z8-=F;%aWa-`V* znzWAX(=vN1`&S8+|EO@Vs(t8iV%*|>itZ^rZeZ*vdv9r`ZCNVs{terfS3JYsC)QvT zHnywXYdCR8-ZYZbg3smj3v&XRLEmuF?T&fGk=q&Ad!y!G4~547%DXP3kvT>ZLlovj zfR*AIFW1(5FVcU(U)zeVxrq26gE$`~fpYYb`+Ym@?eU_ey`1a9lVgZy!Fw#LCV?CN ze{G5jYYVko+A$ZXm^FEoN6a2e5(v|<&SgXd>q=mn>K4<#^Zf19lW}I}!XT7>FB|NV 
zLA>+WQ-7Y4;FoX`b4mP=`<|Il*xTr$@1B_5{%_lAO>2 zd8m>fpM4vY6nFhNltNy+c{?}BeDsM(vfFoo={e%1B_dsEFU4J5IN}W-g-tnBOh0Z6 zfd-zole4$+C@dz5H^*=6z!mOJ-!C3QHnQ1tc5Xo1WfI=t1mGKwaZ4ZKz+F)0CM!LJZ%lE;3{^y*r_xbRSXAJOzpBDF;&%EcnuHS_! z#tIj!$p)^Qde(W=Zl}QV<-@U~bN0u%vFmmpGMbWoA~POQDZK8j7?r-Q-rCr~vC>0PxXiSo*)4_fDWx8>4o39hR>t+LEy z*Q%7=^r|)?@}}{{qI_B9O*5yS3wAL|7Ba(YdcLoRgd-aU|hFV@)fjMji5o>r>}MD{ex zu7$3~Q{@24?Xay7Q>F!DMWrYs<0SuFTD`u7y7Py?S02|0T(ozT=iJaek3K9&bL`IO2K=HrKmuE?q`n?p-v3)q5S*b2|5b z1oJV%mZAU@`D`=e{e(Y=nPJR1pCeN4 zxjov$%4}0TPDrGRpB}q{bg|h>iP!4L1u@X4dUc)*hRjsHe*L-#j-G}GQfp$|zSwhI zGUf@3NvJCMmckP+_0CQbDSe|^4dxj>xmO|U?tROI<-zd7lg?_b$O~B67qI6oustjH z+JQ##j-{!hVgk?W>awz$A^LJzjy)nyor$Ya*S!a=Bh@&XR)ZAUC#qeOz zVIibLiLQyfO${d&H_p)&K=1#;K`3emz`)mO&TfOZRuev>EmxhYFj#na4MZTyfjnl+ zUeVM7-=y6Vg@B)_%La9mmVgSU>W(;H7f~)iF3NdBAGFZ{xQOf70Wa^!!HD{!-L zFsVp?>i3>S97zhXgEu2w4~7q|>NzhnQtycWUcj}f3k?(9eR=Pcs~Z11r(30ud0=uO znq4EGp5v^W@_qx0G1Y{ear47`7wZvv~Oj?MVq&4u9|P=x?qgv&84llnfqs@wtbA5xtKBKQ{)-@9kPHo9|)(D&z` zth65;Lz>}Uz%hb^Pu6}3Rt#W_FPua2u)^dT6<<;-ALKZ?%yY}m-#Sz|Odtw;&KN`4 zQs(Z0R5NJ3S7^o$7SKnmJNjT4VDAmT7g}as>=eY@R$p> z@9r2uu3+1kCLV|xKNS4o5-1W2_V+ubAQZALhhyA$Nouuj)U9t$b!a9(LY=IXW0S1R zd$;?j)y%=C_h3=9-#^qE^B(%g<<`Ze5ddF=2?U^~Q0&B4L4QcwSpF1M9Hq5FGbb>| zxy?YIJ0&W07j^Azxm;G^o=tX4FmgB~My~KPXpCM@meUbD}zMpas z#i78Jy}))NfhyEu&4Iqm<^@xpnfCoOU^X~W^ST7&_SW0f$Nn$S2>k#R&8N84yiq{6 z)zk6v%;>|hGI4R^S%0jN9oB|s6jQWcI?F={+~oTK@r~^Buq$An5_myB@EHp6vZI5kfFE)=rhmjc(@gCzB`IO=U+296cnu%`VMR zJ$1x=i)Yn@1Ez6{i!Lt)EJzDHN-60J{nXwSw3-$Ouit;ZgYhO$qcgQtU>+w*wVLSB z)QmfugLz+u1&;2;9M0D?p6F2i0U-bQp!9Wq?r*oK16*gY$R)0is>Ah z{bbXjh#yYhs0tS)tKa5CG;)jS!GG~A(%WL5(4fjVVH%)%x%3f>8C%7+Bj>l1RC3pO z1=XRBd(l)Rqv!6A=Hc)&{Y(Y zeeS7{t)`2dL2qfIUhceJat3jhcQh}ACw5}oy9jcHew_u(FI$6G8^`PD>RhHRCABFr zN*(6^VF4VB5mv^rsgGrWSz*_%?JJaE?e00{@o63;tA0j?3%Fm3SNP#Y|#I@^Hdk!FT$!C(VO}nl{e`_oB*WAu5ZNt1pqG$O@ zN=T`Egfd}Oc8mAGP*1IF)=Woav>_hSD0_!G^j5t??7l+GiXq!H`1mymQ=YQy2eVue zInqTezf9KcvHaOyoP+0)oC($orS{{Wsp_GWXS5ifXdZBu8&pQj`fPzwhKBdHT!lw? 
z#xXE)U#8E`A`M%^0-_Z$pr=9xB@a`3imSqmAiU-Dd7VtPjP#Tfv=YFtiD5VKz?73&K1 z!OD<9N9mY2OSbKe(hvUHXgv68Vt0U(C5>*}ayygTydbwI!I@XAsEf+tT^O{n|6YEZ zJ6kMvjg77|c1o?tBh#_psVlJ`7b7_{O{PDpdX~tFZ>}Xcf0ul2vnjeqmGUr8RIze) zKl03&WxY_!WzB^J79Ub&6Lbvpc|*L@v(l!&gyzi-l5j|_{eI+ZUa3)0D0WFA zjTOCDm19JQRbXNiOzD``-p9nzUx`Ci;Ewaov|e{^IwhZX>+{qm&rjTKDRE!VjgZwc zUOmc}kr|gD?n(CUpO2HFKjQch5W0AFdzy^XB#W`G+Y8&|P5Ln6H-XA{An@&Ie{o5d*;eP&(SP<`qPB#p^lcll^(X4md&sSDlE3>4a;3hn9&_WDX#V74}$rT(aN>6HWm1T!IND!>4sMz=r>%DKK1|P8Zm|G~7F% zvRlHVUK24H7=ZM=m!)3N3OL+qvE36aXh2>|b+!#W1wOA)4f&wQz*Ua|Eg{%ul=)vv4RnjH;mX5y^&{#4p*q4w9Y zPmU54$Mier8}6&D0P!dLe&SvRgxE|(%2V^G-G|pmJXWmCB{$(5wCvh5dmi9Z9i?#Ywk6y(vYcMRy+d;eh$2|KX{=whWc>in)US45x&}`h`_KK95psgxm z#=n6DFV}2P?{X<(iHA|%mw~X%$Zal&DJ=^9gN4j#%CF{TnBoYgtKKFN#$t+_0OIBP z@^Gz6&gxH8>V?ux3n|Y}kZmD2L_L!?kwuqm5GFu9LdzBRK8~*)| z`&fwU+QQjG`@CNh4bK^>EAy~kA(_c1lQ~LEdU2$?t$Hx2w0Gx)r3VOC!YaMtrq1sm z7w4*5Bwgff{s~soU(nlEc%4dlSid|^i4&|-8*)b*)$DEvQKt{Adi9f?fny`M0oaF4{z@~43BMJoVai{CGB`VER9-BkdyefE z@SB=P!#corDHvNWcXPU7@4PN>160D2Skd9NWYl78#1V;`4l>ZNw}1qt$O@Y4jr)1y zE4Mf)*2IZcS&KiCxlDQSarGmdTyGQ-cI($wH~-^eZ`@r;q01Z6u@mmCCA*E=!LO37 zm=e{Pnc)SSfc50~a*6ai>lR4&x7b;FICj&R835$NB2sD>)w>&ybDNo$f&pJq+M!@= zx983NAuyq)Y{HadaiDRi+j{phui>bc5mNf}2Fu8gtF*#N;@4}7-(;?j7VfYF9YjMC z-dXS9-b$|eUSxSOQUcl!Mx*f@&6DD^p0n1RY57CIc8!X;inq_)U+rFI(_C-Yee9zZ z*1J?5DM<^O0+6iA)5qeuUW<>s0u+kZ$<_26n?=({MH7Z4Yt!{6OH1ox#B6SLvE(~K zW^z{DdX!PkjI$nnc#jKVjmRB;c9#gbSU)uqas}h2W1JACZv{;>d%#302|+X|WDm%Y z%?;JXnb%#o#dxoNaBzY^+62oLdX(sk7wgV0^|OShhH&_+Pg^~7lq`ic(^DBOBf%bi z9s6ZUdVXUGHZ*mhmvD&HiDsxvoEt18MtX*)67ifg0T_wq+qBDF#$a0p(xH}(JJB^5 zJk|^G1YZ#PRjS2H@@{Oqza0Umc**)J(qiKj_1o{3m!rA|G!FkFyM64&hW8f#(fWP2 zYdF^`=ep^G6DdsAM9t^BG3NfR6G&I$!>3Jkv2ScR=suWXTjuC>2RdhPIV(6NA7tka z*mXxl^q z?)IYQ9~SaY`rmV}C&drNk0yZOT0oTRZ7nQ!}8bs=q-SWR*SNG`(t-NUI$Q% zJ$I@xPej6r^pR7{iGx9~`q~ka9j>{g`e<(P-2&YY0aBdYsF>gY3+5I}}dc9Lzh#-Tf(U3XRWi3F) zyheEH6V9R zS@#?z`W!k?>iKy0q;MRSx*Y7jbehp~up6wRvS)M>x z@k4Tb@hI)v4H|T7zW+Wmu!;1=iSJY!h%G{X^|{cVwsrCA^7`o+SE%7dn|BZmiGc7K 
zXQw+BFJ=WvQ4&HnZ5`e=|xG_U)gg%)Y8#a1A-B&GSG- z7Yvq9xspsiYIr}L(xtkT${!ONB*o?yA3*Nu7m?`qK>L85DCA7alN7@dd%8rv*@~Qk zjl>e*OdP(C=z_j~1$Z@T>ay)HAe%_P-oPIfYv>sG_>Zkg^V7uEj*O8xY3NN8L4|VW z_9~=rV64GJ$85Nb#}%A0xTKY&e$NGrD<0+wuEzA-fFUq=5HopAGo0>pxNHJo8kL+njUP+3MSciXe z!f@)6bqv+SR@rq@+-;H_BA@nX&+zo$cwnDDk{n{*a~=5u(DImCv3!`TCAESc%l^6| z7QdJ_DTHFI(zI`rfN<+WFLp$Tj<46qJ=6Ndc9dc7s!?I=M`$~ZhB6{oXq@d$g3!p# zl6a9)+ zEC+gbj`Qrtv#Rdg(`OOoF#Jl8!ioOsu2{;$D+%jAC?gSoxTYwxyg!s2ox6JU}lu3H`0_iGgFL1qK z*9YED8PnA=UAaA~>JM$&6{g9)3HBr>r-&Tv29MlM4W{~g3 z#PqMLcK6^imV}JcGto-jy4X-U#$k?&Ps&b02Y2{y?#C(V_15##3+O%w=%OL$YrE&a zi(*%CyA3wcy~E%7u2y9pyuU=NhP2X-fQA#uMd~Z0_tBN0`*MfRJBbU?b%nmnPOm1g z#;p~AcdYmTGJ0fhRHqXM#zzbG6B;ViL%E^=y?NEs1nW-mxEy>}8yW84CqctCkGGsVgXJ!sy7(0?}*b_s-wldZId)a=rLhcErW9Y*aK zlH|T8YcMs)em{s{onl%d%a@9@OX&!6<^6ZAiNG)9)N2~!fHMuG>r&l?=^he{9zNY4 zUysO)0bQI-_`i&w16?derS@&Y*0QDJMy%V_zHO&EknK4yg_z1G&C>wpeKjYoywux4 zvw(t<=6ZH34O>LHy?kc83dHjN@koUOTZ{3OAYey-wR8DhW|P?&9j6nc$NwQvtZ`~x z!EokK(A&YSkPMVVX3lLS6-P$LWvl--GdDD`i<4ycLe=QIIEdtxQA;?u1wCd=T%)&V z=Lai^KotWo$rWEV-@sX=Sy5Xps!@X4$-6MzQ;>u?d$&5Ol-|#)3=w>H{{`+;*YP?_ zdFlLhZ4#&k2Wd4Riar9|=rTP)DSK*0WaH#7he{ovBrx%RIT=_C$rokID_f9CY?~E^l8A9(-ADQrp z%%s@MU!$dcK;T(}eY{H#7>1FpbUB-zkt46B@Lhokf1?W~j3~$Vpt&Ku`H66HfT!!t zh+uDr?TD`Q&5BfvtakZ}*HlWdNaq60ScBZ%=yq7GM~U?)4*dYk2tHy~6|My!9PvtC z_5t){Ahg-Qv^Cib_G^WvX(s9jrElPJEX$`nA`$z=Gm~9wiLP&Y_wIRTo#gMJd!P)1e`owx3rk*>WQNm2=~*-?e*p1%ynZf(fX+>Jv$VEEZ0>; zXV@PO*EY0vfD*gOl01sM5wyyatIVvoFXhfmLFA6h0q(T7*^en-K#!iBP{}s==Rf-| z7utV*2Gjksf2h3zZn|UprE>KA&nae9t4LxuJ|r&Fol4TC(i^*n>@XHeFa~=vG4kST zH*DWbsFwPgP4G`{8AIeO{V$pNX_-KS7%=$qeVnD9+Pmbl(Jk~7 zeD^Bxx2U3-`{tPUYxG8TEPC7nFe1e!(&Ti?Kg?K_xy$Vp^g6VgpM;NiuISW^c2lZ) zq8Vn_a*CxVUK>S9>aY)O;o71dV+HFkXr0(Dn74itacJ~>1han)pjV_as}l!Z_x^>W z>aM^eN;O7oyTyzQ?%wY_dUN;=rZ499CS#~7tYnN($f^dKURa?da1C{hO?ectHHHA- z+zu6tId^JeEJH(l^pDBm!@+c!CF^)IJpx_; zr+hIjRH)IJOS%zk6$Lq6eLZ%zqm`ga@>&vH9jqAk6O6;j_h}=ogOj7Y%$5G6C`O!y zKobTXG~hX2<`KZykq%cXqk5bd>R2)HQxR;QtY8a?7b}C;aOEN$$O!l=o9>2x9NtZKW6TG 
z&!>^De{iVK=YKvpc|O26e!JWRa3r~jSWXRkQFI4$Ojoh zvU9mi*(|OjeO|WEH)1d3IL%dzH#%jQC3AKrnLpSJnIyd=*>pHnHhP`)S*v^Vb;}90 ztPhTe&T-I;SZ|56{B%0SEow&5R!@~-n7!y}-<2VLjEr4=(Ae)!OGa~n^7qUa%Drrv zZ#x7a7Gv6$$^NH~!pRvf{j{2zJWIJN^x~l?7XX+;LW|>wWL-=Pp%>KJ;pAskV>8bW zmdf^1RTLbO)HJveYb!1r`uSIFILj*CDZj*HG;SN;k*hW8ube`3fu$@DC{3~C#-%Qf>lCq=K?tYEZFO(3IG_A~|0opl(H}028gY!r( z{~$7OmdmPPdB!Fd#F)$YSN7Tyi8NEsvfVa>!U`1T!*CH1GCm5oUykX|wO!-%NklsM zNIb{~$!{k?zn;vDy5AfYgXV-%)a8M7B7w}+8jS3A1ApYvp0F@Nsf$*En;58uey^{) zQ8Jp$@lD>F)*COl2jP-JH_DZQsoR$Z)re?)+#9bkw;)S=vo`9eZR-AP;@^pV9VKPek+x|U_ChE74NU1ru-?*KWk*y{E2NQ$0FJzRyxrMxo z_h$ro6ftu8d0Ku^*dCusFcCc@pbYJsH4;vVuilap`a4BgK6UuIi2`)1PaBP|lAaXD1wWKxV$-Wz+Yz}sFSP@=~F`+S*E z17jBKr0sI+O?hX#@el^_GUu1$xuyZ1hFL^o)?Y27B0RNJ5kc>d-W^FER_J12P_~ka zgHuQu-w5+2nzy2)SMdAAyi(JwEvcg1vYM;hO>r__YOFIgPu+lxY9_^1z=2NSFIugP zn>H2UyxWXSrV3|cX-y&+K|NY%pl|lB@JO#4jtL%mHZ^Wo&PR!bt9z$$kL_B~B{nr= zNE@=)+@XDW0LjWxevqt5`Y&(gcXq;=IJ<+6@AUz z6^y@+m~z$Su%iBNkNyo``11zQ_umGQR)VF^8ojw6>;+k>n|>+`aqwTe|rh#wVM}@n1cg@zd8?LIk24^Zri?N$asM%-{Y~ z1_m<3pwfT5{QlY-4g={>c6+gUsXHVQQg-KyNPM)^G=0T83@J2qeZoGv_a%I3nRw_G zL?33>WU&QTq2DGU5E;rxZQ7EIX73Y+O8R2KUf)NAOJAG}gmU}2{*;<4vKY(#LVDh# z^Bzr{At3&zH}Wn2_}hc}SZ@k==hrm@r}wQ>)z?3k6RXWyk|E zjUKuT=B~8`u9KMZaVhvfTu4N$)rK*WlSjnE2v^DTt1^(3>CNghzO)vYs}$6$sa%(M z99sD-kh1`CM&}ZqrTNWqR3!<8NG#72Uq026+O?l2dq8msm>R}BmTjeS%tn8AJ091J zpYr!%dzyf6#!5#m7iXTnX3R}G0oX&+7M(WgPKp1wr}e}N5N#aRA^zJyNGE-ipm_u) zHK@~iULH5Vp8XJ|;QCC01QUlc1+7o7luk$X4V3twAav}w|HlfjsXXieVW zH2h18ft`eLJ~bm^V-xQX_fwSXN%6$tX^6Rn?6l2W#qGJNrneb3T#_EaZJQo3_g#g* zvph;a*O>=bWg-1Jv20w3P5RPEQUr-T%8Ce%4!pfsBvalh}GqMAn>D>y<9kxE1= zc=;=?6_gLov=wWn&E2>TlA(-do@R<=K`|04AfJxp8t$*{N+2aKPH``N4bSuWq*+?0Ti9ZiGuKueuX|f=yMvmw{^Av1qmI#lusuvvPtV3vgSW7xwVdY-mph@9s zzcavLQJ4aApF=WDak>~12#pQZBOG4no=RpcTEF*3;FHoKv91a*&eJQIO4t=T$tw|z zz^Z2|t7fbIIAc_yVppx$e{OG zHmB~V6sY_<^C;bJek;PsSL^d%f4zy3-rI&j z?N_?sRwRZ~q7((Yg3n@I7rIfh2;TVZ@I|CF^_;ncoB1_KXNXOAlXHIA1|!eZyZ$W2 
ztdj=G)n;i^Ax~UZR=Ik;CN?(8ou0maWQL$>AoutFpAywB`i?vs%P+*yNd0dr{UvwlLIUD=_04V+C6)HNRy)l*lapGN2p}zILZ&8YYBkqJF@Q@?f+Zl5ua);wRo;Q z${zm)Wkzy)TSJM9oQ@M81cbfIZ5QWCXVE2yqk4i#wgW6B2ynjrNO-_Djc}l4`x%Z? z6QRP(o{@iN@`TSirY@pu-kYz+D^*l|^9%1>t`;MamN-l*Nb?>$3x_kZUoEj-!{D`! zz~*$kO2V9pZ}rq@?cL;WS=|h-o6ZbDW{$%jc#Jof42=`TI`bm$|43qqY=Dp`sp$B6 zgpIm`Nsj2f1tQ~ql0IfT*_p6&WMEm z{)%|LTDi;P-V6&mSlsURy3PfamR6x`zDo%rA+)qAC)r~j)9Q?jB18Vhr?>CqvMa$y zNplccssv9GTwv%|30r>ibH~DA$In%L7$Nst8?GYiRt3?M!=q**tglDuJAs6*#ZWJ) z30$_h*PqLfIQ{^ortlH>Q_jF<+{y;qkW~2OxSy_{f&!aSANF$G?Ws}p5mvB9q7}~Z z<1K}b_YyX}=hnK=o^h=^>4ONvGPcDPh?&ptp!pZk?mLoHs6dF)e$|jJpDxiet`#=+ z4mcY=|5B@62eg)cTj-v9-ado1bpe?2VEaZ>L^g$ z1QzIC)O6|vFTx_9w-kH_J4enXFddALVc>{x_z*}mZ18Iy%S-?c%uK$N>4ATQuV>2F ze;(rh#g2GBC}aCvubh{gAFuEa4_Cj|JhCr9rx#b3?vKtPSg@hAZ|SbV{!F;$5;6?p zcqyYflRWLQyUbyVXV9*R^WS-~5wXy`vCr%I`vYvjjk~X_4QIdMHI%)b74q+q5xgLJ z|2vDjAj$(mT3~gZ;)s7J#l0faGw#CG!0Bnl7V<+ZD~KVb*)rI9W3iQ~#d*LKOj^IF z1;)aVu)S2*#gvdTUJ~0_Qu(U@EgD~^-s@z3>_&bKiAc3a;pmb zn~!^Mej*5_uD4yRXW0fO>QuJPw_BEaA#J)=eazE`ky^TDwReC8gnek+`G3wn0~Eo} zJrpA_!Qce6;797E} z@>ZD-yQHJ524a_T^?xgMwhpa=J+iV$yxb6n+MImjEQZ^)4=?HVDN(H+{5bzO{;+2l z7B^>X6umlHv=S6|3Dgn(I*ZpYypW3(&Sm!mkJiCG4^d!$Z5@j>p zp6^@TqRdY?(&JgTtu7RhYw(v#EcBSE-y!9tU&saG>o!ds@|W94;sMEUNIzp0_9{#HZ2UwbFRGi7MExadtCE6{Z_BgPwNX0z3%I3{Y8 z$Rjknok#K2*Yqcs=CI}8w&jJ6rUch=WI2l-d`b(da37{w;Nv z{lJKm9Q6E_O!HtryS|ClvOQgpXZ|Y;xbf_Pxbdva>_fm&u)CUIRh)MQxhf085LmF6 ztXj5Q|6u2)+KNZd=B`IQsjCUxvs5)K8gv{d_yQ^bSH$Y1r>^jSyJb;Z>AUhi-?Mf^ z{#6+VMgm`ElvMzz?E&mNkjQ!KPP@s5)GJvIxYu6?Xo=%CegM7irSVY~5se5N<9E|Y zd)=b*~W;rOvK+j4B8e)iXmipTftg#7>klih zj};V;RuqXZZDemMdPinmxk8Ci&-#>cM<~vdYOPbP?tQ`8{R`mwoD(T764xw{jD2yt z>I;42mg6`2-;dg6!d8My!VcsN6qC9})YHp2ewy#81vGuj-tQ4Y!qPM^2M4|%$zWp~ zoGi6(dpYpOUh0NB6HB%|qBgJtKj zuLT=ZmGJV$-O}-Tyj)!i;?D-yckVLXfN~eWlVyfrq1$2yF13G;Yg~RQt|3{gT{#Ry}{;SMft8f*E z&%{1348)b73p^a;tO7SMK}e~m$IjCHVZgEjA*KcHYIydCng=|*h1#D6gud2wz2D`B z9X8C_ZBC`@=ex>O$rca){7G<-+j|pv^-i$~W6iaU=n^29uWMe;?beNp 
z1hFR!r)lmh0rGa{4~HL3S)|u3bnF^4jFL#Yg>`C(A0P;E8rNLiaCG|Gj2rlrFM(dg z#fZ$23G zv_S*hqlW!+x$Y&S@OEn(+EwVvlj?6j#XAXBv+m7=m#=fJExcz3mOLIDkM zHig2`PEed6kjHbTSM=-k_7@fUq>r`ghb$Z-$hlIjcfEPdLLdsBF4oZ!(5Q3Gy+@1Y z+`gK(!zyw6Q_%kR>GoRB)H{!koq=F@wG=PG;A=)qg(L+J#79{qwD{phc*A#!@~acu&h;?aOiT4 zGN`a2zGbxKyQx2vPRYh)ftX=eN)mqC3H&w*fr{-s-rvT^z#RSwaly5DtJ5Sb5KAL; z)1h)@AU18Mx6{0kq)hJu7Jjj1SZ9wGVG!%kM!DMQ@I1jhcVkCF^XM~mIU`ZPWH;t)ungky zR`V9+F&#z^gDn_!04xhvtWroWRT&Qs#Td?!a=TN=8B*Q-iAinbTcori$10D&Zr ztJy!=FI{B|wbV8SWHwh;PpWI0isam$=4kW{dQiS@*B8!Ykg-VaT-9>jg9ajrVPmm zSnpLu)Bb4P&o4k4TBWmvhW_Im)tB1?j$tiMS!hu*Ne%L+q>7iZh_3|_C~Ah?G{{)* zI(d!KCpH(iRY2Hsktr0fX=52`3>S(m8X|bT__c&lRqV90%V0M$>cK5uWihdxSR0X& z9Q1-wotU}Z21m;3GXIl76*TN%sp=K0PW`0OpH$Dgjz{JLNkT_dP*Fp}_nr|=zp8c_ zjuoHBwq7pu&QFh|!+ zl9;+?fQd=YM%v1Q6YepdDVq|M+j75l?l(dqinpb%E(WO~CBRLMlK;^nvY}*|wIYI| z-?q%QQKJG%;tZ*%Mpn#Y__iE&eH?Yk9|8*W#u|gmW?5Xv{M+||9Q zj2_W3c2mVk*Ktfb@6!E?0VbZ1s%A3INz_F26+R^^V;;tDnfQm zV$dfAk;_R==udiG0biQ#*OIsgtjalllfQS~Pty4_zG+y-Gd(fp7~Fif!|aV_ezH1? 
zdp)(f z;U;F~O#yhA0S;Fw2{rn<)N~`zZOFKU#n5}YZPWi%!&`xU5Tcw^e@y@V;QzjE{_kt2 z5U|BD-uV(Q*AH-*Ll#6WdBqKHsQ%vctOAyE<(tLhBjm&y>$031wrhVV+?OvWZ=Z}| z^{&4eaeG|Z?ZLEVWL1Y$b_r#~v~Ikwjl4q($r&^Mm+8%Yr;<}C?Q#6n=s*=Ccpe7IlXW}9;sDADAw)SZVM2(Iz9R~ z0N`CCmz!IC2S2Plq4wOHD2Zs)*k_Z7W>-K_@En1Q5roR^zl%0n z@z+@wfw_(?eIYwK(+}L42v1wY1 zXW~D?mQYIIbKlshBZM;k_v)zumX`b1ie>+6#Z2=gbK%ejYZhG+eHf+#fOus3LXC>< z;1SpYo;Nm2vMECMgkKT~z=((f`$o>ialMRd3;&YH%^k!5qVN9^V7Xz?(*0eBU&SiL z@-)9h^UEQI;F!Mdfm%6VxAqtvBY+g+`z^0~OTGpq814$~#A$Q;$q2&7q4A;&<=>wG% zSP4$N)MTR#uGL}GF&&%bXOmklzn1UXt@R0Oxd%!($j`tA)jM7fqSgpedLKI^{n;JI z5N(_Aep+|j*Oz8*o^sUJm+S;C=9e`@1^K)R_7y$j9lVQiIeY6*<2Um1ZWxp7=a~-{ zi7RBZcYvE$mY95pjvMJ}(xgWIuprYOQ zp~7>k9y8}?s@KgI|ZhgD@cO;xfm>toJK$Wm-qG(2uz26?FxZe?Zc3-D|0Wu zxA7o|fE8{iKV*v|N*FpgNOAGcWoo6qJva%eZloTIT2C7ayVKXz?s2h&fC$paGtl4C z&;Xe|g4iM=wMTt$Ridj^A2H*z^8?0Q^jcJA`uSzFa zGiUoqi1S$Z1pez|47GVa&oj-GhK-j?azS9P=oWZ2jTnBbQ)~Wq^ zNDF$+vL{Tv$?d~T<1G!WoQmTc9^?(XtRvj}T&^=C{p?z4@0vZb(#6*{>rLo$e+0`6 zTx_ftnoz&|><*9We?NaRX4m-nQ2T~?ZF<2`%#q-)C00JY?|BRDYT?*6T~65pI_zhrhCrcC4Zrnbeb*Zt^c@fbaiTCf?{1;rp33*4 z{I+_-#@I5gFQIg!8DpoN1$8F|sfyb%&`CuTi6|VMa+L&E)r6Y~uBw5msY&ubYf%f) zM{YdHa4%W4IhP?arZ<$Q?yU_SB3vMf!IhDCIEjfV!@YQr=8@v->SMW_`vF(ng)e6=yt)Xz{DV3Aaa_XEchK zZbh1>S|QP8iat8BMWe4C>G**&g~ow%HwtGSt<@P%_5iMJ6IEGkW2v9p+}$A<&Ccn8 zNF6K~b%&rvF@C^wGW`m)FhqXr1w_NCQ~(YYAE}U+s#K~q)Vyx^f4vT0i}H@5GYN|F zhAYPZ!`(k4@BgpIQosFg6aV3si`er)_GEfh5z2~TJy$8X06ShpOkl;jUihkbvuINj zc)3r73>!KLDRVy{7LURlageBG7sp3`V&Tr6=-DX>S0v?_J{iz2DT`)+{*J-eO^{?; zay#P{+mzqVXCv90E+9M2v@*v6wlUnLE8@RIZR$5$)ORR_*eAaD5+SBwpM-Lc1WI;q zr>CQHQjZ{ef5(uvOF>ogVf?v2ek4D+9bF^1g_^f3N>D}IhenSPdrJ?TA%gW?IdO?IHsB+PFok8u%iXe zW!)Xoxc{?AD}UxqOvwPGwf`LD|NJIjem>HzC0>Yfh{X*MCj<-qkrW++#d@TJs^Qis zlN9`?l!jwIyQrck1Tn-Xhb@b7i@sh)01CMx8-cxqetQ(7^tY8e&8pHGyJVWkt}Y<% zrTw>R^U8_w@9ZgV>%V22?-C?2KCJ(K^+VX8hEQ{+NU*|8SP?;5N>ClHBr=Idj~gUj ziAni#@brNeF)c}7_azeB0b$w;gSy^#5>W&lX2o2g!6I<*S!bHE?D`=W(z^M zL9!?1`V^2r6@3IHn-JxS z?l#}Rq4@xJ4x>R;V^4tf9VHSy8%#2R&J=%SGu-;>|N6*GpOw`-hZ|-8=TrR8zt*3} 
z!6Y|)Bftme9A_4)grRS}{QtFg<^NE%?|-JTvNtvNWJFNHZx6`TO~Cy2{HLW-9h+u*$~h zL(o%W(_NlJEyzNWt6dL*nZ-9XMc;dWa-$xZUC^^mpB2Y8_|+Eps^?cmA)i22eG>B~ z_k$D@M#{WV%jPLw6Ybotp_Wk#op!5Tj?!?!(BpYb!YjSjdbEI0E@oeyGIpVd=?POD zax$^!qL^Qb9j1Y`6OCM@5zr=v`b#9AmOIPQk(5J}w+$weHwW)!skPqY`zTdFvSTT@ zy&MWY#;}g&l!9E-_TE)d$YFMX$>&3ckA4@A_nSoVwDZ>-<{MI6k4W!uRO{(4Om4-C zh(!SWrT5;kk`?tQ%tp-UJdzY;Vq(ev>pGp7s&e+(a5;~={!`E5r^$WVlXpKm2WcDv z%=&V1wF{*K;j9FML8d_U)$EA9?`0klK)g1se4r_XKnQbreXqty)Hst<)PyiESU_sP zJ6zFPYBzTYw)N=6xf?<0+-4I&8KN21CbTxO+8gAHS*v#o3@>a0{T1IP zcGT>_K7+pZ+4o0gF zV38eQ*11;Yj(X$7bO&x}c9s<5BN<8|K?NHd)|--`@=}`@5x@q1%%-GM0sKTsbM-X6 z%1ZA!mI$siH+oR?>60bMz7M+Bq){EX6Yy)#c3L&YbKAQvxKx0JX4F5kjspxarUdkB ztHlp&8oK9?XLolj{EfO29PUpmy|*Ex{_Gn6Y~iEd-Fj#cq}rmobmN z`flSm@b$D~<5ZWdTVsGkoZQ6DYgy;RaB%WlPh-4S3hwz??x6;byUd`+?dyRzBofkb zho_suay9qGFN$l{t}CC4Z`Yv&DqQRK9m6>-&SYZ=0osQCiQsau=N%vpWY`1^uNl6k zstFRTQjSP`N9DUI+rt#f%xJ5UKEo|{h-e>rFs56JiNAs@66SvdE{fS$Wa3eFhXQpM z6q;NLFp$)l+5L`^sgv1|OYR_$37sPFUdvormUaHRR_?cOM(uqS_a`NMtDX8rj38fT zm`~lC=qZN`4UB~fZJdQA0lS0x3GaeJQsW&Mqlw-lhgVmfsX+E($~Jx&(UPO1!m(G?cEomrOB6aSzC>5sf_KR7`Pz=kV&ClHNpv@$A=F0 z@kAXDI3LFW3tzSjZ$7{~Tcs}4OBVU;5ET%ggB^Ttm&7xpR|d;kD95R6NYzmu^RER7 z2wy0;`Jti>pWmZc?p72rGrW-7L^mlbT~ifXc_5mPSb1CRaz)c>EFh0AKE89Ln+Z_a zgqQqeINm?@SCY|1YBFiR0e0h2+G%4GbD?Af#Nq;Jt)xI75tHX~1t54^N^m_6UdI58Ma9ir<_ct{q6Nc2JooRf?zyK0C7 z=&QAIXL1e-60f=zai$;k00cDrFRPR;OiLYry+_IGT~b$hbd4$Jh;>t^{m9vPX~UCu9BnApP~57xL!|k0&dQHPR9S>@hnCh~}`g#w$}f(`0OJF>Gax zBYMD$huRZuN7!1DcQS4H!XFznhaSxtJ393$Zf}bHi9=|+PO+H2`qXn@4KTfRa=nmn zrG2GKwFtjGFPwJ&jX9WYMIeC~%I+3O#os#8NI#aNF8vvjh+sK7ZJINldVsS6#Ym?3 zU;g|p7(N)|g|CAce_QkA5zu{F3?YmKuu1^?`xY3MDo#x;Hk%_qNd+PZK54 z-|pzCMZSw^VpN=5=XB6vP9P9=nLS|_CEj76(Ao77u{ytnRV9?JR>-_Nwf;lPt%b@T zf<4ai9ojy*b=S=bB6VYpp=$mMN(o89%!IM#7Q2&+?pFnlY$03!+A&AwiHxpf|)U>sLzWe^x z#gykymJUtdmH&i)(UYO3mWlrS$a7J3CL9P=4!!GZ)y0s25c)qJO)I&83+CmgsAjL8 z>}>)nfVb=?YnMNcQsIjY{#KW(U5wDK?->}6g1D9ynyu1e#p#zYhh%E9Tlw>#gj6hJFcd!g?S4&t$Ng 
zJD#o}2b`xF;1;0%qZpHyGtV$Bzz$SCi)VJn{FX)*2rO9>V&4ZrEX$7sz^b>cFXjm5 z7ngbsh&RAZn^maC=UGhHT5PU(Hg8Q|k+(7{72SKI1(9Co5jeofX1{jm`Rz^FoVy}K z<(xY$XP(2SP-2&dN`_=Y*>E!SWpBY>%4_CP-yWozHsI&g}o0F*R_pZ`1H}koN|nDjL&wIDH6{eCN;_??w