From d97abc0d1e9c7e058ba91e19466e1f09d45d613a Mon Sep 17 00:00:00 2001 From: nqb Date: Thu, 18 Nov 2021 15:11:22 +0100 Subject: [PATCH 01/21] add vm for localdev --- addons/vagrant/inventory/hosts | 45 +++++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 4 deletions(-) diff --git a/addons/vagrant/inventory/hosts b/addons/vagrant/inventory/hosts index e332ebef564b..cd4ba81d03f6 100644 --- a/addons/vagrant/inventory/hosts +++ b/addons/vagrant/inventory/hosts @@ -101,10 +101,6 @@ all: children: dev: hosts: - localhost: - mgmt_ip: '' - mgmt_netmask: 255.255.255.0 - ansible_connection: local pfel8dev: box: generic/rhel8 box_version: '3.4.2' @@ -140,6 +136,47 @@ all: cpus: 2 memory: 6144 + localdev: + hosts: + localhost: + mgmt_ip: '' + mgmt_netmask: 255.255.255.0 + ansible_connection: local + pfel8localdev: + box: generic/rhel8 + box_version: '3.4.2' + mgmt_ip: 172.17.17.10 + mgmt_netmask: 255.255.255.0 + ansible_host: "{{ mgmt_ip }}" + cpus: 2 + memory: 6144 + pfdeb11localdev: + box: debian/bullseye64 + box_version: 11.20211018.1 + mgmt_ip: 172.17.17.12 + mgmt_netmask: 255.255.255.0 + ansible_host: "{{ mgmt_ip }}" + ansible_python_interpreter: '/usr/bin/python3' + cpus: 2 + memory: 6144 + el8localdev: + box: generic/rhel8 + box_version: '3.4.2' + mgmt_ip: 172.17.17.11 + mgmt_netmask: 255.255.255.0 + ansible_host: "{{ mgmt_ip }}" + cpus: 2 + memory: 6144 + deb11localdev: + box: debian/bullseye64 + box_version: 11.20211018.1 + mgmt_ip: 172.17.17.13 + mgmt_netmask: 255.255.255.0 + ansible_host: "{{ mgmt_ip }}" + ansible_python_interpreter: '/usr/bin/python3' + cpus: 2 + memory: 6144 + stable: hosts: pfel8stable: From 18b8ab1d3de545080390d8aea200eda6dbe1e206 Mon Sep 17 00:00:00 2001 From: nqb Date: Thu, 18 Nov 2021 15:12:43 +0100 Subject: [PATCH 02/21] VM name will contain name of user outside CI --- addons/vagrant/Vagrantfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/addons/vagrant/Vagrantfile b/addons/vagrant/Vagrantfile index 1ef093642614..180fcbfbc041 100644 --- a/addons/vagrant/Vagrantfile +++ b/addons/vagrant/Vagrantfile @@ -3,7 +3,7 @@ require 'securerandom' -BRANCH_OR_TAG_NAME = ENV['CI_COMMIT_REF_SLUG'] || 'local-tests' +BRANCH_OR_TAG_NAME = ENV['CI_COMMIT_REF_SLUG'] || ENV['USER'] DOMAIN_PREFIX = "vagrant-" + BRANCH_OR_TAG_NAME + "-#{SecureRandom.hex(3)}-" Vagrant.configure("2") do |config| From b4c71caf26ed863246f1d382fb1b9414d3ff3e22 Mon Sep 17 00:00:00 2001 From: nqb Date: Thu, 18 Nov 2021 15:45:40 +0100 Subject: [PATCH 03/21] define networks names based on user gitlab-runner in CI, Akamai users outside CI --- addons/vagrant/cumulus/Vagrantfile | 8 ++++---- addons/vagrant/linux_servers/Vagrantfile | 2 +- addons/vagrant/pfservers/Vagrantfile | 8 ++++---- addons/vagrant/winservers/Vagrantfile | 2 +- addons/vagrant/wireless/Vagrantfile | 2 +- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/addons/vagrant/cumulus/Vagrantfile b/addons/vagrant/cumulus/Vagrantfile index 70d6da86eb6e..57b8c5fbe490 100644 --- a/addons/vagrant/cumulus/Vagrantfile +++ b/addons/vagrant/cumulus/Vagrantfile @@ -113,7 +113,7 @@ Vagrant.configure("2") do |config| # link for swp1 --> mgmt_network (vlan 17) device.vm.network "private_network", :mac => "a0:00:00:00:00:01", - :libvirt__network_name => networks[0]['name'], + :libvirt__network_name => "mgmt_#{ENV['USER']}", :ip => networks[0]['subnet'], :libvirt__dhcp_enabled => false, :libvirt__forward_mode => networks[0]['forward_mode'], @@ -121,7 +121,7 @@ Vagrant.configure("2") do |config| # link for swp2 --> 
reg_network (vlan 2) device.vm.network "private_network", :mac => "44:38:39:00:00:02", - :libvirt__network_name => networks[1]['name'], + :libvirt__network_name => "reg_#{ENV['USER']}", :ip => networks[1]['subnet'], :libvirt__dhcp_enabled => false, :libvirt__forward_mode => networks[1]['forward_mode'], @@ -129,7 +129,7 @@ Vagrant.configure("2") do |config| # link for swp3 --> iso_network (vlan 3) device.vm.network "private_network", :mac => "44:38:39:00:00:03", - :libvirt__network_name => networks[2]['name'], + :libvirt__network_name => "iso_#{ENV['USER']}", :ip => networks[2]['subnet'], :libvirt__dhcp_enabled => false, :libvirt__forward_mode => networks[2]['forward_mode'], @@ -137,7 +137,7 @@ Vagrant.configure("2") do |config| # link for swp6 --> inline_network (vlan 6) device.vm.network "private_network", :mac => "44:38:39:00:00:06", - :libvirt__network_name => networks[3]['name'], + :libvirt__network_name => "inline_#{ENV['USER']}", :ip => networks[3]['subnet'], :libvirt__dhcp_enabled => false, :libvirt__forward_mode => networks[3]['forward_mode'], diff --git a/addons/vagrant/linux_servers/Vagrantfile b/addons/vagrant/linux_servers/Vagrantfile index 6cb1592ad45e..d15dd3ba40eb 100644 --- a/addons/vagrant/linux_servers/Vagrantfile +++ b/addons/vagrant/linux_servers/Vagrantfile @@ -22,7 +22,7 @@ Vagrant.configure("2") do |config| # only from our expected subnet. Allow traffic between guests. Deny # all other inbound. Deny all other outbound. srv.vm.network "private_network", - :libvirt__network_name => networks[0]['name'], + :libvirt__network_name => "mgmt_#{ENV['USER']}", :ip => details['mgmt_ip'], :netmask => details['mgmt_netmask'], :libvirt__dhcp_enabled => false, diff --git a/addons/vagrant/pfservers/Vagrantfile b/addons/vagrant/pfservers/Vagrantfile index 1f00d243a854..0f47752c25d1 100644 --- a/addons/vagrant/pfservers/Vagrantfile +++ b/addons/vagrant/pfservers/Vagrantfile @@ -24,7 +24,7 @@ Vagrant.configure("2") do |config| # only from our expected subnet. Allow traffic between guests. Deny # all other inbound. Deny all other outbound. 
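For reference, the "mgmt_#{ENV['USER']}" interpolation below (and its reg_/iso_/inline_ siblings) yields one libvirt network set per user. A minimal sketch of the resulting names, assuming USER is 'gitlab-runner' when run by the CI runner and the developer's login otherwise:

    prefix = ENV['USER']                       # 'gitlab-runner' in CI, e.g. 'nqb' for local dev
    %w[mgmt reg iso inline].map { |net| "#{net}_#{prefix}" }
    # => ["mgmt_nqb", "reg_nqb", "iso_nqb", "inline_nqb"] for USER=nqb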
srv.vm.network "private_network", - :libvirt__network_name => networks[0]['name'], + :libvirt__network_name => "mgmt_#{ENV['USER']}", :ip => details['mgmt_ip'], :netmask => details['mgmt_netmask'], :libvirt__dhcp_enabled => false, @@ -32,7 +32,7 @@ Vagrant.configure("2") do |config| # registration srv.vm.network "private_network", - :libvirt__network_name => networks[1]['name'], + :libvirt__network_name => "reg_#{ENV['USER']}", :ip => networks[1]['subnet'], :libvirt__dhcp_enabled => false, :libvirt__forward_mode => networks[1]['forward_mode'], @@ -40,7 +40,7 @@ Vagrant.configure("2") do |config| # isolation srv.vm.network "private_network", - :libvirt__network_name => networks[2]['name'], + :libvirt__network_name => "iso_#{ENV['USER']}", :ip => networks[2]['subnet'], :libvirt__dhcp_enabled => false, :libvirt__forward_mode => networks[2]['forward_mode'], @@ -48,7 +48,7 @@ Vagrant.configure("2") do |config| # inline srv.vm.network "private_network", - :libvirt__network_name => networks[3]['name'], + :libvirt__network_name => "inline_#{ENV['USER']}", :ip => networks[3]['subnet'], :libvirt__dhcp_enabled => false, :libvirt__forward_mode => networks[3]['forward_mode'], diff --git a/addons/vagrant/winservers/Vagrantfile b/addons/vagrant/winservers/Vagrantfile index 838aea4d77f7..0cca3724ad9f 100644 --- a/addons/vagrant/winservers/Vagrantfile +++ b/addons/vagrant/winservers/Vagrantfile @@ -21,7 +21,7 @@ Vagrant.configure("2") do |config| # only from our expected subnet. Allow traffic between guests. Deny # all other inbound. Deny all other outbound. srv.vm.network "private_network", - :libvirt__network_name => networks[0]['name'], + :libvirt__network_name => "mgmt_#{ENV['USER']}", :ip => details['mgmt_ip'], :netmask => details['mgmt_netmask'], :libvirt__dhcp_enabled => false, diff --git a/addons/vagrant/wireless/Vagrantfile b/addons/vagrant/wireless/Vagrantfile index f5246ba08f4e..a4b3539450ee 100644 --- a/addons/vagrant/wireless/Vagrantfile +++ b/addons/vagrant/wireless/Vagrantfile @@ -27,7 +27,7 @@ Vagrant.configure("2") do |config| # only from our expected subnet. Allow traffic between guests. Deny # all other inbound. Deny all other outbound. 
srv.vm.network "private_network", - :libvirt__network_name => networks[0]['name'], + :libvirt__network_name => "mgmt_#{ENV['USER']}", :ip => details['mgmt_ip'], :netmask => details['mgmt_netmask'], :libvirt__dhcp_enabled => false, From 67089eae7147c0bc363029fff7630f804cfceea4 Mon Sep 17 00:00:00 2001 From: nqb Date: Thu, 18 Nov 2021 22:36:22 +0100 Subject: [PATCH 04/21] poc to have separate networks per user --- addons/vagrant/inventory/hosts | 16 ++++++++++++++++ addons/vagrant/pfservers/Vagrantfile | 27 ++++++++++++++------------- 2 files changed, 30 insertions(+), 13 deletions(-) diff --git a/addons/vagrant/inventory/hosts b/addons/vagrant/inventory/hosts index cd4ba81d03f6..f76aa12deb37 100644 --- a/addons/vagrant/inventory/hosts +++ b/addons/vagrant/inventory/hosts @@ -234,3 +234,19 @@ all: subnet: '172.17.18.0/24' forward_mode: 'route' netmask: 255.255.255.0 + networks2: + _nqb: + networks: + - name: mgmt_nqb + subnet: '172.17.140.0/24' + - name: reg_nqb + subnet: '172.17.141.0/24' + - name: iso_nqb + subnet: '172.17.142.0/24' + - name: inline_nqb + subnet: '172.17.143.0/24' + vms: + pfel8dev: + ip: '172.17.140.10' + netmask: '255.255.255.0' + pfdeb11dev: '172.17.140.12' diff --git a/addons/vagrant/pfservers/Vagrantfile b/addons/vagrant/pfservers/Vagrantfile index 0f47752c25d1..2b3ac91fe9fa 100644 --- a/addons/vagrant/pfservers/Vagrantfile +++ b/addons/vagrant/pfservers/Vagrantfile @@ -7,6 +7,7 @@ require 'yaml' # Read YAML file with box details inventory = YAML.load_file('inventory/hosts') networks = inventory['all']['vars']['networks'] +networks2 = "inventory['all']['vars']['networks'][#{ENV['USER']}]" Vagrant.configure("2") do |config| # loop on **all** host(s) in pfservers group in inventory to create VM(s) @@ -24,34 +25,34 @@ Vagrant.configure("2") do |config| # only from our expected subnet. Allow traffic between guests. Deny # all other inbound. Deny all other outbound. 
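A note on the proof of concept below: networks2 as assigned above is a String (the whole lookup expression is quoted), not the per-user Hash it is meant to be, and the quoted :ip value is likewise a literal string rather than an address. Ruby's String#[] answers networks2['networks'] with a substring instead of raising, which masks the problem. The working form, adopted later in this series when the Vagrantfiles are updated, performs the lookup directly:

    current_user = ENV['USER']
    networks = inventory['all']['vars']['networks'][current_user]   # a Hash, not a String
    networks['networks'][0]['name']                                 # e.g. 'mgmt_nqb'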
srv.vm.network "private_network", - :libvirt__network_name => "mgmt_#{ENV['USER']}", - :ip => details['mgmt_ip'], - :netmask => details['mgmt_netmask'], + :libvirt__network_name => networks2['networks'][0]['name'], + :ip => "networks2['vms'][#{server}]['ip']", + :netmask => networks2['vms'][server]['netmask'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[0]['forward_mode'] + :libvirt__forward_mode => 'route' # registration srv.vm.network "private_network", - :libvirt__network_name => "reg_#{ENV['USER']}", - :ip => networks[1]['subnet'], + :libvirt__network_name => networks2['networks'][1]['name'], + :ip => networks2['networks'][1]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[1]['forward_mode'], + :libvirt__forward_mode => 'route', auto_config: false # isolation srv.vm.network "private_network", - :libvirt__network_name => "iso_#{ENV['USER']}", - :ip => networks[2]['subnet'], + :libvirt__network_name => networks2['networks'][2]['name'], + :ip => networks2['networks'][2]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[2]['forward_mode'], + :libvirt__forward_mode => 'route', auto_config: false # inline srv.vm.network "private_network", - :libvirt__network_name => "inline_#{ENV['USER']}", - :ip => networks[3]['subnet'], + :libvirt__network_name => networks2['networks'][3]['name'], + :ip => networks2['networks'][3]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[3]['forward_mode'], + :libvirt__forward_mode => 'route', auto_config: false srv.vm.provider "libvirt" do |v| From 2209072b5567bef1c702340fa745106c17a27d93 Mon Sep 17 00:00:00 2001 From: nqb Date: Fri, 19 Nov 2021 06:55:14 +0100 Subject: [PATCH 05/21] move ip and netmask into a dict --- addons/vagrant/inventory/hosts | 82 +++++++++++++--------------------- 1 file changed, 31 insertions(+), 51 deletions(-) diff --git a/addons/vagrant/inventory/hosts b/addons/vagrant/inventory/hosts index f76aa12deb37..0266347fd539 100644 --- a/addons/vagrant/inventory/hosts +++ b/addons/vagrant/inventory/hosts @@ -8,8 +8,7 @@ all: cumulus: hosts: switch01: - # IP used by helper_scripts and Ansible, not Vagrant - mgmt_ip: 172.17.17.201 + mgmt_ip: "{{ networks[current_user]['vms']['switch01']['ip'] }}" box: CumulusCommunity/cumulus-vx box_version: 3.7.12 ansible_host: "{{ mgmt_ip }}" @@ -19,16 +18,14 @@ all: node01: box: debian/bullseye64 box_version: 11.20211018.1 - # IP used by helper_scripts and Ansible, not Vagrant - mgmt_ip: 172.17.17.251 + mgmt_ip: "{{ networks[current_user]['vms']['node01']['ip'] }}" ansible_host: "{{ mgmt_ip }}" # only used when run outside Vagrant ansible_python_interpreter: '/usr/bin/python3' node02: box: debian/bullseye64 box_version: 11.20211018.1 - # IP used by helper_scripts and Ansible, not Vagrant - mgmt_ip: 172.17.17.252 + mgmt_ip: "{{ networks[current_user]['vms']['node02']['ip'] }}" ansible_host: "{{ mgmt_ip }}" # only used when run outside Vagrant ansible_python_interpreter: '/usr/bin/python3' @@ -38,8 +35,8 @@ all: ad: box: jborean93/WindowsServer2016 box_version: 0.7.0 - mgmt_ip: 172.17.17.100 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ networks[current_user]['vms']['ad']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['ad']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" cpus: 2 memory: 2048 @@ -49,8 +46,8 @@ all: wireless01: box: debian/bullseye64 box_version: 11.20211018.1 - mgmt_ip: 172.17.17.210 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ 
networks[current_user]['vms']['wireless01']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['wireless01']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" ansible_python_interpreter: '/usr/bin/python3' cpus: 1 @@ -65,8 +62,8 @@ all: linux01: box: debian/bullseye64 box_version: 11.20211018.1 - mgmt_ip: 172.17.17.101 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ networks[current_user]['vms']['linux01']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['linux01']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" ansible_python_interpreter: '/usr/bin/python3' cpus: 1 @@ -74,8 +71,8 @@ all: linux02: box: debian/bullseye64 box_version: 11.20211018.1 - mgmt_ip: 172.17.17.102 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ networks[current_user]['vms']['linux02']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['linux02']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" ansible_python_interpreter: '/usr/bin/python3' cpus: 1 @@ -104,16 +101,16 @@ all: pfel8dev: box: generic/rhel8 box_version: '3.4.2' - mgmt_ip: 172.17.17.10 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ networks[current_user]['vms']['pfel8dev']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['pfel8dev']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" cpus: 2 memory: 6144 pfdeb11dev: box: debian/bullseye64 box_version: 11.20211018.1 - mgmt_ip: 172.17.17.12 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ networks[current_user]['vms']['pfdeb11dev']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['pfdeb11dev']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" ansible_python_interpreter: '/usr/bin/python3' cpus: 2 @@ -121,16 +118,16 @@ all: el8dev: box: generic/rhel8 box_version: '3.4.2' - mgmt_ip: 172.17.17.11 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ networks[current_user]['vms']['el8dev']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['el8dev']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" cpus: 2 memory: 6144 deb11dev: box: debian/bullseye64 box_version: 11.20211018.1 - mgmt_ip: 172.17.17.13 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ networks[current_user]['vms']['deb11dev']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['deb11dev']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" ansible_python_interpreter: '/usr/bin/python3' cpus: 2 @@ -139,39 +136,22 @@ all: localdev: hosts: localhost: - mgmt_ip: '' - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ networks[current_user]['vms']['localhost']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['localhost']['netmask'] }}" ansible_connection: local pfel8localdev: box: generic/rhel8 box_version: '3.4.2' - mgmt_ip: 172.17.17.10 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ networks[current_user]['vms']['pfel8localdev']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['pfel8localdev']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" cpus: 2 memory: 6144 pfdeb11localdev: box: debian/bullseye64 box_version: 11.20211018.1 - mgmt_ip: 172.17.17.12 - mgmt_netmask: 255.255.255.0 - ansible_host: "{{ mgmt_ip }}" - ansible_python_interpreter: '/usr/bin/python3' - cpus: 2 - memory: 6144 - el8localdev: - box: generic/rhel8 - box_version: '3.4.2' - mgmt_ip: 172.17.17.11 - mgmt_netmask: 255.255.255.0 - ansible_host: "{{ mgmt_ip }}" - cpus: 2 - memory: 6144 - deb11localdev: - box: debian/bullseye64 - box_version: 11.20211018.1 - mgmt_ip: 172.17.17.13 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ networks[current_user]['vms']['pfdeb11localdev']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['pfdeb11localdev']['netmask'] }}" 
ansible_host: "{{ mgmt_ip }}" ansible_python_interpreter: '/usr/bin/python3' cpus: 2 @@ -182,8 +162,8 @@ all: pfel8stable: box: generic/rhel8 box_version: '3.4.2' - mgmt_ip: 172.17.17.14 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ networks[current_user]['vms']['pfel8stable']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['pfel8stable']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" cpus: 2 memory: 8192 @@ -191,16 +171,16 @@ all: pfdeb9stable: box: inverse-inc/pfdeb9stable box_version: 10.3.20210414165339 - mgmt_ip: 172.17.17.15 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ networks[current_user]['vms']['pfdeb9stable']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['pfdeb9stable']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" cpus: 2 memory: 8192 pfdeb11stable: box: debian/bullseye64 box_version: 11.20211018.1 - mgmt_ip: 172.17.17.16 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ networks[current_user]['vms']['pfdeb11stable']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['pfdeb11stable']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" ansible_python_interpreter: '/usr/bin/python3' cpus: 2 From 8d6e0c471f2e6dd4919ac475513c874310d2373c Mon Sep 17 00:00:00 2001 From: nqb Date: Fri, 19 Nov 2021 07:13:07 +0100 Subject: [PATCH 06/21] add IP addresses for each VM for all known users --- addons/vagrant/inventory/hosts | 517 +++++++++++++++++++++++++++++++-- 1 file changed, 491 insertions(+), 26 deletions(-) diff --git a/addons/vagrant/inventory/hosts b/addons/vagrant/inventory/hosts index 0266347fd539..9f45f8f11051 100644 --- a/addons/vagrant/inventory/hosts +++ b/addons/vagrant/inventory/hosts @@ -189,44 +189,509 @@ all: vars: tz: UTC + current_user: "{{ lookup('env', 'USER') }}" networks: - - name: 'mgmt' - subnet: '172.17.17.0/24' - forward_mode: 'route' - netmask: 255.255.255.0 - - - name: 'registration' - subnet: '172.17.2.0/24' - forward_mode: 'route' - netmask: 255.255.255.0 - - - name: 'isolation' - subnet: '172.17.3.0/24' - forward_mode: 'route' - netmask: 255.255.255.0 - - - name: 'inline' - subnet: '172.17.6.0/24' - forward_mode: 'route' - netmask: 255.255.255.0 - - - name: 'inline-l3' - subnet: '172.17.18.0/24' - forward_mode: 'route' - netmask: 255.255.255.0 - networks2: + gitlab-runner: + networks: + - name: mgmt_ci + subnet: '172.17.200.0/24' + forward_mode: 'route' + - name: reg_ci + subnet: '172.17.201.0/24' + forward_mode: 'route' + - name: iso_ci + subnet: '172.17.202.0/24' + forward_mode: 'route' + - name: inline_ci + subnet: '172.17.203.0/24' + forward_mode: 'route' + vms: + switch01: + ip: '172.17.200.201' + node01: + ip: '172.17.200.251' + node02: + ip: '172.17.200.252' + ad: + ip: '172.17.200.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.200.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.200.101' + netmask: '255.255.255.0' + linux02: + ip: '172.17.200.102' + netmask: '255.255.255.0' + pfel8dev: + ip: '172.17.200.10' + netmask: '255.255.255.0' + el8dev: + ip: '172.17.200.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.200.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.200.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: '172.17.200.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.200.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.200.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.200.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.200.18' + netmask: '255.255.255.0' _nqb: networks: - name: 
mgmt_nqb subnet: '172.17.140.0/24' + forward_mode: 'route' - name: reg_nqb subnet: '172.17.141.0/24' + forward_mode: 'route' - name: iso_nqb subnet: '172.17.142.0/24' + forward_mode: 'route' - name: inline_nqb subnet: '172.17.143.0/24' + forward_mode: 'route' vms: + switch01: + ip: '172.17.140.201' + node01: + ip: '172.17.140.251' + node02: + ip: '172.17.140.252' + ad: + ip: '172.17.140.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.140.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.140.101' + netmask: '255.255.255.0' + linux02: + ip: '172.17.140.102' + netmask: '255.255.255.0' pfel8dev: ip: '172.17.140.10' netmask: '255.255.255.0' - pfdeb11dev: '172.17.140.12' + el8dev: + ip: '172.17.140.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.140.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.140.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: '172.17.140.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.140.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.140.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.140.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.140.18' + netmask: '255.255.255.0' + _jrouzier: + networks: + - name: mgmt_jrouzier + subnet: '172.17.115.0/24' + forward_mode: 'route' + - name: reg_jrouzier + subnet: '172.17.116.0/24' + forward_mode: 'route' + - name: iso_jrouzier + subnet: '172.17.117.0/24' + forward_mode: 'route' + - name: inline_jrouzier + subnet: '172.17.118.0/24' + forward_mode: 'route' + vms: + switch01: + ip: '172.17.115.201' + node01: + ip: '172.17.115.251' + node02: + ip: '172.17.115.252' + ad: + ip: '172.17.115.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.115.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.115.101' + netmask: '255.255.255.0' + linux02: + ip: '172.17.115.102' + netmask: '255.255.255.0' + pfel8dev: + ip: '172.17.115.10' + netmask: '255.255.255.0' + el8dev: + ip: '172.17.115.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.115.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.115.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: '172.17.115.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.115.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.115.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.115.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.115.18' + netmask: '255.255.255.0' + _lzammit: + networks: + - name: mgmt_lzammit + subnet: '172.17.145.0/24' + forward_mode: 'route' + - name: reg_lzammit + subnet: '172.17.146.0/24' + forward_mode: 'route' + - name: iso_lzammit + subnet: '172.17.147.0/24' + forward_mode: 'route' + - name: inline_lzammit + subnet: '172.17.148.0/24' + forward_mode: 'route' + vms: + switch01: + ip: '172.17.145.201' + node01: + ip: '172.17.145.251' + node02: + ip: '172.17.145.252' + ad: + ip: '172.17.145.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.145.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.145.101' + netmask: '255.255.255.0' + linux02: + ip: '172.17.145.102' + netmask: '255.255.255.0' + pfel8dev: + ip: '172.17.145.10' + netmask: '255.255.255.0' + el8dev: + ip: '172.17.145.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.145.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.145.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: 
'172.17.145.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.145.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.145.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.145.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.145.18' + netmask: '255.255.255.0' + _jegoimard: + networks: + - name: mgmt_jegoimard + subnet: '172.17.120.0/24' + forward_mode: 'route' + - name: reg_jegoimard + subnet: '172.17.121.0/24' + forward_mode: 'route' + - name: iso_jegoimard + subnet: '172.17.122.0/24' + forward_mode: 'route' + - name: inline_jegoimard + subnet: '172.17.123.0/24' + forward_mode: 'route' + vms: + switch01: + ip: '172.17.120.201' + node01: + ip: '172.17.120.251' + node02: + ip: '172.17.120.252' + ad: + ip: '172.17.120.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.120.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.120.101' + netmask: '255.255.255.0' + linux02: + ip: '172.17.120.102' + netmask: '255.255.255.0' + pfel8dev: + ip: '172.17.120.10' + netmask: '255.255.255.0' + el8dev: + ip: '172.17.120.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.120.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.120.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: '172.17.120.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.120.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.120.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.120.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.120.18' + netmask: '255.255.255.0' + _dsatkunas: + networks: + - name: mgmt_dsatkunas + subnet: '172.17.125.0/24' + forward_mode: 'route' + - name: reg_dsatkunas + subnet: '172.17.126.0/24' + forward_mode: 'route' + - name: iso_dsatkunas + subnet: '172.17.127.0/24' + forward_mode: 'route' + - name: inline_dsatkunas + subnet: '172.17.128.0/24' + forward_mode: 'route' + vms: + switch01: + ip: '172.17.125.201' + node01: + ip: '172.17.125.251' + node02: + ip: '172.17.125.252' + ad: + ip: '172.17.125.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.125.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.125.101' + netmask: '255.255.255.0' + linux02: + ip: '172.17.125.102' + netmask: '255.255.255.0' + pfel8dev: + ip: '172.17.125.10' + netmask: '255.255.255.0' + el8dev: + ip: '172.17.125.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.125.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.125.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: '172.17.125.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.125.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.125.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.125.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.125.18' + netmask: '255.255.255.0' + _fdurand: + networks: + - name: mgmt_fdurand + subnet: '172.17.135.0/24' + forward_mode: 'route' + - name: reg_fdurand + subnet: '172.17.136.0/24' + forward_mode: 'route' + - name: iso_fdurand + subnet: '172.17.137.0/24' + forward_mode: 'route' + - name: inline_fdurand + subnet: '172.17.138.0/24' + forward_mode: 'route' + vms: + switch01: + ip: '172.17.135.201' + node01: + ip: '172.17.135.251' + node02: + ip: '172.17.135.252' + ad: + ip: '172.17.135.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.135.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.135.101' + netmask: '255.255.255.0' + linux02: + ip: 
'172.17.135.102' + netmask: '255.255.255.0' + pfel8dev: + ip: '172.17.135.10' + netmask: '255.255.255.0' + el8dev: + ip: '172.17.135.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.135.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.135.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: '172.17.135.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.135.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.135.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.135.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.135.18' + netmask: '255.255.255.0' + _jsemaan: + networks: + - name: mgmt_jsemaan + subnet: '172.17.155.0/24' + forward_mode: 'route' + - name: reg_jsemaan + subnet: '172.17.156.0/24' + forward_mode: 'route' + - name: iso_jsemaan + subnet: '172.17.157.0/24' + forward_mode: 'route' + - name: inline_jsemaan + subnet: '172.17.158.0/24' + forward_mode: 'route' + vms: + switch01: + ip: '172.17.155.201' + node01: + ip: '172.17.155.251' + node02: + ip: '172.17.155.252' + ad: + ip: '172.17.155.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.155.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.155.101' + netmask: '255.255.255.0' + linux02: + ip: '172.17.155.102' + netmask: '255.255.255.0' + pfel8dev: + ip: '172.17.155.10' + netmask: '255.255.255.0' + el8dev: + ip: '172.17.155.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.155.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.155.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: '172.17.155.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.155.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.155.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.155.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.155.18' + netmask: '255.255.255.0' From 747884c7c3d9cf09dca14424ee0f6a385dd8a461 Mon Sep 17 00:00:00 2001 From: nqb Date: Fri, 19 Nov 2021 07:29:56 +0100 Subject: [PATCH 07/21] update Vagrantfiles to use new IP addresses --- addons/vagrant/cumulus/Vagrantfile | 29 ++++++++++----------- addons/vagrant/linux_servers/Vagrantfile | 13 +++++----- addons/vagrant/pfservers/Vagrantfile | 32 ++++++++++++------------ addons/vagrant/winservers/Vagrantfile | 13 +++++----- addons/vagrant/wireless/Vagrantfile | 13 +++++----- 5 files changed, 52 insertions(+), 48 deletions(-) diff --git a/addons/vagrant/cumulus/Vagrantfile b/addons/vagrant/cumulus/Vagrantfile index 57b8c5fbe490..f25d4d46ab3a 100644 --- a/addons/vagrant/cumulus/Vagrantfile +++ b/addons/vagrant/cumulus/Vagrantfile @@ -84,9 +84,10 @@ echo "### Rebooting Device to Apply Remap..." 
nohup bash -c 'shutdown now -r "Rebooting to Remap Interfaces"' & SCRIPT -# Read YAML file with box details +# Read YAML file with box and network details inventory = YAML.load_file('inventory/hosts') -networks = inventory['all']['vars']['networks'] +current_user = ENV['USER'] +networks = inventory['all']['vars']['networks'][current_user] Vagrant.configure("2") do |config| @@ -113,34 +114,34 @@ Vagrant.configure("2") do |config| # link for swp1 --> mgmt_network (vlan 17) device.vm.network "private_network", :mac => "a0:00:00:00:00:01", - :libvirt__network_name => "mgmt_#{ENV['USER']}", - :ip => networks[0]['subnet'], + :libvirt__network_name => networks['networks'][0]['name'], + :ip => networks['networks'][0]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[0]['forward_mode'], + :libvirt__forward_mode => networks['networks'][0]['forward_mode'], auto_config: false # link for swp2 --> reg_network (vlan 2) device.vm.network "private_network", :mac => "44:38:39:00:00:02", - :libvirt__network_name => "reg_#{ENV['USER']}", - :ip => networks[1]['subnet'], + :libvirt__network_name => networks['networks'][1]['name'], + :ip => networks['networks'][1]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[1]['forward_mode'], + :libvirt__forward_mode => networks['networks'][1]['forward_mode'], auto_config: false # link for swp3 --> iso_network (vlan 3) device.vm.network "private_network", :mac => "44:38:39:00:00:03", - :libvirt__network_name => "iso_#{ENV['USER']}", - :ip => networks[2]['subnet'], + :libvirt__network_name => networks['networks'][2]['name'], + :ip => networks['networks'][2]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[2]['forward_mode'], + :libvirt__forward_mode => networks['networks'][2]['forward_mode'], auto_config: false # link for swp6 --> inline_network (vlan 6) device.vm.network "private_network", :mac => "44:38:39:00:00:06", - :libvirt__network_name => "inline_#{ENV['USER']}", - :ip => networks[3]['subnet'], + :libvirt__network_name => networks['networks'][3]['name'], + :ip => networks['networks'][3]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[3]['forward_mode'], + :libvirt__forward_mode => networks['networks'][3]['forward_mode'], auto_config: false # link for swp11 --> node01:ens6 device.vm.network "private_network", diff --git a/addons/vagrant/linux_servers/Vagrantfile b/addons/vagrant/linux_servers/Vagrantfile index d15dd3ba40eb..40c4591815a2 100644 --- a/addons/vagrant/linux_servers/Vagrantfile +++ b/addons/vagrant/linux_servers/Vagrantfile @@ -4,9 +4,10 @@ # Require YAML module require 'yaml' -# Read YAML file with box details +# Read YAML file with box and network details inventory = YAML.load_file('inventory/hosts') -networks = inventory['all']['vars']['networks'] +current_user = ENV['USER'] +networks = inventory['all']['vars']['networks'][current_user] Vagrant.configure("2") do |config| # loop on **all** host(s) in linux_servers group in inventory to create VM(s) @@ -22,11 +23,11 @@ Vagrant.configure("2") do |config| # only from our expected subnet. Allow traffic between guests. Deny # all other inbound. Deny all other outbound. 
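One caveat with the per-user lookup introduced in this patch: inventory['all']['vars']['networks'][current_user] returns nil when ENV['USER'] has no entry in the inventory, and the first networks['networks'][0] access then fails with a NoMethodError far from the cause. Note also that the per-user keys added earlier in the series carry a leading underscore (_nqb, _jrouzier, ...) while gitlab-runner does not, so an unprefixed ENV['USER'] lookup only matches the CI entry as written. A possible guard, shown for illustration only and not part of this series:

    current_user = ENV['USER']
    users = inventory['all']['vars']['networks']
    networks = users[current_user] || users["_#{current_user}"]
    if networks.nil?
      abort "No networks for '#{current_user}' in inventory/hosts (known: #{users.keys.join(', ')})"
    end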
srv.vm.network "private_network", - :libvirt__network_name => "mgmt_#{ENV['USER']}", - :ip => details['mgmt_ip'], - :netmask => details['mgmt_netmask'], + :libvirt__network_name => networks['networks'][0]['name'], + :ip => networks['vms'][server]['ip'], + :netmask => networks['vms'][server]['netmask'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[0]['forward_mode'] + :libvirt__forward_mode => networks['networks'][0]['forward_mode'] srv.vm.provider "libvirt" do |v| v.cpus = details['cpus'] diff --git a/addons/vagrant/pfservers/Vagrantfile b/addons/vagrant/pfservers/Vagrantfile index 2b3ac91fe9fa..a2189446a243 100644 --- a/addons/vagrant/pfservers/Vagrantfile +++ b/addons/vagrant/pfservers/Vagrantfile @@ -4,10 +4,10 @@ # Require YAML module require 'yaml' -# Read YAML file with box details +# Read YAML file with box and network details inventory = YAML.load_file('inventory/hosts') -networks = inventory['all']['vars']['networks'] -networks2 = "inventory['all']['vars']['networks'][#{ENV['USER']}]" +current_user = ENV['USER'] +networks = inventory['all']['vars']['networks'][current_user] Vagrant.configure("2") do |config| # loop on **all** host(s) in pfservers group in inventory to create VM(s) @@ -25,34 +25,34 @@ Vagrant.configure("2") do |config| # only from our expected subnet. Allow traffic between guests. Deny # all other inbound. Deny all other outbound. srv.vm.network "private_network", - :libvirt__network_name => networks2['networks'][0]['name'], - :ip => "networks2['vms'][#{server}]['ip']", - :netmask => networks2['vms'][server]['netmask'], + :libvirt__network_name => networks['networks'][0]['name'], + :ip => networks['vms'][server]['ip'], + :netmask => networks['vms'][server]['netmask'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => 'route' + :libvirt__forward_mode => networks['networks'][0]['forward_mode'] # registration srv.vm.network "private_network", - :libvirt__network_name => networks2['networks'][1]['name'], - :ip => networks2['networks'][1]['subnet'], + :libvirt__network_name => networks['networks'][1]['name'], + :ip => networks['networks'][1]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => 'route', + :libvirt__forward_mode => networks['networks'][1]['forward_mode'], auto_config: false # isolation srv.vm.network "private_network", - :libvirt__network_name => networks2['networks'][2]['name'], - :ip => networks2['networks'][2]['subnet'], + :libvirt__network_name => networks['networks'][2]['name'], + :ip => networks['networks'][2]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => 'route', + :libvirt__forward_mode => networks['networks'][2]['forward_mode'], auto_config: false # inline srv.vm.network "private_network", - :libvirt__network_name => networks2['networks'][3]['name'], - :ip => networks2['networks'][3]['subnet'], + :libvirt__network_name => networks['networks'][3]['name'], + :ip => networks['networks'][3]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => 'route', + :libvirt__forward_mode => networks['networks'][3]['forward_mode'], auto_config: false srv.vm.provider "libvirt" do |v| diff --git a/addons/vagrant/winservers/Vagrantfile b/addons/vagrant/winservers/Vagrantfile index 0cca3724ad9f..deabb8d9d6bc 100644 --- a/addons/vagrant/winservers/Vagrantfile +++ b/addons/vagrant/winservers/Vagrantfile @@ -4,9 +4,10 @@ # Require YAML module require 'yaml' -# Read YAML file with box details +# Read YAML file with box and network details inventory = 
YAML.load_file('inventory/hosts') -networks = inventory['all']['vars']['networks'] +current_user = ENV['USER'] +networks = inventory['all']['vars']['networks'][current_user] Vagrant.configure("2") do |config| inventory['all']['children']['winservers']['hosts'].each do |server,details| @@ -21,11 +22,11 @@ Vagrant.configure("2") do |config| # only from our expected subnet. Allow traffic between guests. Deny # all other inbound. Deny all other outbound. srv.vm.network "private_network", - :libvirt__network_name => "mgmt_#{ENV['USER']}", - :ip => details['mgmt_ip'], - :netmask => details['mgmt_netmask'], + :libvirt__network_name => networks['networks'][0]['name'], + :ip => networks['vms'][server]['ip'], + :netmask => networks['vms'][server]['netmask'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[0]['forward_mode'] + :libvirt__forward_mode => networks['networks'][0]['forward_mode'] srv.vm.provider "libvirt" do |v| v.cpus = details['cpus'] diff --git a/addons/vagrant/wireless/Vagrantfile b/addons/vagrant/wireless/Vagrantfile index a4b3539450ee..dd724da0d3eb 100644 --- a/addons/vagrant/wireless/Vagrantfile +++ b/addons/vagrant/wireless/Vagrantfile @@ -4,9 +4,10 @@ # Require YAML module require 'yaml' -# Read YAML file with box details +# Read YAML file with box and network details inventory = YAML.load_file('inventory/hosts') -networks = inventory['all']['vars']['networks'] +current_user = ENV['USER'] +networks = inventory['all']['vars']['networks'][current_user] Vagrant.configure("2") do |config| inventory['all']['children']['wireless']['hosts'].each do |server,details| @@ -27,11 +28,11 @@ Vagrant.configure("2") do |config| # only from our expected subnet. Allow traffic between guests. Deny # all other inbound. Deny all other outbound. 
srv.vm.network "private_network", - :libvirt__network_name => "mgmt_#{ENV['USER']}", - :ip => details['mgmt_ip'], - :netmask => details['mgmt_netmask'], + :libvirt__network_name => networks['networks'][0]['name'], + :ip => networks['vms'][server]['ip'], + :netmask => networks['vms'][server]['netmask'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[0]['forward_mode'] + :libvirt__forward_mode => networks['networks'][0]['forward_mode'] end end end From 85e1b0aecc1449bab45efd1829318a94c3ed8d46 Mon Sep 17 00:00:00 2001 From: nqb Date: Fri, 19 Nov 2021 07:44:28 +0100 Subject: [PATCH 08/21] adjust Cumulus provisioning scripts --- addons/vagrant/cumulus/Vagrantfile | 25 ++++-- .../vagrant/helper_scripts/config_node01.sh | 7 +- .../vagrant/helper_scripts/config_node02.sh | 7 +- .../vagrant/helper_scripts/config_switch.sh | 19 +++- addons/vagrant/inventory/hosts | 87 +++++++++++++++++-- 5 files changed, 129 insertions(+), 16 deletions(-) diff --git a/addons/vagrant/cumulus/Vagrantfile b/addons/vagrant/cumulus/Vagrantfile index f25d4d46ab3a..e55c8d4022b7 100644 --- a/addons/vagrant/cumulus/Vagrantfile +++ b/addons/vagrant/cumulus/Vagrantfile @@ -88,6 +88,16 @@ SCRIPT inventory = YAML.load_file('inventory/hosts') current_user = ENV['USER'] networks = inventory['all']['vars']['networks'][current_user] +switch01_ip = networks['vms']['switch01']['ip'] +switch01_netmask = networks['vms']['switch01']['netmask'] +inline_ip = networks['vms']['switch01']['inline_ip'] +inline_netmask = networks['vms']['switch01']['inline_netmask'] +inline_l3_ip = networks['vms']['switch01']['inline_l3_ip'] +inline_l3_netmask = networks['vms']['switch01']['inline_l3_netmask'] +node01_ip = networks['vms']['node01']['ip'] +node01_netmask = networks['vms']['node01']['netmask'] +node02_ip = networks['vms']['node02']['ip'] +node02_netmask = networks['vms']['node02']['netmask'] Vagrant.configure("2") do |config| @@ -195,8 +205,11 @@ Vagrant.configure("2") do |config| # Run the Config specified in the Node Attributes device.vm.provision :shell , privileged: false, :inline => 'echo "$(whoami)" > /tmp/normal_user' - device.vm.provision "config_switch", type: "shell" , path: "./helper_scripts/config_switch.sh" - + device.vm.provision "config_switch", type: "shell" , path: "./helper_scripts/config_switch.sh", + args: [ "#{switch01_ip}", "#{switch01_netmask}", + "#{inline_ip}", "#{inline_netmask}", + "#{inline_l3_ip}", "#{inline_l3_netmask}" + ] # Install Rules for the interface re-map device.vm.provision :shell , :inline => <<-delete_udev_directory @@ -339,8 +352,8 @@ vagrant_interface_rule # Run the Config specified in the Node Attributes device.vm.provision :shell , privileged: false, :inline => 'echo "$(whoami)" > /tmp/normal_user' - device.vm.provision "config_node", type: "shell", path: "./helper_scripts/config_node01.sh" - + device.vm.provision "config_node", type: "shell", path: "./helper_scripts/config_node01.sh", + args: [ "#{node01_ip}", "#{node01_netmask}"] # Install Rules for the interface re-map device.vm.provision :shell , :inline => <<-delete_udev_directory @@ -445,8 +458,8 @@ vagrant_interface_rule # Run the Config specified in the Node Attributes device.vm.provision :shell , privileged: false, :inline => 'echo "$(whoami)" > /tmp/normal_user' - device.vm.provision "config_node", type: "shell", path: "./helper_scripts/config_node02.sh" - + device.vm.provision "config_node", type: "shell", path: "./helper_scripts/config_node02.sh", + args: [ "#{node02_ip}", "#{node02_netmask}"] # Install Rules for the 
interface re-map device.vm.provision :shell , :inline => <<-delete_udev_directory diff --git a/addons/vagrant/helper_scripts/config_node01.sh b/addons/vagrant/helper_scripts/config_node01.sh index 1e332daa3f55..f2e8f67ed137 100755 --- a/addons/vagrant/helper_scripts/config_node01.sh +++ b/addons/vagrant/helper_scripts/config_node01.sh @@ -1,5 +1,9 @@ #!/bin/bash set -o nounset -o pipefail -o errexit +mgmt_ip=$1 +mgmt_netmask=$2 + +declare -p mgmt_ip mgmt_netmask echo "#################################" echo " Running config_node01.sh" @@ -41,7 +45,8 @@ iface lo inet loopback auto ens6 iface ens6 inet static alias VLAN 17 - address 172.17.17.251/24 + address ${mgmt_ip} + netmask ${mgmt_netmask} allow-hotplug ens7 iface ens7 inet dhcp diff --git a/addons/vagrant/helper_scripts/config_node02.sh b/addons/vagrant/helper_scripts/config_node02.sh index db1f55192aea..92727c24e7f5 100755 --- a/addons/vagrant/helper_scripts/config_node02.sh +++ b/addons/vagrant/helper_scripts/config_node02.sh @@ -1,5 +1,9 @@ #!/bin/bash set -o nounset -o pipefail -o errexit +mgmt_ip=$1 +mgmt_netmask=$2 + +declare -p mgmt_ip mgmt_netmask echo "#################################" echo " Running config_node.sh" @@ -41,7 +45,8 @@ iface lo inet loopback auto ens6 iface ens6 inet static alias VLAN 17 - address 172.17.17.252/24 + address ${mgmt_ip} + netmask ${mgmt_netmask} allow-hotplug ens7 iface ens7 inet dhcp diff --git a/addons/vagrant/helper_scripts/config_switch.sh b/addons/vagrant/helper_scripts/config_switch.sh index 4a061b886936..c516cee9e0cc 100755 --- a/addons/vagrant/helper_scripts/config_switch.sh +++ b/addons/vagrant/helper_scripts/config_switch.sh @@ -1,5 +1,15 @@ #!/bin/bash set -o nounset -o pipefail -o errexit +mgmt_ip=$1 +mgmt_netmask=$2 +inline_ip=$3 +inline_netmask=$4 +inline_l3_ip=$5 +inline_l3_netmask=$6 + +declare -p mgmt_ip mgmt_netmask +declare -p inline_ip inline_netmask +declare -p inline_l3_ip inline_l3_netmask echo "#################################" echo " Running Switch Post Config (config_switch.sh)" @@ -62,17 +72,20 @@ iface swp48 auto bridge.6 iface bridge.6 alias Inline-L2 - address 172.17.6.3/24 + address ${inline_ip} + netmask ${inline_netmask} auto bridge.17 iface bridge.17 alias Management - address 172.17.17.201/24 + address ${mgmt_ip} + netmask ${mgmt_netmask} auto bridge.18 iface bridge.18 alias Inline-L3 - address 172.17.18.254/24 + address ${inline_l3_ip} + netmask ${inline_l3_netmask} auto bridge.100 iface bridge.100 inet dhcp diff --git a/addons/vagrant/inventory/hosts b/addons/vagrant/inventory/hosts index 9f45f8f11051..73a35e68fb97 100644 --- a/addons/vagrant/inventory/hosts +++ b/addons/vagrant/inventory/hosts @@ -9,6 +9,7 @@ all: hosts: switch01: mgmt_ip: "{{ networks[current_user]['vms']['switch01']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['switch01']['netmask'] }}" box: CumulusCommunity/cumulus-vx box_version: 3.7.12 ansible_host: "{{ mgmt_ip }}" @@ -19,6 +20,7 @@ all: box: debian/bullseye64 box_version: 11.20211018.1 mgmt_ip: "{{ networks[current_user]['vms']['node01']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['node01']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" # only used when run outside Vagrant ansible_python_interpreter: '/usr/bin/python3' @@ -26,6 +28,7 @@ all: box: debian/bullseye64 box_version: 11.20211018.1 mgmt_ip: "{{ networks[current_user]['vms']['node02']['ip'] }}" + mgmt_netmask: "{{ networks[current_user]['vms']['node02']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" # only used when run outside Vagrant 
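The shell provisioner calls added earlier in this patch pass addresses positionally; combined with the set -o nounset at the top of each helper script, a missing argument aborts the run instead of silently templating an empty address. The mapping for config_switch.sh, annotated for reference:

    device.vm.provision "config_switch", type: "shell",
      path: "./helper_scripts/config_switch.sh",
      args: [ switch01_ip, switch01_netmask,      # -> $1 mgmt_ip,      $2 mgmt_netmask
              inline_ip, inline_netmask,          # -> $3 inline_ip,    $4 inline_netmask
              inline_l3_ip, inline_l3_netmask ]   # -> $5 inline_l3_ip, $6 inline_l3_netmask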
ansible_python_interpreter: '/usr/bin/python3' @@ -191,6 +194,7 @@ all: tz: UTC current_user: "{{ lookup('env', 'USER') }}" networks: + # ci gitlab-runner: networks: - name: mgmt_ci @@ -205,13 +209,22 @@ all: - name: inline_ci subnet: '172.17.203.0/24' forward_mode: 'route' + - name: inline_l3_ci + subnet: '172.17.204.0/24' vms: switch01: ip: '172.17.200.201' + netmask: '255.255.255.0' + inline_ip: '172.17.203.3' + inline_netmask: '255.255.255.0' + inline_l3_ip: '172.17.204.4' + inline_l3_netmask: '255.255.255.0' node01: ip: '172.17.200.251' + netmask: '255.255.255.0' node02: ip: '172.17.200.252' + netmask: '255.255.255.0' ad: ip: '172.17.200.100' netmask: '255.255.255.0' @@ -254,6 +267,7 @@ all: pfdeb11stable: ip: '172.17.200.18' netmask: '255.255.255.0' + # local dev _nqb: networks: - name: mgmt_nqb @@ -268,13 +282,22 @@ all: - name: inline_nqb subnet: '172.17.143.0/24' forward_mode: 'route' + - name: inline_l3_nqb + subnet: '172.17.144.0/24' vms: switch01: ip: '172.17.140.201' + netmask: '255.255.255.0' + inline_ip: '172.17.143.3' + inline_netmask: '255.255.255.0' + inline_l3_ip: '172.17.144.4' + inline_l3_netmask: '255.255.255.0' node01: ip: '172.17.140.251' + netmask: '255.255.255.0' node02: ip: '172.17.140.252' + netmask: '255.255.255.0' ad: ip: '172.17.140.100' netmask: '255.255.255.0' @@ -331,13 +354,22 @@ all: - name: inline_jrouzier subnet: '172.17.118.0/24' forward_mode: 'route' + - name: inline_l3_jrouzier + subnet: '172.17.119.0/24' vms: switch01: ip: '172.17.115.201' + netmask: '255.255.255.0' + inline_ip: '172.17.118.3' + inline_netmask: '255.255.255.0' + inline_l3_ip: '172.17.119.4' + inline_l3_netmask: '255.255.255.0' node01: ip: '172.17.115.251' + netmask: '255.255.255.0' node02: ip: '172.17.115.252' + netmask: '255.255.255.0' ad: ip: '172.17.115.100' netmask: '255.255.255.0' @@ -394,13 +426,22 @@ all: - name: inline_lzammit subnet: '172.17.148.0/24' forward_mode: 'route' + - name: inline_l3_lzammit + subnet: '172.17.149.0/24' vms: switch01: ip: '172.17.145.201' + netmask: '255.255.255.0' + inline_ip: '172.17.148.3' + inline_netmask: '255.255.255.0' + inline_l3_ip: '172.17.149.4' + inline_l3_netmask: '255.255.255.0' node01: ip: '172.17.145.251' + netmask: '255.255.255.0' node02: ip: '172.17.145.252' + netmask: '255.255.255.0' ad: ip: '172.17.145.100' netmask: '255.255.255.0' @@ -443,27 +484,36 @@ all: pfdeb11stable: ip: '172.17.145.18' netmask: '255.255.255.0' - _jegoimard: + _jgoimard: networks: - - name: mgmt_jegoimard + - name: mgmt_jgoimard subnet: '172.17.120.0/24' forward_mode: 'route' - - name: reg_jegoimard + - name: reg_jgoimard subnet: '172.17.121.0/24' forward_mode: 'route' - - name: iso_jegoimard + - name: iso_jgoimard subnet: '172.17.122.0/24' forward_mode: 'route' - - name: inline_jegoimard + - name: inline_jgoimard subnet: '172.17.123.0/24' forward_mode: 'route' + - name: inline_l3_jgoimard + subnet: '172.17.124.0/24' vms: switch01: ip: '172.17.120.201' + netmask: '255.255.255.0' + inline_ip: '172.17.123.3' + inline_netmask: '255.255.255.0' + inline_l3_ip: '172.17.124.4' + inline_l3_netmask: '255.255.255.0' node01: ip: '172.17.120.251' + netmask: '255.255.255.0' node02: ip: '172.17.120.252' + netmask: '255.255.255.0' ad: ip: '172.17.120.100' netmask: '255.255.255.0' @@ -520,13 +570,22 @@ all: - name: inline_dsatkunas subnet: '172.17.128.0/24' forward_mode: 'route' + - name: inline_l3_dsatkunas + subnet: '172.17.119.0/24' vms: switch01: ip: '172.17.125.201' + netmask: '255.255.255.0' + inline_ip: '172.17.128.3' + inline_netmask: '255.255.255.0' + 
inline_l3_ip: '172.17.128.4' + inline_l3_netmask: '255.255.255.0' node01: ip: '172.17.125.251' + netmask: '255.255.255.0' node02: ip: '172.17.125.252' + netmask: '255.255.255.0' ad: ip: '172.17.125.100' netmask: '255.255.255.0' @@ -583,13 +642,22 @@ all: - name: inline_fdurand subnet: '172.17.138.0/24' forward_mode: 'route' + - name: inline_l3_fdurand + subnet: '172.17.139.0/24' vms: switch01: ip: '172.17.135.201' + netmask: '255.255.255.0' + inline_ip: '172.17.138.3' + inline_netmask: '255.255.255.0' + inline_l3_ip: '172.17.139.4' + inline_l3_netmask: '255.255.255.0' node01: ip: '172.17.135.251' + netmask: '255.255.255.0' node02: ip: '172.17.135.252' + netmask: '255.255.255.0' ad: ip: '172.17.135.100' netmask: '255.255.255.0' @@ -646,13 +714,22 @@ all: - name: inline_jsemaan subnet: '172.17.158.0/24' forward_mode: 'route' + - name: inline_l3_jsemaan + subnet: '172.17.159.0/24' vms: switch01: ip: '172.17.155.201' + netmask: '255.255.255.0' + inline_ip: '172.17.158.3' + inline_netmask: '255.255.255.0' + inline_l3_ip: '172.17.159.4' + inline_l3_netmask: '255.255.255.0' node01: ip: '172.17.155.251' + netmask: '255.255.255.0' node02: ip: '172.17.155.252' + netmask: '255.255.255.0' ad: ip: '172.17.155.100' netmask: '255.255.255.0' From 7a920f210add0d6702fefc38cf2f4b702e3f9b26 Mon Sep 17 00:00:00 2001 From: nqb Date: Fri, 19 Nov 2021 11:04:53 +0100 Subject: [PATCH 09/21] ansible: lookup in dict --- addons/vagrant/inventory/group_vars/service_rsyslog/rsyslog.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/addons/vagrant/inventory/group_vars/service_rsyslog/rsyslog.yml b/addons/vagrant/inventory/group_vars/service_rsyslog/rsyslog.yml index 14b5cd00619e..993fe73e174d 100644 --- a/addons/vagrant/inventory/group_vars/service_rsyslog/rsyslog.yml +++ b/addons/vagrant/inventory/group_vars/service_rsyslog/rsyslog.yml @@ -1,4 +1,4 @@ --- # allow all machines on management network to send logs to rsyslog rsyslog__group_allow: - - 172.17.17.0/24 + - "{{ networks[current_user]['networks'][0]['subnet'] }}" From b550fb692bcfb69f6e4bfabeaf982feb8cc2a444 Mon Sep 17 00:00:00 2001 From: nqb Date: Sat, 20 Nov 2021 07:48:10 +0100 Subject: [PATCH 10/21] inventory for localdev --- .../group_vars/dev/packetfence_install.yml | 15 +++++++++++++++ .../localdev/packetfence_install.yml | 12 ++++++++++++ .../pfservers/packetfence_install.yml | 18 ++---------------- 3 files changed, 29 insertions(+), 16 deletions(-) create mode 100644 addons/vagrant/inventory/group_vars/localdev/packetfence_install.yml diff --git a/addons/vagrant/inventory/group_vars/dev/packetfence_install.yml b/addons/vagrant/inventory/group_vars/dev/packetfence_install.yml index de18c79666c7..6f3a8df996e5 100644 --- a/addons/vagrant/inventory/group_vars/dev/packetfence_install.yml +++ b/addons/vagrant/inventory/group_vars/dev/packetfence_install.yml @@ -3,3 +3,18 @@ # from inverse.ca website (already installed based on pipeline # artifacts) packetfence_install__centos_release_rpm: 'packetfence-release' + +# in CI environment: only for dependencies, packetfence package is installed using local repo +packetfence_install__centos: + repos: + - packetfence + +# override to installed test files +packetfence_install__centos_packages: + - packetfence + - packetfence-test + +# override to installed test files +packetfence_install__deb_packages: + - packetfence + - packetfence-test diff --git a/addons/vagrant/inventory/group_vars/localdev/packetfence_install.yml b/addons/vagrant/inventory/group_vars/localdev/packetfence_install.yml new file 
mode 100644 index 000000000000..73cc5fa4550e --- /dev/null +++ b/addons/vagrant/inventory/group_vars/localdev/packetfence_install.yml @@ -0,0 +1,12 @@ +--- +packetfence_install__centos_release_rpm: "http://packetfence.org/downloads/PacketFence/RHEL8/packetfence-release-{{ pf_minor_release }}.el8.noarch.rpm" + +# override to installed test files +packetfence_install__centos_packages: + - packetfence + - packetfence-test + +# override to installed test files +packetfence_install__deb_packages: + - packetfence + - packetfence-test diff --git a/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml b/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml index a2bb1e15f417..99c05b26a923 100644 --- a/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml +++ b/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml @@ -12,25 +12,11 @@ packetfence_install__mgmt_interface: mask: "{{ mgmt_netmask }}" type: management,portal -# only for dependencies, packetfence package is installed using local repo -packetfence_install__centos: - repos: - - packetfence - -# override to installed test files -packetfence_install__centos_packages: - - packetfence - - packetfence-test - -# only for dependencies, packetfence packages are installed using local repo +# in CI environment: only for dependencies, packetfence package is installed using local repo +# in local dev environment: to install packetfence packages packetfence_install__deb: repos: - "debian/{{ pf_minor_release }}" -# override to installed test files -packetfence_install__deb_packages: - - packetfence - - packetfence-test - # we used Venom to pass through configurator packetfence_install__configurator_status: 'enabled' From 45bfda5b57f67ade7cc2b0885b56abd00141e173 Mon Sep 17 00:00:00 2001 From: nqb Date: Mon, 22 Nov 2021 11:22:59 +0100 Subject: [PATCH 11/21] generate Venom IP address using Ansible --- .../pfservers/packetfence_install.yml | 1 - .../group_vars/pfservers/venom_local_vars.yml | 37 +++++++++++++++++++ t/venom/vars/all.yml | 8 ---- 3 files changed, 37 insertions(+), 9 deletions(-) diff --git a/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml b/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml index 99c05b26a923..cdb39526d0f4 100644 --- a/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml +++ b/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml @@ -10,7 +10,6 @@ packetfence_install__mgmt_interface: id: "{{ mgmt_interface_id['key'] }}" ip: "{{ mgmt_ip }}" mask: "{{ mgmt_netmask }}" - type: management,portal # in CI environment: only for dependencies, packetfence package is installed using local repo # in local dev environment: to install packetfence packages diff --git a/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml b/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml index 050a3910d983..86e8df6d95d6 100644 --- a/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml +++ b/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml @@ -7,6 +7,22 @@ venom_local_vars: - name: 'pfserver_mgmt_netmask' value: "{{ packetfence_install__mgmt_interface['mask'] }}" + # get second IP (.2) usable without /CIDR + - name: 'configurator.interfaces.reg.ip' + value: "{{ networks[current_user]['networks'][1]['subnet'] | ansible.netcommon.next_nth_usable(2) | ansible.netcommon.ipaddr('address') }}" + + # get netmask based on CIDR + - name: 
'configurator.interfaces.reg.netmask' + value: "{{ networks[current_user]['networks'][1]['subnet'] | ansible.netcommon.ipaddr('netmask') }}" + + # get second IP (.2) usable without /CIDR + - name: 'configurator.interfaces.iso.ip' + value: "{{ networks[current_user]['networks'][2]['subnet'] | ansible.netcommon.next_nth_usable(2) | ansible.netcommon.ipaddr('address') }}" + + # get netmask based on CIDR + - name: 'configurator.interfaces.iso.netmask' + value: "{{ networks[current_user]['networks'][2]['subnet'] | ansible.netcommon.ipaddr('netmask') }}" + - name: 'smtp_server' value: "{{ packetfence_install__mgmt_interface['ip'] }}" @@ -27,3 +43,24 @@ venom_local_vars: - name: 'mariadb_socket' value: "{{ mariadb_socket }}" + + - name: 'ad_mgmt_ip' + value: "{{ networks[current_user]['vms']['ad']['ip'] }}" + + - name: 'switch01_mgmt_ip' + value: "{{ networks[current_user]['vms']['switch01']['ip'] }}" + + - name: 'node01_mgmt_ip' + value: "{{ networks[current_user]['vms']['node01']['ip'] }}" + + - name: 'node02_mgmt_ip' + value: "{{ networks[current_user]['vms']['node02']['ip'] }}" + + - name: 'wireless01_mgmt_ip' + value: "{{ networks[current_user]['vms']['wireless01']['ip'] }}" + + - name: 'linux01_mgmt_ip' + value: "{{ networks[current_user]['vms']['linux01']['ip'] }}" + + - name: 'linux02_mgmt_ip' + value: "{{ networks[current_user]['vms']['linux02']['ip'] }}" diff --git a/t/venom/vars/all.yml b/t/venom/vars/all.yml index ee17d7e04922..3fbd62d6a963 100644 --- a/t/venom/vars/all.yml +++ b/t/venom/vars/all.yml @@ -28,7 +28,6 @@ pfserver_pfqueue_workers: 2 pfserver_haproxy_admin_server_timeout: 120s # ad variables -ad_mgmt_ip: 172.17.17.100 ad_domain_id: example ad_domain_upper: EXAMPLE ad_dns_domain: example.lan @@ -38,7 +37,6 @@ ad_base_dn: dc=example,dc=lan ad_domain_user: packetfence # switchs variables -switch01_mgmt_ip: 172.17.17.201 switch01.api.url: "https://{{.switch01_mgmt_ip}}:8080" switch01.api.user: cumulus switch01.api.password: CumulusLinux! 
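A worked example of the venom_local_vars.yml filter chain above, using the _nqb registration network: '172.17.141.0/24' | ansible.netcommon.next_nth_usable(2) yields 172.17.141.2, the second usable host, matching the static 172.17.2.2 removed from t/venom/vars/all.yml here, and '172.17.141.0/24' | ansible.netcommon.ipaddr('netmask') yields 255.255.255.0. The same arithmetic sketched with Ruby's stdlib, for checking values by hand:

    require 'ipaddr'
    subnet = IPAddr.new('172.17.141.0/24')
    subnet.to_range.to_a[2].to_s                             # => "172.17.141.2"
    IPAddr.new('255.255.255.255').mask(subnet.prefix).to_s   # => "255.255.255.0"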
@@ -46,12 +44,10 @@ switch01.dot1x_interface.id: swp12
 switch01.dot1x_interface.mac: 44:38:39:00:00:12
 
 # nodes variables
-node01_mgmt_ip: 172.17.17.251
 node01_ens7_mac_address: 00:03:00:11:11:01
 node01_ens7_mac_address_url_encoded: 00%3A03%3A00%3A11%3A11%3A01
 
 # wireless01 variables
-wireless01_mgmt_ip: 172.17.17.210
 wireless01_wlan1_mac_address: 02:00:00:00:01:00
 wireless01_wlan1_mac_address_url_encoded: 02%3A00%3A00%3A00%3A01%3A00
 wireless01.dot1x_interface.mac: 02:00:00:00:00:00
@@ -238,14 +234,10 @@ configurator.interfaces.mgmt.netmask: '{{.pfserver_mgmt_netmask}}'
 
 # Interface registration
 configurator.interfaces.reg.index: 4
-configurator.interfaces.reg.ip: 172.17.2.2
-configurator.interfaces.reg.netmask: 255.255.255.0
 configurator.interfaces.reg.dhcpd_enabled: enabled
 
 # Interface isolation
 configurator.interfaces.iso.index: 5
-configurator.interfaces.iso.ip: 172.17.3.2
-configurator.interfaces.iso.netmask: 255.255.255.0
 configurator.interfaces.iso.dhcpd_enabled: enabled
 
 # DNS servers

From 86ba9ada25375da184b8ccaca6a4d7e4148c540a Mon Sep 17 00:00:00 2001
From: nqb
Date: Mon, 22 Nov 2021 11:31:13 +0100
Subject: [PATCH 12/21] add Make targets for local tests

---
 t/venom/Makefile | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/t/venom/Makefile b/t/venom/Makefile
index d74be7c34bbb..98a2a7d32b81 100644
--- a/t/venom/Makefile
+++ b/t/venom/Makefile
@@ -109,10 +109,16 @@ dot1x_eap_tls_deb11:
 #==============================================================================
 # Targets for local tests
 #==============================================================================
-# No clean
-unit_tests_el8_w:
+local_configurator_el8:
 	make \
-	PF_VM_NAME=pfel8dev \
-	SCENARIOS_TO_RUN=unit_tests \
-	$(MAKE_TARGET)
+	PF_VM_NAME=pfel8localdev \
+	SCENARIOS_TO_RUN=configurator \
+	run_w_clean
+
+local_configurator_deb11:
+	make \
+	PF_VM_NAME=pfdeb11localdev \
+	SCENARIOS_TO_RUN=configurator \
+	run_w_clean
+

From 6269f8ca079356805686bc69222c3b951c10369f Mon Sep 17 00:00:00 2001
From: nqb
Date: Mon, 22 Nov 2021 13:44:26 +0100
Subject: [PATCH 13/21] bump Ansible collection version for localdev VM

---
 addons/vagrant/requirements.yml | 2 +-
 t/venom/requirements.yml        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/addons/vagrant/requirements.yml b/addons/vagrant/requirements.yml
index 0e2bbb09b4a1..9f3a8a76cd8a 100644
--- a/addons/vagrant/requirements.yml
+++ b/addons/vagrant/requirements.yml
@@ -13,7 +13,7 @@ roles:
 
 collections:
   - name: inverse_inc.packetfence
-    version: 1.2.1
+    version: 1.2.1-1
   - name: debops.debops
     version: 2.3.2
   - name: inverse_inc.windows
diff --git a/t/venom/requirements.yml b/t/venom/requirements.yml
index 64070d467dbc..bf3ad9644b83 100644
--- a/t/venom/requirements.yml
+++ b/t/venom/requirements.yml
@@ -11,7 +11,7 @@ roles:
 
 collections:
   - name: inverse_inc.packetfence
-    version: 1.2.1
+    version: 1.2.1-1
   - name: debops.debops
     version: 2.3.2
   - name: inverse_inc.windows

From 3beb9b4dd34d9e2a224ea935d1c828a0d4bd079c Mon Sep 17 00:00:00 2001
From: nqb
Date: Wed, 24 Nov 2021 08:56:24 +0100
Subject: [PATCH 14/21] secret variables should be defined per user

---
 .../pfservers/rhel_subscription.yml           |  2 +-
 .../group_vars/pfservers/venom_local_vars.yml |  8 +++++-
 addons/vagrant/inventory/hosts                | 26 +++++++++++++++++++
 t/venom/vars/all.yml                          |  4 ---
 4 files changed, 34 insertions(+), 6 deletions(-)

diff --git a/addons/vagrant/inventory/group_vars/pfservers/rhel_subscription.yml b/addons/vagrant/inventory/group_vars/pfservers/rhel_subscription.yml
index c60a2347ad71..cd492b68f57c 100644
--- a/addons/vagrant/inventory/group_vars/pfservers/rhel_subscription.yml
+++ b/addons/vagrant/inventory/group_vars/pfservers/rhel_subscription.yml
@@ -1,5 +1,5 @@
 ---
-rhel_sub_psono_secret_id: 'e9d98894-bd22-4f32-8b59-653aceb72ec4'
+rhel_sub_psono_secret_id: "{{ users_vars[dict_name]['vars']['rhel_sub_secret_id'] }}"
 
 # can be used if variable above is not defined
 # rhel_subscription_user:
diff --git a/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml b/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml
index 86e8df6d95d6..469399d1cee1 100644
--- a/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml
+++ b/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml
@@ -63,4 +63,10 @@ venom_local_vars:
     value: "{{ networks[current_user]['vms']['linux01']['ip'] }}"
 
   - name: 'linux02_mgmt_ip'
-    value: "{{ networks[current_user]['vms']['linux02']['ip'] }}"
+    value: "{{ users_vars[dict_name]['vms']['linux02']['ip'] }}"
+
+  - name: 'fingerbank_api_key.secret_id'
+    value: "{{ users_vars[dict_name]['vars']['fingerbank_api_key']['secret_id'] }}"
+
+  - name: 'fingerbank_api_key.email'
+    value: "{{ users_vars[dict_name]['vars']['fingerbank_api_key']['email'] }}"
diff --git a/addons/vagrant/inventory/hosts b/addons/vagrant/inventory/hosts
index 73a35e68fb97..0d18753aaf63 100644
--- a/addons/vagrant/inventory/hosts
+++ b/addons/vagrant/inventory/hosts
@@ -196,6 +196,11 @@ all:
     networks:
       # ci
      gitlab-runner:
+        vars:
+          rhel_sub_secret_id: 'e9d98894-bd22-4f32-8b59-653aceb72ec4'
+          fingerbank_api_key:
+            secret_id: 'd2c4d4f8-c5b1-4281-a724-e4ade5c31fe1'
+            email: 'support@inverse.ca'
         networks:
           - name: mgmt_ci
             subnet: '172.17.200.0/24'
@@ -269,6 +274,9 @@ all:
             netmask: '255.255.255.0'
       # local dev
       _nqb:
+        vars:
+          rhel_sub_secret_id: ''
+          fingerbank_api_key_secret_id: ''
         networks:
           - name: mgmt_nqb
             subnet: '172.17.140.0/24'
@@ -341,6 +349,9 @@ all:
             ip: '172.17.140.18'
             netmask: '255.255.255.0'
       _jrouzier:
+        vars:
+          rhel_sub_secret_id: ''
+          fingerbank_api_key_secret_id: ''
         networks:
           - name: mgmt_jrouzier
             subnet: '172.17.115.0/24'
@@ -413,6 +424,9 @@ all:
             ip: '172.17.115.18'
             netmask: '255.255.255.0'
       _lzammit:
+        vars:
+          rhel_sub_secret_id: ''
+          fingerbank_api_key_secret_id: ''
         networks:
           - name: mgmt_lzammit
             subnet: '172.17.145.0/24'
@@ -485,6 +499,9 @@ all:
             ip: '172.17.145.18'
             netmask: '255.255.255.0'
       _jgoimard:
+        vars:
+          rhel_sub_secret_id: ''
+          fingerbank_api_key_secret_id: ''
         networks:
           - name: mgmt_jgoimard
             subnet: '172.17.120.0/24'
@@ -557,6 +574,9 @@ all:
             ip: '172.17.120.18'
             netmask: '255.255.255.0'
       _dsatkunas:
+        vars:
+          rhel_sub_secret_id: ''
+          fingerbank_api_key_secret_id: ''
         networks:
           - name: mgmt_dsatkunas
             subnet: '172.17.125.0/24'
@@ -629,6 +649,9 @@ all:
             ip: '172.17.125.18'
             netmask: '255.255.255.0'
       _fdurand:
+        vars:
+          rhel_sub_secret_id: ''
+          fingerbank_api_key_secret_id: ''
         networks:
           - name: mgmt_fdurand
             subnet: '172.17.135.0/24'
@@ -701,6 +724,9 @@ all:
             ip: '172.17.135.18'
             netmask: '255.255.255.0'
       _jsemaan:
+        vars:
+          rhel_sub_secret_id: ''
+          fingerbank_api_key_secret_id: ''
         networks:
           - name: mgmt_jsemaan
             subnet: '172.17.155.0/24'
diff --git a/t/venom/vars/all.yml b/t/venom/vars/all.yml
index 3fbd62d6a963..56192356063e 100644
--- a/t/venom/vars/all.yml
+++ b/t/venom/vars/all.yml
@@ -67,10 +67,6 @@ access_level_user_and_node.description: Users and Nodes management
 roles.registration.vlan_id: 2
 roles.isolation.vlan_id: 3
 
-# Fingerbank
-fingerbank_api_key.email: support@inverse.ca
-fingerbank_api_key.secret_id: d2c4d4f8-c5b1-4281-a724-e4ade5c31fe1
-
 ### Maintenance tasks
 # Node cleanup
 node_cleanup.delete_window.interval: 1

From 991169a038de8728a5872d1891fd1864e2a9dc60 Mon Sep 17 00:00:00 2001
From: nqb
Date: Wed, 24 Nov 2021 10:01:45 +0100
Subject: [PATCH 15/21] use default settings if user doesn't exist in hash for
 Vagrant and Ansible; rename vars to simplify understanding

---
 addons/vagrant/cumulus/Vagrantfile            | 54 ++++++++------
 .../group_vars/linux_servers/freeradius.yml   |  2 +-
 .../group_vars/linux_servers/tinyproxy.yml    |  2 +-
 .../group_vars/pfservers/venom_local_vars.yml | 20 ++---
 .../group_vars/service_rsyslog/rsyslog.yml    |  2 +-
 addons/vagrant/inventory/hosts                | 74 ++++++++++---------
 addons/vagrant/linux_servers/Vagrantfile      | 20 +++--
 addons/vagrant/pfservers/Vagrantfile          | 38 ++++++----
 addons/vagrant/winservers/Vagrantfile         | 20 +++--
 addons/vagrant/wireless/Vagrantfile           | 18 +++--
 10 files changed, 142 insertions(+), 108 deletions(-)

diff --git a/addons/vagrant/cumulus/Vagrantfile b/addons/vagrant/cumulus/Vagrantfile
index e55c8d4022b7..72633e9b8012 100644
--- a/addons/vagrant/cumulus/Vagrantfile
+++ b/addons/vagrant/cumulus/Vagrantfile
@@ -86,18 +86,24 @@ SCRIPT
 
 # Read YAML file with box and network details
 inventory = YAML.load_file('inventory/hosts')
-current_user = ENV['USER']
-networks = inventory['all']['vars']['networks'][current_user]
-switch01_ip = networks['vms']['switch01']['ip']
-switch01_netmask = networks['vms']['switch01']['netmask']
-inline_ip = networks['vms']['switch01']['inline_ip']
-inline_netmask = networks['vms']['switch01']['inline_netmask']
-inline_l3_ip = networks['vms']['switch01']['inline_l3_ip']
-inline_l3_netmask = networks['vms']['switch01']['inline_l3_netmask']
-node01_ip = networks['vms']['node01']['ip']
-node01_netmask = networks['vms']['node01']['netmask']
-node02_ip = networks['vms']['node02']['ip']
-node02_netmask = networks['vms']['node02']['netmask']
+user = ENV['USER']
+users_vars = inventory['all']['vars']['users_vars']
+if users_vars[user]
+  dict_name = user
+else
+  dict_name = 'gitlab-runner'
+end
+user_vars = inventory['all']['vars']['users_vars'][dict_name]
+switch01_ip = user_vars['vms']['switch01']['ip']
+switch01_netmask = user_vars['vms']['switch01']['netmask']
+inline_ip = user_vars['vms']['switch01']['inline_ip']
+inline_netmask = user_vars['vms']['switch01']['inline_netmask']
+inline_l3_ip = user_vars['vms']['switch01']['inline_l3_ip']
+inline_l3_netmask = user_vars['vms']['switch01']['inline_l3_netmask']
+node01_ip = user_vars['vms']['node01']['ip']
+node01_netmask = user_vars['vms']['node01']['netmask']
+node02_ip = user_vars['vms']['node02']['ip']
+node02_netmask = user_vars['vms']['node02']['netmask']
 
 Vagrant.configure("2") do |config|
 
@@ -124,34 +130,34 @@ Vagrant.configure("2") do |config|
     # link for swp1 --> mgmt_network (vlan 17)
     device.vm.network "private_network",
       :mac => "a0:00:00:00:00:01",
-      :libvirt__network_name => networks['networks'][0]['name'],
-      :ip => networks['networks'][0]['subnet'],
+      :libvirt__network_name => user_vars['networks'][0]['name'],
+      :ip => user_vars['networks'][0]['subnet'],
       :libvirt__dhcp_enabled => false,
-      :libvirt__forward_mode => networks['networks'][0]['forward_mode'],
+      :libvirt__forward_mode => user_vars['networks'][0]['forward_mode'],
       auto_config: false
     # link for swp2 --> reg_network (vlan 2)
     device.vm.network "private_network",
      :mac => "44:38:39:00:00:02",
-      :libvirt__network_name => networks['networks'][1]['name'],
-      :ip => networks['networks'][1]['subnet'],
+      :libvirt__network_name => user_vars['networks'][1]['name'],
+      :ip => user_vars['networks'][1]['subnet'],
       :libvirt__dhcp_enabled => false,
-      :libvirt__forward_mode => networks['networks'][1]['forward_mode'],
+      :libvirt__forward_mode => user_vars['networks'][1]['forward_mode'],
       auto_config: false
     # link for swp3 --> iso_network (vlan 3)
     device.vm.network "private_network",
       :mac => "44:38:39:00:00:03",
-      :libvirt__network_name => networks['networks'][2]['name'],
-      :ip => networks['networks'][2]['subnet'],
+      :libvirt__network_name => user_vars['networks'][2]['name'],
+      :ip => user_vars['networks'][2]['subnet'],
       :libvirt__dhcp_enabled => false,
-      :libvirt__forward_mode => networks['networks'][2]['forward_mode'],
+      :libvirt__forward_mode => user_vars['networks'][2]['forward_mode'],
       auto_config: false
     # link for swp6 --> inline_network (vlan 6)
     device.vm.network "private_network",
       :mac => "44:38:39:00:00:06",
-      :libvirt__network_name => networks['networks'][3]['name'],
-      :ip => networks['networks'][3]['subnet'],
+      :libvirt__network_name => user_vars['networks'][3]['name'],
+      :ip => user_vars['networks'][3]['subnet'],
       :libvirt__dhcp_enabled => false,
-      :libvirt__forward_mode => networks['networks'][3]['forward_mode'],
+      :libvirt__forward_mode => user_vars['networks'][3]['forward_mode'],
       auto_config: false
     # link for swp11 --> node01:ens6
     device.vm.network "private_network",
diff --git a/addons/vagrant/inventory/group_vars/linux_servers/freeradius.yml b/addons/vagrant/inventory/group_vars/linux_servers/freeradius.yml
index d5f95a981d4d..aa5e04f2e77f 100644
--- a/addons/vagrant/inventory/group_vars/linux_servers/freeradius.yml
+++ b/addons/vagrant/inventory/group_vars/linux_servers/freeradius.yml
@@ -10,7 +10,7 @@ freeradius__configuration:
   - name: 'packetfence_servers'
     raw: |
       client pf {
-        ipaddr = {{ networks[0]['subnet'] }}
+        ipaddr = {{ users_vars[dict_name]['networks'][1]['subnet'] }}
         secret = testing123
       }
     state: 'present'
diff --git a/addons/vagrant/inventory/group_vars/linux_servers/tinyproxy.yml b/addons/vagrant/inventory/group_vars/linux_servers/tinyproxy.yml
index c8a755978973..a5a6a4756d0f 100644
--- a/addons/vagrant/inventory/group_vars/linux_servers/tinyproxy.yml
+++ b/addons/vagrant/inventory/group_vars/linux_servers/tinyproxy.yml
@@ -1,3 +1,3 @@
 ---
 tinyproxy__allow:
-  - "{{ networks[0]['subnet'] }}"
+  - "{{ users_vars[dict_name]['networks'][1]['subnet'] }}"
diff --git a/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml b/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml
index 469399d1cee1..02eca97cde43 100644
--- a/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml
+++ b/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml
@@ -9,19 +9,19 @@ venom_local_vars:
 
   # get second IP (.2) usable without /CIDR
   - name: 'configurator.interfaces.reg.ip'
-    value: "{{ networks[current_user]['networks'][1]['subnet'] | ansible.netcommon.next_nth_usable(2) | ansible.netcommon.ipaddr('address') }}"
+    value: "{{ users_vars[dict_name]['networks'][1]['subnet'] | ansible.netcommon.next_nth_usable(2) | ansible.netcommon.ipaddr('address') }}"
 
   # get netmask based on CIDR
   - name: 'configurator.interfaces.reg.netmask'
-    value: "{{ networks[current_user]['networks'][1]['subnet'] | ansible.netcommon.ipaddr('netmask') }}"
+    value: "{{ users_vars[dict_name]['networks'][1]['subnet'] | ansible.netcommon.ipaddr('netmask') }}"
 
   # get second IP (.2) usable without /CIDR
   - name: 'configurator.interfaces.iso.ip'
-    value: "{{ networks[current_user]['networks'][2]['subnet'] | ansible.netcommon.next_nth_usable(2) | ansible.netcommon.ipaddr('address') }}"
+    value: "{{ users_vars[dict_name]['networks'][2]['subnet'] | ansible.netcommon.next_nth_usable(2) | ansible.netcommon.ipaddr('address') }}"
 
   # get netmask based on CIDR
   - name: 'configurator.interfaces.iso.netmask'
-    value: "{{ networks[current_user]['networks'][2]['subnet'] | ansible.netcommon.ipaddr('netmask') }}"
+    value: "{{ users_vars[dict_name]['networks'][2]['subnet'] | ansible.netcommon.ipaddr('netmask') }}"
 
   - name: 'smtp_server'
     value: "{{ packetfence_install__mgmt_interface['ip'] }}"
@@ -45,22 +45,22 @@ venom_local_vars:
     value: "{{ mariadb_socket }}"
 
   - name: 'ad_mgmt_ip'
-    value: "{{ networks[current_user]['vms']['ad']['ip'] }}"
+    value: "{{ users_vars[dict_name]['vms']['ad']['ip'] }}"
 
   - name: 'switch01_mgmt_ip'
-    value: "{{ networks[current_user]['vms']['switch01']['ip'] }}"
+    value: "{{ users_vars[dict_name]['vms']['switch01']['ip'] }}"
 
   - name: 'node01_mgmt_ip'
-    value: "{{ networks[current_user]['vms']['node01']['ip'] }}"
+    value: "{{ users_vars[dict_name]['vms']['node01']['ip'] }}"
 
   - name: 'node02_mgmt_ip'
-    value: "{{ networks[current_user]['vms']['node02']['ip'] }}"
+    value: "{{ users_vars[dict_name]['vms']['node02']['ip'] }}"
 
   - name: 'wireless01_mgmt_ip'
-    value: "{{ networks[current_user]['vms']['wireless01']['ip'] }}"
+    value: "{{ users_vars[dict_name]['vms']['wireless01']['ip'] }}"
 
   - name: 'linux01_mgmt_ip'
-    value: "{{ networks[current_user]['vms']['linux01']['ip'] }}"
+    value: "{{ users_vars[dict_name]['vms']['linux01']['ip'] }}"
 
   - name: 'linux02_mgmt_ip'
     value: "{{ users_vars[dict_name]['vms']['linux02']['ip'] }}"
diff --git a/addons/vagrant/inventory/group_vars/service_rsyslog/rsyslog.yml b/addons/vagrant/inventory/group_vars/service_rsyslog/rsyslog.yml
index 993fe73e174d..6242ef7b3028 100644
--- a/addons/vagrant/inventory/group_vars/service_rsyslog/rsyslog.yml
+++ b/addons/vagrant/inventory/group_vars/service_rsyslog/rsyslog.yml
@@ -1,4 +1,4 @@
 ---
 # allow all machines on management network to send logs to rsyslog
 rsyslog__group_allow:
-  - "{{ networks[current_user]['networks'][0]['subnet'] }}"
+  - "{{ users_vars[dict_name]['networks'][0]['subnet'] }}"
diff --git a/addons/vagrant/inventory/hosts b/addons/vagrant/inventory/hosts
index 0d18753aaf63..d42b4d0893d3 100644
--- a/addons/vagrant/inventory/hosts
+++ b/addons/vagrant/inventory/hosts
@@ -8,8 +8,8 @@ all:
     cumulus:
       hosts:
         switch01:
-          mgmt_ip: "{{ networks[current_user]['vms']['switch01']['ip'] }}"
-          mgmt_netmask: "{{ networks[current_user]['vms']['switch01']['netmask'] }}"
+          mgmt_ip: "{{ users_vars[dict_name]['vms']['switch01']['ip'] }}"
+          mgmt_netmask: "{{ users_vars[dict_name]['vms']['switch01']['netmask'] }}"
           box: CumulusCommunity/cumulus-vx
           box_version: 3.7.12
           ansible_host: "{{ mgmt_ip }}"
@@ -19,16 +19,16 @@ all:
         node01:
           box: debian/bullseye64
          box_version: 11.20211018.1
-          mgmt_ip: "{{ networks[current_user]['vms']['node01']['ip'] }}"
-          mgmt_netmask: "{{ networks[current_user]['vms']['node01']['netmask'] }}"
+          mgmt_ip: "{{ users_vars[dict_name]['vms']['node01']['ip'] }}"
+          mgmt_netmask: "{{ users_vars[dict_name]['vms']['node01']['netmask'] }}"
           ansible_host: "{{ mgmt_ip }}"
           # only used when run outside Vagrant
           ansible_python_interpreter: '/usr/bin/python3'
         node02:
           box: debian/bullseye64
           box_version: 11.20211018.1
-          mgmt_ip: "{{ networks[current_user]['vms']['node02']['ip'] }}"
-          mgmt_netmask: "{{ networks[current_user]['vms']['node02']['netmask'] }}"
+          mgmt_ip: "{{ users_vars[dict_name]['vms']['node02']['ip'] }}"
+          mgmt_netmask: "{{ users_vars[dict_name]['vms']['node02']['netmask'] }}"
           ansible_host: "{{ mgmt_ip }}"
           # only used when run outside Vagrant
           ansible_python_interpreter: '/usr/bin/python3'
@@ -38,8 +38,8 @@ all:
         ad:
           box: jborean93/WindowsServer2016
           box_version: 0.7.0
-          mgmt_ip: "{{ networks[current_user]['vms']['ad']['ip'] }}"
-          mgmt_netmask: "{{ networks[current_user]['vms']['ad']['netmask'] }}"
+          mgmt_ip: "{{ users_vars[dict_name]['vms']['ad']['ip'] }}"
+          mgmt_netmask: "{{ users_vars[dict_name]['vms']['ad']['netmask'] }}"
           ansible_host: "{{ mgmt_ip }}"
           cpus: 2
           memory: 2048
@@ -49,8 +49,8 @@ all:
         wireless01:
          box: debian/bullseye64
           box_version: 11.20211018.1
-          mgmt_ip: "{{ networks[current_user]['vms']['wireless01']['ip'] }}"
-          mgmt_netmask: "{{ networks[current_user]['vms']['wireless01']['netmask'] }}"
+          mgmt_ip: "{{ users_vars[dict_name]['vms']['wireless01']['ip'] }}"
+          mgmt_netmask: "{{ users_vars[dict_name]['vms']['wireless01']['netmask'] }}"
           ansible_host: "{{ mgmt_ip }}"
           ansible_python_interpreter: '/usr/bin/python3'
           cpus: 1
@@ -65,8 +65,8 @@ all:
        linux01:
           box: debian/bullseye64
           box_version: 11.20211018.1
-          mgmt_ip: "{{ networks[current_user]['vms']['linux01']['ip'] }}"
-          mgmt_netmask: "{{ networks[current_user]['vms']['linux01']['netmask'] }}"
+          mgmt_ip: "{{ users_vars[dict_name]['vms']['linux01']['ip'] }}"
+          mgmt_netmask: "{{ users_vars[dict_name]['vms']['linux01']['netmask'] }}"
           ansible_host: "{{ mgmt_ip }}"
           ansible_python_interpreter: '/usr/bin/python3'
           cpus: 1
@@ -74,8 +74,8 @@ all:
         linux02:
           box: debian/bullseye64
           box_version: 11.20211018.1
-          mgmt_ip: "{{ networks[current_user]['vms']['linux02']['ip'] }}"
-          mgmt_netmask: "{{ networks[current_user]['vms']['linux02']['netmask'] }}"
+          mgmt_ip: "{{ users_vars[dict_name]['vms']['linux02']['ip'] }}"
+          mgmt_netmask: "{{ users_vars[dict_name]['vms']['linux02']['netmask'] }}"
           ansible_host: "{{ mgmt_ip }}"
           ansible_python_interpreter: '/usr/bin/python3'
           cpus: 1
@@ -104,16 +104,16 @@ all:
         pfel8dev:
           box: generic/rhel8
           box_version: '3.4.2'
-          mgmt_ip: "{{ networks[current_user]['vms']['pfel8dev']['ip'] }}"
-          mgmt_netmask: "{{ networks[current_user]['vms']['pfel8dev']['netmask'] }}"
+          mgmt_ip: "{{ users_vars[dict_name]['vms']['pfel8dev']['ip'] }}"
+          mgmt_netmask: "{{ users_vars[dict_name]['vms']['pfel8dev']['netmask'] }}"
           ansible_host: "{{ mgmt_ip }}"
           cpus: 2
           memory: 6144
         pfdeb11dev:
           box: debian/bullseye64
           box_version: 11.20211018.1
-          mgmt_ip: "{{ networks[current_user]['vms']['pfdeb11dev']['ip'] }}"
-          mgmt_netmask: "{{ networks[current_user]['vms']['pfdeb11dev']['netmask'] }}"
+          mgmt_ip: "{{ users_vars[dict_name]['vms']['pfdeb11dev']['ip'] }}"
+          mgmt_netmask: "{{ users_vars[dict_name]['vms']['pfdeb11dev']['netmask'] }}"
           ansible_host: "{{ mgmt_ip }}"
           ansible_python_interpreter: '/usr/bin/python3'
           cpus: 2
@@ -121,16 +121,16 @@ all:
         el8dev:
           box: generic/rhel8
           box_version: '3.4.2'
-          mgmt_ip: "{{ networks[current_user]['vms']['el8dev']['ip'] }}"
-          mgmt_netmask: "{{ networks[current_user]['vms']['el8dev']['netmask'] }}"
+          mgmt_ip: "{{ users_vars[dict_name]['vms']['el8dev']['ip'] }}"
+          mgmt_netmask: "{{ users_vars[dict_name]['vms']['el8dev']['netmask'] }}"
           ansible_host: "{{ mgmt_ip }}"
           cpus: 2
           memory: 6144
         deb11dev:
           box: debian/bullseye64
           box_version: 11.20211018.1
-          mgmt_ip: "{{ networks[current_user]['vms']['deb11dev']['ip'] }}"
-          mgmt_netmask: "{{ networks[current_user]['vms']['deb11dev']['netmask'] }}"
+          mgmt_ip: "{{ users_vars[dict_name]['vms']['deb11dev']['ip'] }}"
+          mgmt_netmask: "{{ users_vars[dict_name]['vms']['deb11dev']['netmask'] }}"
           ansible_host: "{{ mgmt_ip }}"
           ansible_python_interpreter: '/usr/bin/python3'
           cpus: 2
@@ -139,22 +139,22 @@ all:
       localdev:
        hosts:
           localhost:
-            mgmt_ip: "{{ networks[current_user]['vms']['localhost']['ip'] }}"
-            mgmt_netmask: "{{ networks[current_user]['vms']['localhost']['netmask'] }}"
+            mgmt_ip: "{{ users_vars[dict_name]['vms']['localhost']['ip'] }}"
+            mgmt_netmask: "{{ users_vars[dict_name]['vms']['localhost']['netmask'] }}"
             ansible_connection: local
           pfel8localdev:
             box: generic/rhel8
             box_version: '3.4.2'
-            mgmt_ip: "{{ networks[current_user]['vms']['pfel8localdev']['ip'] }}"
-            mgmt_netmask: "{{ networks[current_user]['vms']['pfel8localdev']['netmask'] }}"
+            mgmt_ip: "{{ users_vars[dict_name]['vms']['pfel8localdev']['ip'] }}"
+            mgmt_netmask: "{{ users_vars[dict_name]['vms']['pfel8localdev']['netmask'] }}"
             ansible_host: "{{ mgmt_ip }}"
             cpus: 2
             memory: 6144
           pfdeb11localdev:
             box: debian/bullseye64
             box_version: 11.20211018.1
-            mgmt_ip: "{{ networks[current_user]['vms']['pfdeb11localdev']['ip'] }}"
-            mgmt_netmask: "{{ networks[current_user]['vms']['pfdeb11localdev']['netmask'] }}"
+            mgmt_ip: "{{ users_vars[dict_name]['vms']['pfdeb11localdev']['ip'] }}"
+            mgmt_netmask: "{{ users_vars[dict_name]['vms']['pfdeb11localdev']['netmask'] }}"
             ansible_host: "{{ mgmt_ip }}"
             ansible_python_interpreter: '/usr/bin/python3'
             cpus: 2
@@ -165,8 +165,8 @@ all:
          pfel8stable:
            box: generic/rhel8
            box_version: '3.4.2'
-           mgmt_ip: "{{ networks[current_user]['vms']['pfel8stable']['ip'] }}"
-           mgmt_netmask: "{{ networks[current_user]['vms']['pfel8stable']['netmask'] }}"
+           mgmt_ip: "{{ users_vars[dict_name]['vms']['pfel8stable']['ip'] }}"
+           mgmt_netmask: "{{ users_vars[dict_name]['vms']['pfel8stable']['netmask'] }}"
           ansible_host: "{{ mgmt_ip }}"
           cpus: 2
           memory: 8192
@@ -174,16 +174,16 @@ all:
        pfdeb9stable:
          box: inverse-inc/pfdeb9stable
          box_version: 10.3.20210414165339
-          mgmt_ip: "{{ networks[current_user]['vms']['pfdeb9stable']['ip'] }}"
-          mgmt_netmask: "{{ networks[current_user]['vms']['pfdeb9stable']['netmask'] }}"
+          mgmt_ip: "{{ users_vars[dict_name]['vms']['pfdeb9stable']['ip'] }}"
+          mgmt_netmask: "{{ users_vars[dict_name]['vms']['pfdeb9stable']['netmask'] }}"
           ansible_host: "{{ mgmt_ip }}"
           cpus: 2
           memory: 8192
         pfdeb11stable:
           box: debian/bullseye64
           box_version: 11.20211018.1
-          mgmt_ip: "{{ networks[current_user]['vms']['pfdeb11stable']['ip'] }}"
-          mgmt_netmask: "{{ networks[current_user]['vms']['pfdeb11stable']['netmask'] }}"
+          mgmt_ip: "{{ users_vars[dict_name]['vms']['pfdeb11stable']['ip'] }}"
+          mgmt_netmask: "{{ users_vars[dict_name]['vms']['pfdeb11stable']['netmask'] }}"
           ansible_host: "{{ mgmt_ip }}"
           ansible_python_interpreter: '/usr/bin/python3'
           cpus: 2
@@ -192,8 +192,10 @@ all:
 
   vars:
     tz: UTC
-    current_user: "{{ lookup('env', 'USER') }}"
-    networks:
+    user: "{{ lookup('env', 'USER') }}"
+    # if variable "user" doesn't exist in users_vars, we fall back to "gitlab-runner" as dict_name
+    dict_name: "{{ user if users_vars[user]|d() else 'gitlab-runner' }}"
+    users_vars:
       # ci
      gitlab-runner:
         vars:
diff --git a/addons/vagrant/linux_servers/Vagrantfile b/addons/vagrant/linux_servers/Vagrantfile
index 40c4591815a2..3e0d5e6e7bb7 100644
--- a/addons/vagrant/linux_servers/Vagrantfile
+++ b/addons/vagrant/linux_servers/Vagrantfile
@@ -3,11 +3,17 @@
 # Require YAML module
 require 'yaml'
- 
+
 # Read YAML file with box and network details
 inventory = YAML.load_file('inventory/hosts')
-current_user = ENV['USER']
-networks = inventory['all']['vars']['networks'][current_user]
+user = ENV['USER']
+users_vars = inventory['all']['vars']['users_vars']
+if users_vars[user]
+  dict_name = user
+else
+  dict_name = 'gitlab-runner'
+end
+user_vars = inventory['all']['vars']['users_vars'][dict_name]
 
 Vagrant.configure("2") do |config|
   # loop on **all** host(s) in linux_servers group in inventory to create VM(s)
@@ -23,11 +29,11 @@ Vagrant.configure("2") do |config|
       # only from our expected subnet. Allow traffic between guests. Deny
       # all other inbound. Deny all other outbound.
       srv.vm.network "private_network",
-        :libvirt__network_name => networks['networks'][0]['name'],
-        :ip => networks['vms'][server]['ip'],
-        :netmask => networks['vms'][server]['netmask'],
+        :libvirt__network_name => user_vars['networks'][0]['name'],
+        :ip => user_vars['vms'][server]['ip'],
+        :netmask => user_vars['vms'][server]['netmask'],
         :libvirt__dhcp_enabled => false,
-        :libvirt__forward_mode => networks['networks'][0]['forward_mode']
+        :libvirt__forward_mode => user_vars['networks'][0]['forward_mode']
 
       srv.vm.provider "libvirt" do |v|
         v.cpus = details['cpus']
diff --git a/addons/vagrant/pfservers/Vagrantfile b/addons/vagrant/pfservers/Vagrantfile
index a2189446a243..0411c51f86ae 100644
--- a/addons/vagrant/pfservers/Vagrantfile
+++ b/addons/vagrant/pfservers/Vagrantfile
@@ -6,8 +6,16 @@ require 'yaml'
 
 # Read YAML file with box and network details
 inventory = YAML.load_file('inventory/hosts')
-current_user = ENV['USER']
-networks = inventory['all']['vars']['networks'][current_user]
+user = ENV['USER']
+users_vars = inventory['all']['vars']['users_vars']
+if users_vars[user]
+  puts "#{user} present in inventory, apply user settings"
+  dict_name = user
+else
+  puts "#{user} absent inventory, fallback to gitlab-runner settings"
+  dict_name = 'gitlab-runner'
+end
+user_vars = inventory['all']['vars']['users_vars'][dict_name]
 
 Vagrant.configure("2") do |config|
   # loop on **all** host(s) in pfservers group in inventory to create VM(s)
@@ -25,34 +33,34 @@ Vagrant.configure("2") do |config|
       # only from our expected subnet. Allow traffic between guests. Deny
      # all other inbound. Deny all other outbound.
      srv.vm.network "private_network",
-        :libvirt__network_name => networks['networks'][0]['name'],
-        :ip => networks['vms'][server]['ip'],
-        :netmask => networks['vms'][server]['netmask'],
+        :libvirt__network_name => user_vars['networks'][0]['name'],
+        :ip => user_vars['vms'][server]['ip'],
+        :netmask => user_vars['vms'][server]['netmask'],
         :libvirt__dhcp_enabled => false,
-        :libvirt__forward_mode => networks['networks'][0]['forward_mode']
+        :libvirt__forward_mode => user_vars['networks'][0]['forward_mode']
 
       # registration
       srv.vm.network "private_network",
-        :libvirt__network_name => networks['networks'][1]['name'],
-        :ip => networks['networks'][1]['subnet'],
+        :libvirt__network_name => user_vars['networks'][1]['name'],
+        :ip => user_vars['networks'][1]['subnet'],
         :libvirt__dhcp_enabled => false,
-        :libvirt__forward_mode => networks['networks'][1]['forward_mode'],
+        :libvirt__forward_mode => user_vars['networks'][1]['forward_mode'],
         auto_config: false
 
       # isolation
       srv.vm.network "private_network",
-        :libvirt__network_name => networks['networks'][2]['name'],
-        :ip => networks['networks'][2]['subnet'],
+        :libvirt__network_name => user_vars['networks'][2]['name'],
+        :ip => user_vars['networks'][2]['subnet'],
         :libvirt__dhcp_enabled => false,
-        :libvirt__forward_mode => networks['networks'][2]['forward_mode'],
+        :libvirt__forward_mode => user_vars['networks'][2]['forward_mode'],
         auto_config: false
 
       # inline
       srv.vm.network "private_network",
-        :libvirt__network_name => networks['networks'][3]['name'],
-        :ip => networks['networks'][3]['subnet'],
+        :libvirt__network_name => user_vars['networks'][3]['name'],
+        :ip => user_vars['networks'][3]['subnet'],
         :libvirt__dhcp_enabled => false,
-        :libvirt__forward_mode => networks['networks'][3]['forward_mode'],
+        :libvirt__forward_mode => user_vars['networks'][3]['forward_mode'],
         auto_config: false
 
       srv.vm.provider "libvirt" do |v|
diff --git a/addons/vagrant/winservers/Vagrantfile b/addons/vagrant/winservers/Vagrantfile
index deabb8d9d6bc..86a885fa0584 100644
--- a/addons/vagrant/winservers/Vagrantfile
+++ b/addons/vagrant/winservers/Vagrantfile
@@ -3,11 +3,17 @@
 # Require YAML module
 require 'yaml'
- 
+
 # Read YAML file with box and network details
 inventory = YAML.load_file('inventory/hosts')
-current_user = ENV['USER']
-networks = inventory['all']['vars']['networks'][current_user]
+user = ENV['USER']
+users_vars = inventory['all']['vars']['users_vars']
+if users_vars[user]
+  dict_name = user
+else
+  dict_name = 'gitlab-runner'
+end
+user_vars = inventory['all']['vars']['users_vars'][dict_name]
 
 Vagrant.configure("2") do |config|
   inventory['all']['children']['winservers']['hosts'].each do |server,details|
@@ -22,11 +28,11 @@ Vagrant.configure("2") do |config|
       # only from our expected subnet. Allow traffic between guests. Deny
      # all other inbound. Deny all other outbound.
      srv.vm.network "private_network",
-        :libvirt__network_name => networks['networks'][0]['name'],
-        :ip => networks['vms'][server]['ip'],
-        :netmask => networks['vms'][server]['netmask'],
+        :libvirt__network_name => user_vars['networks'][0]['name'],
+        :ip => user_vars['vms'][server]['ip'],
+        :netmask => user_vars['vms'][server]['netmask'],
         :libvirt__dhcp_enabled => false,
-        :libvirt__forward_mode => networks['networks'][0]['forward_mode']
+        :libvirt__forward_mode => user_vars['networks'][0]['forward_mode']
 
       srv.vm.provider "libvirt" do |v|
         v.cpus = details['cpus']
diff --git a/addons/vagrant/wireless/Vagrantfile b/addons/vagrant/wireless/Vagrantfile
index dd724da0d3eb..77eaa80ce8d0 100644
--- a/addons/vagrant/wireless/Vagrantfile
+++ b/addons/vagrant/wireless/Vagrantfile
@@ -6,8 +6,14 @@ require 'yaml'
 
 # Read YAML file with box and network details
 inventory = YAML.load_file('inventory/hosts')
-current_user = ENV['USER']
-networks = inventory['all']['vars']['networks'][current_user]
+user = ENV['USER']
+users_vars = inventory['all']['vars']['users_vars']
+if users_vars[user]
+  dict_name = user
+else
+  dict_name = 'gitlab-runner'
+end
+user_vars = inventory['all']['vars']['users_vars'][dict_name]
 
 Vagrant.configure("2") do |config|
   inventory['all']['children']['wireless']['hosts'].each do |server,details|
@@ -28,11 +34,11 @@ Vagrant.configure("2") do |config|
       # only from our expected subnet. Allow traffic between guests. Deny
       # all other inbound. Deny all other outbound.
       srv.vm.network "private_network",
-        :libvirt__network_name => networks['networks'][0]['name'],
-        :ip => networks['vms'][server]['ip'],
-        :netmask => networks['vms'][server]['netmask'],
+        :libvirt__network_name => user_vars['networks'][0]['name'],
+        :ip => user_vars['vms'][server]['ip'],
+        :netmask => user_vars['vms'][server]['netmask'],
         :libvirt__dhcp_enabled => false,
-        :libvirt__forward_mode => networks['networks'][0]['forward_mode']
+        :libvirt__forward_mode => user_vars['networks'][0]['forward_mode']
     end
   end
 end

From 5efa80cb83d234ac4e2afa8672a7bcb500b6f082 Mon Sep 17 00:00:00 2001
From: JeGoi <13801368+JeGoi@users.noreply.github.com>
Date: Wed, 24 Nov 2021 10:53:02 -0500
Subject: [PATCH 16/21] Add jgoimard and nqb secret IDs

---
 addons/vagrant/inventory/hosts       | 32 ++++++++++++++++++++--------
 addons/vagrant/pfservers/Vagrantfile |  4 ++--
 t/venom/Makefile                     | 12 +++++++----
 3 files changed, 33 insertions(+), 15 deletions(-)

diff --git a/addons/vagrant/inventory/hosts b/addons/vagrant/inventory/hosts
index d42b4d0893d3..6f3a73f8f2bb 100644
--- a/addons/vagrant/inventory/hosts
+++ b/addons/vagrant/inventory/hosts
@@ -277,8 +277,10 @@ all:
       # local dev
       _nqb:
         vars:
-          rhel_sub_secret_id: ''
-          fingerbank_api_key_secret_id: ''
+          rhel_sub_secret_id: '7ecb993e-5179-4eb5-a686-7d7943bf65eb'
+          fingerbank_api_key:
+            secret_id: '98090324-0b00-4877-a0b2-e8a215350c72'
+            email: 'nqb+git@azyx.fr'
         networks:
           - name: mgmt_nqb
             subnet: '172.17.140.0/24'
@@ -353,7 +355,9 @@ all:
       _jrouzier:
         vars:
           rhel_sub_secret_id: ''
-          fingerbank_api_key_secret_id: ''
+          fingerbank_api_key:
+            secret_id: ''
+            email: ''
         networks:
           - name: mgmt_jrouzier
             subnet: '172.17.115.0/24'
@@ -428,7 +432,9 @@ all:
       _lzammit:
         vars:
           rhel_sub_secret_id: ''
-          fingerbank_api_key_secret_id: ''
+          fingerbank_api_key:
+            secret_id: ''
+            email: ''
         networks:
           - name: mgmt_lzammit
             subnet: '172.17.145.0/24'
@@ -502,8 +508,10 @@ all:
             netmask: '255.255.255.0'
       _jgoimard:
         vars:
-          rhel_sub_secret_id: ''
-          fingerbank_api_key_secret_id: ''
+          rhel_sub_secret_id: '14e54e10-247a-4a38-b19c-b1f8209daf70'
+          fingerbank_api_key:
+            secret_id: 'ba503744-9ea6-4be3-bd73-d3d5f75d7092'
+            email: 'jgoimard@inverse.ca'
         networks:
           - name: mgmt_jgoimard
             subnet: '172.17.120.0/24'
@@ -578,7 +586,9 @@ all:
       _dsatkunas:
         vars:
           rhel_sub_secret_id: ''
-          fingerbank_api_key_secret_id: ''
+          fingerbank_api_key:
+            secret_id: ''
+            email: ''
         networks:
           - name: mgmt_dsatkunas
             subnet: '172.17.125.0/24'
@@ -653,7 +663,9 @@ all:
       _fdurand:
         vars:
           rhel_sub_secret_id: ''
-          fingerbank_api_key_secret_id: ''
+          fingerbank_api_key:
+            secret_id: ''
+            email: ''
         networks:
           - name: mgmt_fdurand
             subnet: '172.17.135.0/24'
@@ -728,7 +740,9 @@ all:
       _jsemaan:
         vars:
           rhel_sub_secret_id: ''
-          fingerbank_api_key_secret_id: ''
+          fingerbank_api_key:
+            secret_id: ''
+            email: ''
         networks:
           - name: mgmt_jsemaan
             subnet: '172.17.155.0/24'
diff --git a/addons/vagrant/pfservers/Vagrantfile b/addons/vagrant/pfservers/Vagrantfile
index 0411c51f86ae..b761250e132a 100644
--- a/addons/vagrant/pfservers/Vagrantfile
+++ b/addons/vagrant/pfservers/Vagrantfile
@@ -9,10 +9,10 @@ inventory = YAML.load_file('inventory/hosts')
 user = ENV['USER']
 users_vars = inventory['all']['vars']['users_vars']
 if users_vars[user]
-  puts "#{user} present in inventory, apply user settings"
+  puts "#{user} user present in inventory, apply user settings"
   dict_name = user
 else
-  puts "#{user} absent inventory, fallback to gitlab-runner settings"
+  puts "#{user} user absent in inventory, fallback to gitlab-runner settings"
   dict_name = 'gitlab-runner'
 end
 user_vars = inventory['all']['vars']['users_vars'][dict_name]
diff --git a/t/venom/Makefile b/t/venom/Makefile
index 98a2a7d32b81..456bbc230858 100644
--- a/t/venom/Makefile
+++ b/t/venom/Makefile
@@ -109,16 +109,20 @@ dot1x_eap_tls_deb11:
 #==============================================================================
 # Targets for local tests
 #==============================================================================
-local_configurator_el8:
+# Usage: SCENARIOS_TO_RUN=example make local_el8
+local_el8:
 	make \
 	PF_VM_NAME=pfel8localdev \
-	SCENARIOS_TO_RUN=configurator \
+	INT_TEST_VM_NAMES="$(INT_TEST_VM_NAMES)" \
+	SCENARIOS_TO_RUN=$(SCENARIOS_TO_RUN) \
 	run_w_clean
 
-local_configurator_deb11:
+# Usage: SCENARIOS_TO_RUN=example make local_deb11
+local_deb11:
 	make \
 	PF_VM_NAME=pfdeb11localdev \
-	SCENARIOS_TO_RUN=configurator \
+	INT_TEST_VM_NAMES="$(INT_TEST_VM_NAMES)" \
+	SCENARIOS_TO_RUN=$(SCENARIOS_TO_RUN) \
 	run_w_clean
 

From 26fa3b554df49ed8810ced62610e5820026585d3 Mon Sep 17 00:00:00 2001
From: nqb
Date: Wed, 24 Nov 2021 22:16:57 +0100
Subject: [PATCH 17/21] install packetfence-test from packetfence repos in
 place of ppa in local dev environment; add GPG keys

---
 .../group_vars/wireless/gitlab_buildpkg_tools.yml | 13 ++++++++++++-
 .../vagrant/playbooks/nodes/pre_prov/packages.yml | 10 +++++++++-
 2 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/addons/vagrant/inventory/group_vars/wireless/gitlab_buildpkg_tools.yml b/addons/vagrant/inventory/group_vars/wireless/gitlab_buildpkg_tools.yml
index 39fab30cb12b..7f0d7d8c71a0 100644
--- a/addons/vagrant/inventory/group_vars/wireless/gitlab_buildpkg_tools.yml
+++ b/addons/vagrant/inventory/group_vars/wireless/gitlab_buildpkg_tools.yml
@@ -1,6 +1,8 @@
 ---
 # force value to simplify tests outside CI
-gitlab_buildpkg_tools__ppa_enabled: True
+# ppa will be disabled in local dev environment
+gitlab_buildpkg_tools__ppa_enabled: '{{ True if lookup("env", "CI")
+                                     else False }}'
 
 # use repo generated by 'publish' stage
 gitlab_buildpkg_tools__ppa_url: 'http://inverse.ca/downloads/PacketFence/gitlab/{{ pipeline_id }}'
@@ -12,6 +14,15 @@ gitlab_buildpkg_tools__deb_ppa:
   baseurl: "{{ gitlab_buildpkg_tools__ppa_url_deb }} {{ ansible_distribution_release }} main"
   gpgkey: 'http://inverse.ca/downloads/GPG_PUBLIC_KEY'
 
+# added for local dev environment where we only want devel packages
+gitlab_buildpkg_tools__deb_deps_repos:
+  - name: 'packetfence'
+    baseurl: 'http://inverse.ca/downloads/PacketFence/debian/{{ pf_minor_release }} {{ ansible_distribution_release }} {{ ansible_distribution_release }}'
+
+# added for local dev environment where we only want devel packages
+gitlab_buildpkg_tools__deb_keys:
+  - 'http://inverse.ca/downloads/GPG_PUBLIC_KEY'
+
 gitlab_buildpkg_tools__deb_pkgs:
   - packetfence-test
 
diff --git a/addons/vagrant/playbooks/nodes/pre_prov/packages.yml b/addons/vagrant/playbooks/nodes/pre_prov/packages.yml
index 7ecddbd8cfb4..d2e1e56cd125 100644
--- a/addons/vagrant/playbooks/nodes/pre_prov/packages.yml
+++ b/addons/vagrant/playbooks/nodes/pre_prov/packages.yml
@@ -14,7 +14,9 @@
     pf_minor_release: '{{ lookup("env", "PF_MINOR_RELEASE") | default("99.9", true) }}'
 
     # force value to simplify tests outside CI
-    gitlab_buildpkg_tools__ppa_enabled: True
+    # ppa will be disabled in local dev environment
+    gitlab_buildpkg_tools__ppa_enabled: '{{ True if lookup("env", "CI")
+                                         else False }}'
 
     # use repo generated by 'publish' stage
     gitlab_buildpkg_tools__ppa_url: 'http://inverse.ca/downloads/PacketFence/gitlab/{{ pipeline_id }}'
@@ -26,10 +28,16 @@
       baseurl: "{{ gitlab_buildpkg_tools__ppa_url_deb }} {{ ansible_distribution_release }} main"
       gpgkey: 'http://inverse.ca/downloads/GPG_PUBLIC_KEY'
 
+    # added for local dev environment where we only want devel packages
+    # **and** for dependencies in CI environment
     gitlab_buildpkg_tools__deb_deps_repos:
       - name: 'packetfence'
         baseurl: 'http://inverse.ca/downloads/PacketFence/debian/{{ pf_minor_release }} {{ ansible_distribution_release }} {{ ansible_distribution_release }}'
 
+    # added for local dev environment
+    gitlab_buildpkg_tools__deb_keys:
+      - 'http://inverse.ca/downloads/GPG_PUBLIC_KEY'
+
     gitlab_buildpkg_tools__deb_pkgs:
       - packetfence-test
       - wpasupplicant

From 48b4bfb3fb9f0c60be8767436561efeffee264a9 Mon Sep 17 00:00:00 2001
From: nqb
Date: Wed, 24 Nov 2021 22:22:53 +0100
Subject: [PATCH 18/21] make pf_minor_release available to all hosts in
 inventory

---
 addons/vagrant/inventory/group_vars/all/common.yml         | 4 ++++
 .../inventory/group_vars/pfservers/packetfence_install.yml | 3 ---
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/addons/vagrant/inventory/group_vars/all/common.yml b/addons/vagrant/inventory/group_vars/all/common.yml
index c48dcf8e32c4..81e44356e4f6 100644
--- a/addons/vagrant/inventory/group_vars/all/common.yml
+++ b/addons/vagrant/inventory/group_vars/all/common.yml
@@ -9,3 +9,7 @@ packetfence_server_name: '{{ lookup("env","PF_VM_NAME") | default("pfel8dev", true) }}'
 packetfence_server_mgmt_ip: '{{ hostvars[packetfence_server_name]["mgmt_ip"] }}'
 
 pipeline_id: '{{ lookup("env","CI_PIPELINE_ID") | default("123456789", true) }}'
+
+# get current PacketFence minor release (X.Y)
+pf_minor_release: '{{ lookup("env", "PF_MINOR_RELEASE") | default("99.9", true) }}'
+
diff --git a/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml b/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml
index cdb39526d0f4..43d63df03449 100644
--- a/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml
+++ b/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml
@@ -1,7 +1,4 @@
 ---
-# get current PacketFence minor release (X.Y)
-pf_minor_release: '{{ lookup("env", "PF_MINOR_RELEASE") | default("99.9", true) }}'
-
 # hack to get id of mgmt interface based
 # on IP assigned by vagrant (in inventory/hosts file)
 mgmt_interface_id: "{{ ansible_facts | dict2items | selectattr('value.ipv4', 'defined') | selectattr('value.ipv4.address', 'equalto', mgmt_ip) | first }}"

From 522fd36ae9b8a4d745e105b20fe3b41e3f183738 Mon Sep 17 00:00:00 2001
From: nqb
Date: Wed, 24 Nov 2021 23:10:05 +0100
Subject: [PATCH 19/21] rsync t/venom on the runner to /usr/local/pf/t/venom on
 VMs before running tests, to allow local development on the runner

---
 .../wireless/gitlab_buildpkg_tools.yml         |  2 +-
 .../scenarios/configurator/playbooks/rsync.yml | 18 ++++++++++++++++++
 t/venom/scenarios/configurator/site.yml        |  6 +++++-
 .../dot1x_eap_peap/playbooks/rsync.yml         | 18 ++++++++++++++++++
 t/venom/scenarios/dot1x_eap_peap/site.yml      |  6 +++++-
 .../dot1x_eap_tls/playbooks/rsync.yml          | 18 ++++++++++++++++++
 t/venom/scenarios/dot1x_eap_tls/site.yml       |  6 +++++-
 t/venom/scenarios/example/playbooks/rsync.yml  | 18 ++++++++++++++++++
 t/venom/scenarios/example/site.yml             |  6 +++++-
 .../export_import/playbooks/rsync.yml          | 18 ++++++++++++++++++
 t/venom/scenarios/export_import/site.yml       |  8 ++++++--
 t/venom/scenarios/mac_auth/playbooks/rsync.yml | 18 ++++++++++++++++++
 t/venom/scenarios/mac_auth/site.yml            |  6 +++++-
 t/venom/scenarios/template/playbooks/rsync.yml | 18 ++++++++++++++++++
 t/venom/scenarios/template/site.yml            |  8 ++++++--
 .../scenarios/unit_tests/playbooks/rsync.yml   | 18 ++++++++++++++++++
 t/venom/scenarios/unit_tests/site.yml          |  8 ++++++--
 t/venom/test-wrapper.sh                        |  3 ++-
 18 files changed, 190 insertions(+), 13 deletions(-)
 create mode 100644 t/venom/scenarios/configurator/playbooks/rsync.yml
 create mode 100644 t/venom/scenarios/dot1x_eap_peap/playbooks/rsync.yml
 create mode 100644 t/venom/scenarios/dot1x_eap_tls/playbooks/rsync.yml
 create mode 100644 t/venom/scenarios/example/playbooks/rsync.yml
 create mode 100644 t/venom/scenarios/export_import/playbooks/rsync.yml
 create mode 100644 t/venom/scenarios/mac_auth/playbooks/rsync.yml
 create mode 100644 t/venom/scenarios/template/playbooks/rsync.yml
 create mode 100644 t/venom/scenarios/unit_tests/playbooks/rsync.yml

diff --git a/addons/vagrant/inventory/group_vars/wireless/gitlab_buildpkg_tools.yml b/addons/vagrant/inventory/group_vars/wireless/gitlab_buildpkg_tools.yml
index 7f0d7d8c71a0..ebd8b487aa4a 100644
--- a/addons/vagrant/inventory/group_vars/wireless/gitlab_buildpkg_tools.yml
+++ b/addons/vagrant/inventory/group_vars/wireless/gitlab_buildpkg_tools.yml
@@ -25,4 +25,4 @@ gitlab_buildpkg_tools__deb_keys:
 
 gitlab_buildpkg_tools__deb_pkgs:
   - packetfence-test
- 
+  - rsync
diff --git a/t/venom/scenarios/configurator/playbooks/rsync.yml b/t/venom/scenarios/configurator/playbooks/rsync.yml
new file mode 100644
index 000000000000..a592f83c0c79
--- /dev/null
+++ b/t/venom/scenarios/configurator/playbooks/rsync.yml
@@ -0,0 +1,18 @@
+---
+- hosts: pfservers, service_venom, nodes
+  name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only)
+  become: True
+
+  tasks:
+    - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom
+      ansible.posix.synchronize:
+        # src is provided through test-wrapper.sh
+        # as $PWD/venom (no leading slash)
+        src: "{{ lookup('env', 'VENOM_ROOT_DIR') }}"
+        dest: '/usr/local/pf/t/'
+        archive: yes
+        delete: yes
+        # exclude files dynamically generated by Ansible
+        rsync_opts:
+          - "--exclude=vars/local.yml"
+
diff --git a/t/venom/scenarios/configurator/site.yml b/t/venom/scenarios/configurator/site.yml
index 26debebc807a..7442a70f6840 100644
--- a/t/venom/scenarios/configurator/site.yml
+++ b/t/venom/scenarios/configurator/site.yml
@@ -1,5 +1,9 @@
 ---
-#- import_playbook: provision.yml
+# - import_playbook: playbooks/provision.yml
+
+# rsync before tests when doing local development
+- import_playbook: playbooks/rsync.yml
+  when: lookup("env", "CI") != 'true'
 
 #- import_playbook: playbooks/configure.yml
 
diff --git a/t/venom/scenarios/dot1x_eap_peap/playbooks/rsync.yml b/t/venom/scenarios/dot1x_eap_peap/playbooks/rsync.yml
new file mode 100644
index 000000000000..a592f83c0c79
--- /dev/null
+++ b/t/venom/scenarios/dot1x_eap_peap/playbooks/rsync.yml
@@ -0,0 +1,18 @@
+---
+- hosts: pfservers, service_venom, nodes
+  name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only)
+  become: True
+
+  tasks:
+    - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom
+      ansible.posix.synchronize:
+        # src is provided through test-wrapper.sh
+        # as $PWD/venom (no leading slash)
+        src: "{{ lookup('env', 'VENOM_ROOT_DIR') }}"
+        dest: '/usr/local/pf/t/'
+        archive: yes
+        delete: yes
+        # exclude files dynamically generated by Ansible
+        rsync_opts:
+          - "--exclude=vars/local.yml"
+
diff --git a/t/venom/scenarios/dot1x_eap_peap/site.yml b/t/venom/scenarios/dot1x_eap_peap/site.yml
index 2522d0837f29..7442a70f6840 100644
--- a/t/venom/scenarios/dot1x_eap_peap/site.yml
+++ b/t/venom/scenarios/dot1x_eap_peap/site.yml
@@ -1,5 +1,9 @@
 ---
-# - import_playbook: provision.yml
+# - import_playbook: playbooks/provision.yml
+
+# rsync before tests when doing local development
+- import_playbook: playbooks/rsync.yml
+  when: lookup("env", "CI") != 'true'
 
 #- import_playbook: playbooks/configure.yml
 
diff --git a/t/venom/scenarios/dot1x_eap_tls/playbooks/rsync.yml b/t/venom/scenarios/dot1x_eap_tls/playbooks/rsync.yml
new file mode 100644
index 000000000000..a592f83c0c79
--- /dev/null
+++ b/t/venom/scenarios/dot1x_eap_tls/playbooks/rsync.yml
@@ -0,0 +1,18 @@
+---
+- hosts: pfservers, service_venom, nodes
+  name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only)
+  become: True
+
+  tasks:
+    - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom
+      ansible.posix.synchronize:
+        # src is provided through test-wrapper.sh
+        # as $PWD/venom (no leading slash)
+        src: "{{ lookup('env', 'VENOM_ROOT_DIR') }}"
+        dest: '/usr/local/pf/t/'
+        archive: yes
+        delete: yes
+        # exclude files dynamically generated by Ansible
+        rsync_opts:
+          - "--exclude=vars/local.yml"
+
diff --git a/t/venom/scenarios/dot1x_eap_tls/site.yml b/t/venom/scenarios/dot1x_eap_tls/site.yml
index 2522d0837f29..7442a70f6840 100644
--- a/t/venom/scenarios/dot1x_eap_tls/site.yml
+++ b/t/venom/scenarios/dot1x_eap_tls/site.yml
@@ -1,5 +1,9 @@
 ---
-# - import_playbook: provision.yml
+# - import_playbook: playbooks/provision.yml
+
+# rsync before tests when doing local development
+- import_playbook: playbooks/rsync.yml
+  when: lookup("env", "CI") != 'true'
 
 #- import_playbook: playbooks/configure.yml
 
diff --git a/t/venom/scenarios/example/playbooks/rsync.yml b/t/venom/scenarios/example/playbooks/rsync.yml
new file mode 100644
index 000000000000..a592f83c0c79
--- /dev/null
+++ b/t/venom/scenarios/example/playbooks/rsync.yml
@@ -0,0 +1,18 @@
+---
+- hosts: pfservers, service_venom, nodes
+  name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only)
+  become: True
+
+  tasks:
+    - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom
+      ansible.posix.synchronize:
+        # src is provided through test-wrapper.sh
+        # as $PWD/venom (no leading slash)
+        src: "{{ lookup('env', 'VENOM_ROOT_DIR') }}"
+        dest: '/usr/local/pf/t/'
+        archive: yes
+        delete: yes
+        # exclude files dynamically generated by Ansible
+        rsync_opts:
+          - "--exclude=vars/local.yml"
+
diff --git a/t/venom/scenarios/example/site.yml b/t/venom/scenarios/example/site.yml
index 2522d0837f29..7442a70f6840 100644
--- a/t/venom/scenarios/example/site.yml
+++ b/t/venom/scenarios/example/site.yml
@@ -1,5 +1,9 @@
 ---
-# - import_playbook: provision.yml
+# - import_playbook: playbooks/provision.yml
+
+# rsync before tests when doing local development
+- import_playbook: playbooks/rsync.yml
+  when: lookup("env", "CI") != 'true'
 
 #- import_playbook: playbooks/configure.yml
 
diff --git a/t/venom/scenarios/export_import/playbooks/rsync.yml b/t/venom/scenarios/export_import/playbooks/rsync.yml
new file mode 100644
index 000000000000..a592f83c0c79
--- /dev/null
+++ b/t/venom/scenarios/export_import/playbooks/rsync.yml
@@ -0,0 +1,18 @@
+---
+- hosts: pfservers, service_venom, nodes
+  name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only)
+  become: True
+
+  tasks:
+    - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom
+      ansible.posix.synchronize:
+        # src is provided through test-wrapper.sh
+        # as $PWD/venom (no leading slash)
+        src: "{{ lookup('env', 'VENOM_ROOT_DIR') }}"
+        dest: '/usr/local/pf/t/'
+        archive: yes
+        delete: yes
+        # exclude files dynamically generated by Ansible
+        rsync_opts:
+          - "--exclude=vars/local.yml"
+
diff --git a/t/venom/scenarios/export_import/site.yml b/t/venom/scenarios/export_import/site.yml
index d19c9d1672e6..7442a70f6840 100644
--- a/t/venom/scenarios/export_import/site.yml
+++ b/t/venom/scenarios/export_import/site.yml
@@ -1,6 +1,10 @@
 ---
-# - import_playbook: provision.yml
+# - import_playbook: playbooks/provision.yml
 
-- import_playbook: playbooks/configure.yml
+# rsync before tests when doing local development
+- import_playbook: playbooks/rsync.yml
+  when: lookup("env", "CI") != 'true'
+
+#- import_playbook: playbooks/configure.yml
 
 - import_playbook: playbooks/run_tests.yml
diff --git a/t/venom/scenarios/mac_auth/playbooks/rsync.yml b/t/venom/scenarios/mac_auth/playbooks/rsync.yml
new file mode 100644
index 000000000000..a592f83c0c79
--- /dev/null
+++ b/t/venom/scenarios/mac_auth/playbooks/rsync.yml
@@ -0,0 +1,18 @@
+---
+- hosts: pfservers, service_venom, nodes
+  name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only)
+  become: True
+
+  tasks:
+    - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom
+      ansible.posix.synchronize:
+        # src is provided through test-wrapper.sh
+        # as $PWD/venom (no leading slash)
+        src: "{{ lookup('env', 'VENOM_ROOT_DIR') }}"
+        dest: '/usr/local/pf/t/'
+        archive: yes
+        delete: yes
+        # exclude files dynamically generated by Ansible
+        rsync_opts:
+          - "--exclude=vars/local.yml"
+
diff --git a/t/venom/scenarios/mac_auth/site.yml b/t/venom/scenarios/mac_auth/site.yml
index 2522d0837f29..7442a70f6840 100644
--- a/t/venom/scenarios/mac_auth/site.yml
+++ b/t/venom/scenarios/mac_auth/site.yml
@@ -1,5 +1,9 @@
 ---
-# - import_playbook: provision.yml
+# - import_playbook: playbooks/provision.yml
+
+# rsync before tests when doing local development
+- import_playbook: playbooks/rsync.yml
+  when: lookup("env", "CI") != 'true'
 
 #- import_playbook: playbooks/configure.yml
 
diff --git a/t/venom/scenarios/template/playbooks/rsync.yml b/t/venom/scenarios/template/playbooks/rsync.yml
new file mode 100644
index 000000000000..a592f83c0c79
--- /dev/null
+++ b/t/venom/scenarios/template/playbooks/rsync.yml
@@ -0,0 +1,18 @@
+---
+- hosts: pfservers, service_venom, nodes
+  name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only)
+  become: True
+
+  tasks:
+    - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom
+      ansible.posix.synchronize:
+        # src is provided through test-wrapper.sh
+        # as $PWD/venom (no leading slash)
+        src: "{{ lookup('env', 'VENOM_ROOT_DIR') }}"
+        dest: '/usr/local/pf/t/'
+        archive: yes
+        delete: yes
+        # exclude files dynamically generated by Ansible
+        rsync_opts:
+          - "--exclude=vars/local.yml"
+
diff --git a/t/venom/scenarios/template/site.yml b/t/venom/scenarios/template/site.yml
index d19c9d1672e6..7442a70f6840 100644
--- a/t/venom/scenarios/template/site.yml
+++ b/t/venom/scenarios/template/site.yml
@@ -1,6 +1,10 @@
 ---
-# - import_playbook: provision.yml
+# - import_playbook: playbooks/provision.yml
 
-- import_playbook: playbooks/configure.yml
+# rsync before tests when doing local development
+- import_playbook: playbooks/rsync.yml
+  when: lookup("env", "CI") != 'true'
+
+#- import_playbook: playbooks/configure.yml
 
 - import_playbook: playbooks/run_tests.yml
diff --git a/t/venom/scenarios/unit_tests/playbooks/rsync.yml b/t/venom/scenarios/unit_tests/playbooks/rsync.yml
new file mode 100644
index 000000000000..a592f83c0c79
--- /dev/null
+++ b/t/venom/scenarios/unit_tests/playbooks/rsync.yml
@@ -0,0 +1,18 @@
+---
+- hosts: pfservers, service_venom, nodes
+  name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only)
+  become: True
+
+  tasks:
+    - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom
+      ansible.posix.synchronize:
+        # src is provided through test-wrapper.sh
+        # as $PWD/venom (no leading slash)
+        src: "{{ lookup('env', 'VENOM_ROOT_DIR') }}"
+        dest: '/usr/local/pf/t/'
+        archive: yes
+        delete: yes
+        # exclude files dynamically generated by Ansible
+        rsync_opts:
+          - "--exclude=vars/local.yml"
+
diff --git a/t/venom/scenarios/unit_tests/site.yml b/t/venom/scenarios/unit_tests/site.yml
index d19c9d1672e6..7442a70f6840 100644
--- a/t/venom/scenarios/unit_tests/site.yml
+++ b/t/venom/scenarios/unit_tests/site.yml
@@ -1,6 +1,10 @@
 ---
-# - import_playbook: provision.yml
+# - import_playbook: playbooks/provision.yml
 
-- import_playbook: playbooks/configure.yml
+# rsync before tests when doing local development
+- import_playbook: playbooks/rsync.yml
+  when: lookup("env", "CI") != 'true'
+
+#- import_playbook: playbooks/configure.yml
 
 - import_playbook: playbooks/run_tests.yml
diff --git a/t/venom/test-wrapper.sh b/t/venom/test-wrapper.sh
index 19c6c35bd8bf..3ca069e31557 100755
--- a/t/venom/test-wrapper.sh
+++ b/t/venom/test-wrapper.sh
@@ -58,12 +58,13 @@ configure_and_check() {
 
     declare -p VAGRANT_DIR VAGRANT_ANSIBLE_VERBOSE VAGRANT_PF_DOTFILE_PATH VAGRANT_COMMON_DOTFILE_PATH
-    declare -p ANSIBLE_INVENTORY
+    declare -p ANSIBLE_INVENTORY VENOM_ROOT_DIR
     declare -p CI_COMMIT_TAG CI_PIPELINE_ID PF_MINOR_RELEASE
     declare -p PF_VM_NAME INT_TEST_VM_NAMES ANSIBLE_VM_LIST
     declare -p SCENARIOS_TO_RUN DESTROY_ALL
 
     export ANSIBLE_INVENTORY
+    export VENOM_ROOT_DIR
 }
 
 run() {

From f07218b81bee7c7a22f00a3142b49db6537be829 Mon Sep 17 00:00:00 2001
From: nqb
Date: Thu, 25 Nov 2021 07:55:55 +0100
Subject: [PATCH 20/21] detect environment in Makefile to use same Make targets

---
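Editor's note (kept below the cut line, so `git am` drops it): GNU Make resolves ifeq conditionals at parse time, so the CI/localdev switch introduced below is evaluated once per invocation, keyed on the CI environment variable that GitLab CI exports as "true". A minimal standalone sketch of the pattern; the show_env target is hypothetical and only for illustration:

    ifeq ($(CI), true)
      DEV_ENV=dev              # CI VMs: pfel8dev, pfdeb11dev, ...
      MAKE_TARGET=run          # the pipeline destroys VMs itself
    else
      DEV_ENV=localdev         # local VMs: pfel8localdev, ...
      MAKE_TARGET=run_w_clean  # clean up after the run
    endif

    show_env:
    	@echo "targets will use pfel8$(DEV_ENV) via $(MAKE_TARGET)"

With this in place, the same `make configurator_el8` expands to PF_VM_NAME=pfel8dev and the run target in CI, and to PF_VM_NAME=pfel8localdev and run_w_clean on a developer machine.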
 t/venom/Makefile | 61 +++++++++++++++++++++++++++---------------------
 1 file changed, 35 insertions(+), 26 deletions(-)

diff --git a/t/venom/Makefile b/t/venom/Makefile
index 456bbc230858..d848c233a8fc 100644
--- a/t/venom/Makefile
+++ b/t/venom/Makefile
@@ -6,6 +6,20 @@ SHELL=/bin/bash
 PFSERVERS_DIR=pfservers
 CI_PIPELINE_ID=123456789
 
+#==============================================================================
+# Tests in CI or localdev
+#==============================================================================
+# in localdev, we don't want to clean all VMs created previously
+ifeq ($(CI), true)
+  $(info CI environment detected)
+  MAKE_TARGET=run
+  DEV_ENV=dev
+else
+  $(info localdev environment detected)
+  MAKE_TARGET=run_w_clean
+  DEV_ENV=localdev
+endif
+
 #==============================================================================
 # Targets
 #==============================================================================
@@ -48,81 +62,76 @@ clean:
 
 unit_tests_el8:
 	make \
-	PF_VM_NAME=pfel8dev \
+	PF_VM_NAME=pfel8$(DEV_ENV) \
 	SCENARIOS_TO_RUN=unit_tests \
 	$(MAKE_TARGET)
 
 configurator_el8:
 	make \
-	PF_VM_NAME=pfel8dev \
+	PF_VM_NAME=pfel8$(DEV_ENV) \
 	SCENARIOS_TO_RUN=configurator \
 	$(MAKE_TARGET)
 
 configurator_deb11:
 	make \
-	PF_VM_NAME=pfdeb11dev \
+	PF_VM_NAME=pfdeb11$(DEV_ENV) \
 	SCENARIOS_TO_RUN=configurator \
 	$(MAKE_TARGET)
 
 dot1x_eap_peap_el8:
 	make \
-	PF_VM_NAME=pfel8dev \
+	PF_VM_NAME=pfel8$(DEV_ENV) \
 	INT_TEST_VM_NAMES="ad switch01 node01 wireless01" \
 	SCENARIOS_TO_RUN=dot1x_eap_peap \
 	$(MAKE_TARGET)
 
 dot1x_eap_peap_deb11:
 	make \
-	PF_VM_NAME=pfdeb11dev \
+	PF_VM_NAME=pfdeb11$(DEV_ENV) \
 	INT_TEST_VM_NAMES="ad switch01 node01 wireless01" \
 	SCENARIOS_TO_RUN=dot1x_eap_peap \
 	$(MAKE_TARGET)
 
 mac_auth_el8:
	make \
-	PF_VM_NAME=pfel8dev \
+	PF_VM_NAME=pfel8$(DEV_ENV) \
 	INT_TEST_VM_NAMES="switch01 node01 wireless01" \
 	SCENARIOS_TO_RUN=mac_auth \
 	$(MAKE_TARGET)
 
 mac_auth_deb11:
 	make \
-	PF_VM_NAME=pfdeb11dev \
+	PF_VM_NAME=pfdeb11$(DEV_ENV) \
 	INT_TEST_VM_NAMES="switch01 node01 wireless01" \
 	SCENARIOS_TO_RUN=mac_auth \
 	$(MAKE_TARGET)
 
 dot1x_eap_tls_el8:
 	make \
-	PF_VM_NAME=pfel8dev \
+	PF_VM_NAME=pfel8$(DEV_ENV) \
 	INT_TEST_VM_NAMES="switch01 node01" \
 	SCENARIOS_TO_RUN=dot1x_eap_tls \
 	$(MAKE_TARGET)
 
 dot1x_eap_tls_deb11:
 	make \
-	PF_VM_NAME=pfdeb11dev \
+	PF_VM_NAME=pfdeb11$(DEV_ENV) \
 	INT_TEST_VM_NAMES="switch01 node01" \
 	SCENARIOS_TO_RUN=dot1x_eap_tls \
 	$(MAKE_TARGET)
 
-#==============================================================================
-# Targets for local tests
-#==============================================================================
-# Usage: SCENARIOS_TO_RUN=example make local_el8
-local_el8:
-	make \
-	PF_VM_NAME=pfel8localdev \
-	INT_TEST_VM_NAMES="$(INT_TEST_VM_NAMES)" \
-	SCENARIOS_TO_RUN=$(SCENARIOS_TO_RUN) \
-	run_w_clean
-
-# Usage: SCENARIOS_TO_RUN=example make local_deb11
-local_deb11:
-	make \
-	PF_VM_NAME=pfdeb11localdev \
-	INT_TEST_VM_NAMES="$(INT_TEST_VM_NAMES)" \
-	SCENARIOS_TO_RUN=$(SCENARIOS_TO_RUN) \
-	run_w_clean
-
-
+# if you want to start additional VMs, set e.g.:
+# INT_TEST_VM_NAMES="switch01 node01"
+example_el8:
+	make \
+	PF_VM_NAME=pfel8$(DEV_ENV) \
+	SCENARIOS_TO_RUN=example \
+	$(MAKE_TARGET)
+
+# if you want to start additional VMs, set e.g.:
+# INT_TEST_VM_NAMES="switch01 node01"
+example_deb11:
+	make \
+	PF_VM_NAME=pfdeb11$(DEV_ENV) \
+	SCENARIOS_TO_RUN=example \
+	$(MAKE_TARGET)

From 57f7c8261a08dded04b35b37c66436c9aa468400 Mon Sep 17 00:00:00 2001
From: nqb
Date: Thu, 25 Nov 2021 08:17:35 +0100
Subject: [PATCH 21/21] reprovision all VMs in local environment when running
 run_w_clean target, and allow shutting down all VMs without errors using
 make halt

---
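Editor's note (kept below the cut line, so `git am` drops it): the start_vm helper added below relies on the vagrant-libvirt dotfile layout, where machines/<vm>/libvirt/id under the Vagrant dotfile path stores the libvirt domain UUID. A minimal sketch of the reuse logic, assuming the same qemu:///system connection URI used elsewhere in this script:

    # sketch only: reuse an existing libvirt domain instead of 'vagrant up'
    uuid=$(cat "${dotfile_path}/machines/${vm}/libvirt/id")
    state=$(virsh -c qemu:///system domstate --domain "$uuid")  # "running", "shut off", ...
    if [ "$state" = "shut off" ]; then
        virsh -c qemu:///system start --domain "$uuid"
        sleep 60  # give the guest time to boot before Ansible connects
    fi
    ansible-playbook site.yml -l "$vm"  # provision without Vagrant

virsh domstate prints the domain's power state, which is exactly what the new start_vm function branches on.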
run_w_clean target and allow to shutdown all VM without errors using make halt --- t/venom/test-wrapper.sh | 69 +++++++++++++++++++++++++---------------- 1 file changed, 42 insertions(+), 27 deletions(-) diff --git a/t/venom/test-wrapper.sh b/t/venom/test-wrapper.sh index 3ca069e31557..236fbbaf0ba7 100755 --- a/t/venom/test-wrapper.sh +++ b/t/venom/test-wrapper.sh @@ -78,15 +78,42 @@ run() { run_tests } +# Start with or without VM +start_vm() { + local vm=$1 + local dotfile_path=$2 + if [ -e "${dotfile_path}/machines/${vm}/libvirt/id" ]; then + echo "Machine $vm already exists" + machine_uuid=$(cat ${dotfile_path}/machines/${vm}/libvirt/id) + machine_state=$(virsh -c qemu:///system domstate --domain $machine_uuid) + if [ "${machine_state}" = "shut off" ]; then + echo "Starting $vm using libvirt, provisioning using Ansible (without Vagrant)" + virsh -c qemu:///system start --domain $machine_uuid + # let time for the VM to boot before using ansible + echo "Let time to VM to start before provisioning using Ansible.." + sleep 60 + else + echo "Machine already started, Ansible provisioning only" + fi + ( cd ${VAGRANT_DIR}; \ + ansible-playbook site.yml -l $vm ) + else + echo "Machine $vm doesn't exist, start and provision with Vagrant" + ( cd ${VAGRANT_DIR} ; \ + VAGRANT_DOTFILE_PATH=${dotfile_path} \ + vagrant up \ + ${vm} \ + ${VAGRANT_UP_OPTS} ) + fi +} + start_and_provision_pf_vm() { local vm_names=${@:-vmname} - log_subsection "Start and provision $vm_names" + log_subsection "Start and provision PacketFence $vm_names" + for vm in ${vm_names}; do + start_vm ${vm} ${VAGRANT_PF_DOTFILE_PATH} + done - ( cd ${VAGRANT_DIR} ; \ - VAGRANT_DOTFILE_PATH=${VAGRANT_PF_DOTFILE_PATH} \ - vagrant up \ - ${vm_names} \ - ${VAGRANT_UP_OPTS} ) } start_and_provision_other_vm() { @@ -94,25 +121,7 @@ start_and_provision_other_vm() { log_subsection "Start and provision $vm_names" for vm in ${vm_names}; do - if [ -e "${VAGRANT_COMMON_DOTFILE_PATH}/machines/${vm}/libvirt/id" ]; then - echo "Machine $vm already exists" - machine_uuid=$(cat ${VAGRANT_COMMON_DOTFILE_PATH}/machines/${vm}/libvirt/id) - # hack to overcome the fact that node01 doesn't have IP address after first provisioning - # vagrant up will fail - echo "Starting $vm using libvirt, provisioning using Ansible (without Vagrant)" - virsh -c qemu:///system start --domain $machine_uuid - # let time for the VM to boot before using ansible - sleep 60 - ( cd ${VAGRANT_DIR}; \ - ansible-playbook site.yml -l $vm ) - else - echo "Machine $vm doesn't exist, start and provision with Vagrant" - ( cd ${VAGRANT_DIR} ; \ - VAGRANT_DOTFILE_PATH=${VAGRANT_COMMON_DOTFILE_PATH} \ - vagrant up \ - ${vm} \ - ${VAGRANT_UP_OPTS} ) - fi + start_vm ${vm} ${VAGRANT_COMMON_DOTFILE_PATH} done } @@ -134,8 +143,14 @@ run_tests() { unconfigure() { log_subsection "Unconfigure virtual machines" - ( cd $VAGRANT_DIR ; \ - ansible-playbook teardown.yml -l $ANSIBLE_VM_LIST ) + # when we call "make halt" without options (localdev) + # no VM are provided + if [ -n "${ANSIBLE_VM_LIST}" ]; then + ( cd $VAGRANT_DIR ; \ + ansible-playbook teardown.yml -l $ANSIBLE_VM_LIST ) + else + echo "No VM detected, nothing to unconfigure" + fi } halt() {