diff --git a/addons/vagrant/Vagrantfile b/addons/vagrant/Vagrantfile index 1ef093642614..180fcbfbc041 100644 --- a/addons/vagrant/Vagrantfile +++ b/addons/vagrant/Vagrantfile @@ -3,7 +3,7 @@ require 'securerandom' -BRANCH_OR_TAG_NAME = ENV['CI_COMMIT_REF_SLUG'] || 'local-tests' +BRANCH_OR_TAG_NAME = ENV['CI_COMMIT_REF_SLUG'] || ENV['USER'] DOMAIN_PREFIX = "vagrant-" + BRANCH_OR_TAG_NAME + "-#{SecureRandom.hex(3)}-" Vagrant.configure("2") do |config| diff --git a/addons/vagrant/cumulus/Vagrantfile b/addons/vagrant/cumulus/Vagrantfile index 70d6da86eb6e..72633e9b8012 100644 --- a/addons/vagrant/cumulus/Vagrantfile +++ b/addons/vagrant/cumulus/Vagrantfile @@ -84,9 +84,26 @@ echo "### Rebooting Device to Apply Remap..." nohup bash -c 'shutdown now -r "Rebooting to Remap Interfaces"' & SCRIPT -# Read YAML file with box details +# Read YAML file with box and network details inventory = YAML.load_file('inventory/hosts') -networks = inventory['all']['vars']['networks'] +user = ENV['USER'] +users_vars = inventory['all']['vars']['users_vars'] +if users_vars[user] + dict_name = user +else + dict_name = 'gitlab-runner' +end +user_vars = inventory['all']['vars']['users_vars'][dict_name] +switch01_ip = user_vars['vms']['switch01']['ip'] +switch01_netmask = user_vars['vms']['switch01']['netmask'] +inline_ip = user_vars['vms']['switch01']['inline_ip'] +inline_netmask = user_vars['vms']['switch01']['inline_netmask'] +inline_l3_ip = user_vars['vms']['switch01']['inline_l3_ip'] +inline_l3_netmask = user_vars['vms']['switch01']['inline_l3_netmask'] +node01_ip = user_vars['vms']['node01']['ip'] +node01_netmask = user_vars['vms']['node01']['netmask'] +node02_ip = user_vars['vms']['node02']['ip'] +node02_netmask = user_vars['vms']['node02']['netmask'] Vagrant.configure("2") do |config| @@ -113,34 +130,34 @@ Vagrant.configure("2") do |config| # link for swp1 --> mgmt_network (vlan 17) device.vm.network "private_network", :mac => "a0:00:00:00:00:01", - :libvirt__network_name => networks[0]['name'], - :ip => networks[0]['subnet'], + :libvirt__network_name => user_vars['networks'][0]['name'], + :ip => user_vars['networks'][0]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[0]['forward_mode'], + :libvirt__forward_mode => user_vars['networks'][0]['forward_mode'], auto_config: false # link for swp2 --> reg_network (vlan 2) device.vm.network "private_network", :mac => "44:38:39:00:00:02", - :libvirt__network_name => networks[1]['name'], - :ip => networks[1]['subnet'], + :libvirt__network_name => user_vars['networks'][1]['name'], + :ip => user_vars['networks'][1]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[1]['forward_mode'], + :libvirt__forward_mode => user_vars['networks'][1]['forward_mode'], auto_config: false # link for swp3 --> iso_network (vlan 3) device.vm.network "private_network", :mac => "44:38:39:00:00:03", - :libvirt__network_name => networks[2]['name'], - :ip => networks[2]['subnet'], + :libvirt__network_name => user_vars['networks'][2]['name'], + :ip => user_vars['networks'][2]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[2]['forward_mode'], + :libvirt__forward_mode => user_vars['networks'][2]['forward_mode'], auto_config: false # link for swp6 --> inline_network (vlan 6) device.vm.network "private_network", :mac => "44:38:39:00:00:06", - :libvirt__network_name => networks[3]['name'], - :ip => networks[3]['subnet'], + :libvirt__network_name => user_vars['networks'][3]['name'], + :ip => 
user_vars['networks'][3]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[3]['forward_mode'], + :libvirt__forward_mode => user_vars['networks'][3]['forward_mode'], auto_config: false # link for swp11 --> node01:ens6 device.vm.network "private_network", @@ -194,8 +211,11 @@ Vagrant.configure("2") do |config| # Run the Config specified in the Node Attributes device.vm.provision :shell , privileged: false, :inline => 'echo "$(whoami)" > /tmp/normal_user' - device.vm.provision "config_switch", type: "shell" , path: "./helper_scripts/config_switch.sh" - + device.vm.provision "config_switch", type: "shell" , path: "./helper_scripts/config_switch.sh", + args: [ "#{switch01_ip}", "#{switch01_netmask}", + "#{inline_ip}", "#{inline_netmask}", + "#{inline_l3_ip}", "#{inline_l3_netmask}" + ] # Install Rules for the interface re-map device.vm.provision :shell , :inline => <<-delete_udev_directory @@ -338,8 +358,8 @@ vagrant_interface_rule # Run the Config specified in the Node Attributes device.vm.provision :shell , privileged: false, :inline => 'echo "$(whoami)" > /tmp/normal_user' - device.vm.provision "config_node", type: "shell", path: "./helper_scripts/config_node01.sh" - + device.vm.provision "config_node", type: "shell", path: "./helper_scripts/config_node01.sh", + args: [ "#{node01_ip}", "#{node01_netmask}"] # Install Rules for the interface re-map device.vm.provision :shell , :inline => <<-delete_udev_directory @@ -444,8 +464,8 @@ vagrant_interface_rule # Run the Config specified in the Node Attributes device.vm.provision :shell , privileged: false, :inline => 'echo "$(whoami)" > /tmp/normal_user' - device.vm.provision "config_node", type: "shell", path: "./helper_scripts/config_node02.sh" - + device.vm.provision "config_node", type: "shell", path: "./helper_scripts/config_node02.sh", + args: [ "#{node02_ip}", "#{node02_netmask}"] # Install Rules for the interface re-map device.vm.provision :shell , :inline => <<-delete_udev_directory diff --git a/addons/vagrant/helper_scripts/config_node01.sh b/addons/vagrant/helper_scripts/config_node01.sh index 1e332daa3f55..f2e8f67ed137 100755 --- a/addons/vagrant/helper_scripts/config_node01.sh +++ b/addons/vagrant/helper_scripts/config_node01.sh @@ -1,5 +1,9 @@ #!/bin/bash set -o nounset -o pipefail -o errexit +mgmt_ip=$1 +mgmt_netmask=$2 + +declare -p mgmt_ip mgmt_netmask echo "#################################" echo " Running config_node01.sh" @@ -41,7 +45,8 @@ iface lo inet loopback auto ens6 iface ens6 inet static alias VLAN 17 - address 172.17.17.251/24 + address ${mgmt_ip} + netmask ${mgmt_netmask} allow-hotplug ens7 iface ens7 inet dhcp diff --git a/addons/vagrant/helper_scripts/config_node02.sh b/addons/vagrant/helper_scripts/config_node02.sh index db1f55192aea..92727c24e7f5 100755 --- a/addons/vagrant/helper_scripts/config_node02.sh +++ b/addons/vagrant/helper_scripts/config_node02.sh @@ -1,5 +1,9 @@ #!/bin/bash set -o nounset -o pipefail -o errexit +mgmt_ip=$1 +mgmt_netmask=$2 + +declare -p mgmt_ip mgmt_netmask echo "#################################" echo " Running config_node.sh" @@ -41,7 +45,8 @@ iface lo inet loopback auto ens6 iface ens6 inet static alias VLAN 17 - address 172.17.17.252/24 + address ${mgmt_ip} + netmask ${mgmt_netmask} allow-hotplug ens7 iface ens7 inet dhcp diff --git a/addons/vagrant/helper_scripts/config_switch.sh b/addons/vagrant/helper_scripts/config_switch.sh index 4a061b886936..c516cee9e0cc 100755 --- a/addons/vagrant/helper_scripts/config_switch.sh +++ 
b/addons/vagrant/helper_scripts/config_switch.sh @@ -1,5 +1,15 @@ #!/bin/bash set -o nounset -o pipefail -o errexit +mgmt_ip=$1 +mgmt_netmask=$2 +inline_ip=$3 +inline_netmask=$4 +inline_l3_ip=$5 +inline_l3_netmask=$6 + +declare -p mgmt_ip mgmt_netmask +declare -p inline_ip inline_netmask +declare -p inline_l3_ip inline_l3_netmask echo "#################################" echo " Running Switch Post Config (config_switch.sh)" @@ -62,17 +72,20 @@ iface swp48 auto bridge.6 iface bridge.6 alias Inline-L2 - address 172.17.6.3/24 + address ${inline_ip} + netmask ${inline_netmask} auto bridge.17 iface bridge.17 alias Management - address 172.17.17.201/24 + address ${mgmt_ip} + netmask ${mgmt_netmask} auto bridge.18 iface bridge.18 alias Inline-L3 - address 172.17.18.254/24 + address ${inline_l3_ip} + netmask ${inline_l3_netmask} auto bridge.100 iface bridge.100 inet dhcp diff --git a/addons/vagrant/inventory/group_vars/all/common.yml b/addons/vagrant/inventory/group_vars/all/common.yml index c48dcf8e32c4..81e44356e4f6 100644 --- a/addons/vagrant/inventory/group_vars/all/common.yml +++ b/addons/vagrant/inventory/group_vars/all/common.yml @@ -9,3 +9,7 @@ packetfence_server_name: '{{ lookup("env","PF_VM_NAME") | default("pfel8dev", tr packetfence_server_mgmt_ip: '{{ hostvars[packetfence_server_name]["mgmt_ip"] }}' pipeline_id: '{{ lookup("env","CI_PIPELINE_ID") | default("123456789", true) }}' + +# get current PacketFence minor release (X.Y) +pf_minor_release: '{{ lookup("env", "PF_MINOR_RELEASE") | default("99.9", true) }}' + diff --git a/addons/vagrant/inventory/group_vars/dev/packetfence_install.yml b/addons/vagrant/inventory/group_vars/dev/packetfence_install.yml index de18c79666c7..6f3a8df996e5 100644 --- a/addons/vagrant/inventory/group_vars/dev/packetfence_install.yml +++ b/addons/vagrant/inventory/group_vars/dev/packetfence_install.yml @@ -3,3 +3,18 @@ # from inverse.ca website (already installed based on pipeline # artifacts) packetfence_install__centos_release_rpm: 'packetfence-release' + +# in CI environment: only for dependencies, packetfence package is installed using local repo +packetfence_install__centos: + repos: + - packetfence + +# override to installed test files +packetfence_install__centos_packages: + - packetfence + - packetfence-test + +# override to installed test files +packetfence_install__deb_packages: + - packetfence + - packetfence-test diff --git a/addons/vagrant/inventory/group_vars/linux_servers/freeradius.yml b/addons/vagrant/inventory/group_vars/linux_servers/freeradius.yml index d5f95a981d4d..aa5e04f2e77f 100644 --- a/addons/vagrant/inventory/group_vars/linux_servers/freeradius.yml +++ b/addons/vagrant/inventory/group_vars/linux_servers/freeradius.yml @@ -10,7 +10,7 @@ freeradius__configuration: - name: 'packetfence_servers' raw: | client pf { - ipaddr = {{ networks[0]['subnet'] }} + ipaddr = {{ users_vars[dict_name]['networks'][1]['subnet'] }} secret = testing123 } state: 'present' diff --git a/addons/vagrant/inventory/group_vars/linux_servers/tinyproxy.yml b/addons/vagrant/inventory/group_vars/linux_servers/tinyproxy.yml index c8a755978973..a5a6a4756d0f 100644 --- a/addons/vagrant/inventory/group_vars/linux_servers/tinyproxy.yml +++ b/addons/vagrant/inventory/group_vars/linux_servers/tinyproxy.yml @@ -1,3 +1,3 @@ --- tinyproxy__allow: - - "{{ networks[0]['subnet'] }}" + - "{{ users_vars[dict_name]['networks'][1]['subnet'] }}" diff --git a/addons/vagrant/inventory/group_vars/localdev/packetfence_install.yml 
b/addons/vagrant/inventory/group_vars/localdev/packetfence_install.yml new file mode 100644 index 000000000000..73cc5fa4550e --- /dev/null +++ b/addons/vagrant/inventory/group_vars/localdev/packetfence_install.yml @@ -0,0 +1,12 @@ +--- +packetfence_install__centos_release_rpm: "http://packetfence.org/downloads/PacketFence/RHEL8/packetfence-release-{{ pf_minor_release }}.el8.noarch.rpm" + +# override to installed test files +packetfence_install__centos_packages: + - packetfence + - packetfence-test + +# override to installed test files +packetfence_install__deb_packages: + - packetfence + - packetfence-test diff --git a/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml b/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml index a2bb1e15f417..43d63df03449 100644 --- a/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml +++ b/addons/vagrant/inventory/group_vars/pfservers/packetfence_install.yml @@ -1,7 +1,4 @@ --- -# get current PacketFence minor release (X.Y) -pf_minor_release: '{{ lookup("env", "PF_MINOR_RELEASE") | default("99.9", true) }}' - # hack to get id of mgmt interface based # on IP assigned by vagrant (in inventory/hosts file) mgmt_interface_id: "{{ ansible_facts | dict2items | selectattr('value.ipv4', 'defined') | selectattr('value.ipv4.address', 'equalto', mgmt_ip) | first }}" @@ -10,27 +7,12 @@ packetfence_install__mgmt_interface: id: "{{ mgmt_interface_id['key'] }}" ip: "{{ mgmt_ip }}" mask: "{{ mgmt_netmask }}" - type: management,portal - -# only for dependencies, packetfence package is installed using local repo -packetfence_install__centos: - repos: - - packetfence -# override to installed test files -packetfence_install__centos_packages: - - packetfence - - packetfence-test - -# only for dependencies, packetfence packages are installed using local repo +# in CI environment: only for dependencies, packetfence package is installed using local repo +# in local dev environment: to install packetfence packages packetfence_install__deb: repos: - "debian/{{ pf_minor_release }}" -# override to installed test files -packetfence_install__deb_packages: - - packetfence - - packetfence-test - # we used Venom to pass through configurator packetfence_install__configurator_status: 'enabled' diff --git a/addons/vagrant/inventory/group_vars/pfservers/rhel_subscription.yml b/addons/vagrant/inventory/group_vars/pfservers/rhel_subscription.yml index c60a2347ad71..cd492b68f57c 100644 --- a/addons/vagrant/inventory/group_vars/pfservers/rhel_subscription.yml +++ b/addons/vagrant/inventory/group_vars/pfservers/rhel_subscription.yml @@ -1,5 +1,5 @@ --- -rhel_sub_psono_secret_id: 'e9d98894-bd22-4f32-8b59-653aceb72ec4' +rhel_sub_psono_secret_id: "{{ users_vars[dict_name]['vars']['rhel_sub_secret_id'] }}" # can be used if variable above is not defined # rhel_subscription_user: diff --git a/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml b/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml index 050a3910d983..02eca97cde43 100644 --- a/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml +++ b/addons/vagrant/inventory/group_vars/pfservers/venom_local_vars.yml @@ -7,6 +7,22 @@ venom_local_vars: - name: 'pfserver_mgmt_netmask' value: "{{ packetfence_install__mgmt_interface['mask'] }}" + # get second IP (.2) usable without /CIDR + - name: 'configurator.interfaces.reg.ip' + value: "{{ users_vars[dict_name]['networks'][1]['subnet'] | ansible.netcommon.next_nth_usable(2) | 
ansible.netcommon.ipaddr('address') }}" + + # get netmask based on CIDR + - name: 'configurator.interfaces.reg.netmask' + value: "{{ users_vars[dict_name]['networks'][1]['subnet'] | ansible.netcommon.ipaddr('netmask') }}" + + # get second IP (.2) usable without /CIDR + - name: 'configurator.interfaces.iso.ip' + value: "{{ users_vars[dict_name]['networks'][2]['subnet'] | ansible.netcommon.next_nth_usable(2) | ansible.netcommon.ipaddr('address') }}" + + # get netmask based on CIDR + - name: 'configurator.interfaces.iso.netmask' + value: "{{ users_vars[dict_name]['networks'][2]['subnet'] | ansible.netcommon.ipaddr('netmask') }}" + - name: 'smtp_server' value: "{{ packetfence_install__mgmt_interface['ip'] }}" @@ -27,3 +43,30 @@ venom_local_vars: - name: 'mariadb_socket' value: "{{ mariadb_socket }}" + + - name: 'ad_mgmt_ip' + value: "{{ users_vars[dict_name]['vms']['ad']['ip'] }}" + + - name: 'switch01_mgmt_ip' + value: "{{ users_vars[dict_name]['vms']['switch01']['ip'] }}" + + - name: 'node01_mgmt_ip' + value: "{{ users_vars[dict_name]['vms']['node01']['ip'] }}" + + - name: 'node02_mgmt_ip' + value: "{{ users_vars[dict_name]['vms']['node02']['ip'] }}" + + - name: 'wireless01_mgmt_ip' + value: "{{ users_vars[dict_name]['vms']['wireless01']['ip'] }}" + + - name: 'linux01_mgmt_ip' + value: "{{ users_vars[dict_name]['vms']['linux01']['ip'] }}" + + - name: 'linux02_mgmt_ip' + value: "{{ users_vars[dict_name]['vms']['linux02']['ip'] }}" + + - name: 'fingerbank_api_key.secret_id' + value: "{{ users_vars[dict_name]['vars']['fingerbank_api_key']['secret_id'] }}" + + - name: 'fingerbank_api_key.email' + value: "{{ users_vars[dict_name]['vars']['fingerbank_api_key']['email'] }}" diff --git a/addons/vagrant/inventory/group_vars/service_rsyslog/rsyslog.yml b/addons/vagrant/inventory/group_vars/service_rsyslog/rsyslog.yml index 14b5cd00619e..6242ef7b3028 100644 --- a/addons/vagrant/inventory/group_vars/service_rsyslog/rsyslog.yml +++ b/addons/vagrant/inventory/group_vars/service_rsyslog/rsyslog.yml @@ -1,4 +1,4 @@ --- # allow all machines on management network to send logs to rsyslog rsyslog__group_allow: - - 172.17.17.0/24 + - "{{ users_vars[dict_name]['networks'][0]['subnet'] }}" diff --git a/addons/vagrant/inventory/group_vars/wireless/gitlab_buildpkg_tools.yml b/addons/vagrant/inventory/group_vars/wireless/gitlab_buildpkg_tools.yml index 39fab30cb12b..ebd8b487aa4a 100644 --- a/addons/vagrant/inventory/group_vars/wireless/gitlab_buildpkg_tools.yml +++ b/addons/vagrant/inventory/group_vars/wireless/gitlab_buildpkg_tools.yml @@ -1,6 +1,8 @@ --- # force value to simplify tests outside CI -gitlab_buildpkg_tools__ppa_enabled: True +# ppa will be disabled in local dev environment +gitlab_buildpkg_tools__ppa_enabled: '{{ True if lookup("env", "CI") + else False }}' # use repo generated by 'publish' stage gitlab_buildpkg_tools__ppa_url: 'http://inverse.ca/downloads/PacketFence/gitlab/{{ pipeline_id }}' @@ -12,6 +14,15 @@ gitlab_buildpkg_tools__deb_ppa: baseurl: "{{ gitlab_buildpkg_tools__ppa_url_deb }} {{ ansible_distribution_release }} main" gpgkey: 'http://inverse.ca/downloads/GPG_PUBLIC_KEY' +# added for local dev environment where we only want devel packages +gitlab_buildpkg_tools__deb_deps_repos: + - name: 'packetfence' + baseurl: 'http://inverse.ca/downloads/PacketFence/debian/{{ pf_minor_release }} {{ ansible_distribution_release }} {{ ansible_distribution_release }}' + +# added for local dev environment where we only want devel packages +gitlab_buildpkg_tools__deb_keys: + - 
'http://inverse.ca/downloads/GPG_PUBLIC_KEY' + gitlab_buildpkg_tools__deb_pkgs: - packetfence-test - + - rsync diff --git a/addons/vagrant/inventory/hosts b/addons/vagrant/inventory/hosts index e332ebef564b..6f3a73f8f2bb 100644 --- a/addons/vagrant/inventory/hosts +++ b/addons/vagrant/inventory/hosts @@ -8,8 +8,8 @@ all: cumulus: hosts: switch01: - # IP used by helper_scripts and Ansible, not Vagrant - mgmt_ip: 172.17.17.201 + mgmt_ip: "{{ users_vars[dict_name]['vms']['switch01']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['switch01']['netmask'] }}" box: CumulusCommunity/cumulus-vx box_version: 3.7.12 ansible_host: "{{ mgmt_ip }}" @@ -19,16 +19,16 @@ all: node01: box: debian/bullseye64 box_version: 11.20211018.1 - # IP used by helper_scripts and Ansible, not Vagrant - mgmt_ip: 172.17.17.251 + mgmt_ip: "{{ users_vars[dict_name]['vms']['node01']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['node01']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" # only used when run outside Vagrant ansible_python_interpreter: '/usr/bin/python3' node02: box: debian/bullseye64 box_version: 11.20211018.1 - # IP used by helper_scripts and Ansible, not Vagrant - mgmt_ip: 172.17.17.252 + mgmt_ip: "{{ users_vars[dict_name]['vms']['node02']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['node02']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" # only used when run outside Vagrant ansible_python_interpreter: '/usr/bin/python3' @@ -38,8 +38,8 @@ all: ad: box: jborean93/WindowsServer2016 box_version: 0.7.0 - mgmt_ip: 172.17.17.100 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ users_vars[dict_name]['vms']['ad']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['ad']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" cpus: 2 memory: 2048 @@ -49,8 +49,8 @@ all: wireless01: box: debian/bullseye64 box_version: 11.20211018.1 - mgmt_ip: 172.17.17.210 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ users_vars[dict_name]['vms']['wireless01']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['wireless01']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" ansible_python_interpreter: '/usr/bin/python3' cpus: 1 @@ -65,8 +65,8 @@ all: linux01: box: debian/bullseye64 box_version: 11.20211018.1 - mgmt_ip: 172.17.17.101 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ users_vars[dict_name]['vms']['linux01']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['linux01']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" ansible_python_interpreter: '/usr/bin/python3' cpus: 1 @@ -74,8 +74,8 @@ all: linux02: box: debian/bullseye64 box_version: 11.20211018.1 - mgmt_ip: 172.17.17.102 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ users_vars[dict_name]['vms']['linux02']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['linux02']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" ansible_python_interpreter: '/usr/bin/python3' cpus: 1 @@ -101,23 +101,19 @@ all: children: dev: hosts: - localhost: - mgmt_ip: '' - mgmt_netmask: 255.255.255.0 - ansible_connection: local pfel8dev: box: generic/rhel8 box_version: '3.4.2' - mgmt_ip: 172.17.17.10 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ users_vars[dict_name]['vms']['pfel8dev']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['pfel8dev']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" cpus: 2 memory: 6144 pfdeb11dev: box: debian/bullseye64 box_version: 11.20211018.1 - mgmt_ip: 172.17.17.12 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ users_vars[dict_name]['vms']['pfdeb11dev']['ip'] }}" + mgmt_netmask: "{{ 
users_vars[dict_name]['vms']['pfdeb11dev']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" ansible_python_interpreter: '/usr/bin/python3' cpus: 2 @@ -125,16 +121,40 @@ all: el8dev: box: generic/rhel8 box_version: '3.4.2' - mgmt_ip: 172.17.17.11 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ users_vars[dict_name]['vms']['el8dev']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['el8dev']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" cpus: 2 memory: 6144 deb11dev: box: debian/bullseye64 box_version: 11.20211018.1 - mgmt_ip: 172.17.17.13 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ users_vars[dict_name]['vms']['deb11dev']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['deb11dev']['netmask'] }}" + ansible_host: "{{ mgmt_ip }}" + ansible_python_interpreter: '/usr/bin/python3' + cpus: 2 + memory: 6144 + + localdev: + hosts: + localhost: + mgmt_ip: "{{ users_vars[dict_name]['vms']['localhost']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['localhost']['netmask'] }}" + ansible_connection: local + pfel8localdev: + box: generic/rhel8 + box_version: '3.4.2' + mgmt_ip: "{{ users_vars[dict_name]['vms']['pfel8localdev']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['pfel8localdev']['netmask'] }}" + ansible_host: "{{ mgmt_ip }}" + cpus: 2 + memory: 6144 + pfdeb11localdev: + box: debian/bullseye64 + box_version: 11.20211018.1 + mgmt_ip: "{{ users_vars[dict_name]['vms']['pfdeb11localdev']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['pfdeb11localdev']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" ansible_python_interpreter: '/usr/bin/python3' cpus: 2 @@ -145,8 +165,8 @@ all: pfel8stable: box: generic/rhel8 box_version: '3.4.2' - mgmt_ip: 172.17.17.14 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ users_vars[dict_name]['vms']['pfel8stable']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['pfel8stable']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" cpus: 2 memory: 8192 @@ -154,16 +174,16 @@ all: pfdeb9stable: box: inverse-inc/pfdeb9stable box_version: 10.3.20210414165339 - mgmt_ip: 172.17.17.15 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ users_vars[dict_name]['vms']['pfdeb9stable']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['pfdeb9stable']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" cpus: 2 memory: 8192 pfdeb11stable: box: debian/bullseye64 box_version: 11.20211018.1 - mgmt_ip: 172.17.17.16 - mgmt_netmask: 255.255.255.0 + mgmt_ip: "{{ users_vars[dict_name]['vms']['pfdeb11stable']['ip'] }}" + mgmt_netmask: "{{ users_vars[dict_name]['vms']['pfdeb11stable']['netmask'] }}" ansible_host: "{{ mgmt_ip }}" ansible_python_interpreter: '/usr/bin/python3' cpus: 2 @@ -172,28 +192,625 @@ all: vars: tz: UTC - networks: - - name: 'mgmt' - subnet: '172.17.17.0/24' - forward_mode: 'route' - netmask: 255.255.255.0 - - - name: 'registration' - subnet: '172.17.2.0/24' - forward_mode: 'route' - netmask: 255.255.255.0 - - - name: 'isolation' - subnet: '172.17.3.0/24' - forward_mode: 'route' - netmask: 255.255.255.0 - - - name: 'inline' - subnet: '172.17.6.0/24' - forward_mode: 'route' - netmask: 255.255.255.0 - - - name: 'inline-l3' - subnet: '172.17.18.0/24' - forward_mode: 'route' - netmask: 255.255.255.0 + user: "{{ lookup('env', 'USER') }}" + # if variable "user" doesn't exist in users_vars, we fallback to "gitlab-runner" has dict_name + dict_name: "{{ user if users_vars[user]|d() else 'gitlab-runner' }}" + users_vars: + # ci + gitlab-runner: + vars: + rhel_sub_secret_id: 'e9d98894-bd22-4f32-8b59-653aceb72ec4' + fingerbank_api_key: + secret_id: 
'd2c4d4f8-c5b1-4281-a724-e4ade5c31fe1' + email: 'support@inverse.ca' + networks: + - name: mgmt_ci + subnet: '172.17.200.0/24' + forward_mode: 'route' + - name: reg_ci + subnet: '172.17.201.0/24' + forward_mode: 'route' + - name: iso_ci + subnet: '172.17.202.0/24' + forward_mode: 'route' + - name: inline_ci + subnet: '172.17.203.0/24' + forward_mode: 'route' + - name: inline_l3_ci + subnet: '172.17.204.0/24' + vms: + switch01: + ip: '172.17.200.201' + netmask: '255.255.255.0' + inline_ip: '172.17.203.3' + inline_netmask: '255.255.255.0' + inline_l3_ip: '172.17.204.4' + inline_l3_netmask: '255.255.255.0' + node01: + ip: '172.17.200.251' + netmask: '255.255.255.0' + node02: + ip: '172.17.200.252' + netmask: '255.255.255.0' + ad: + ip: '172.17.200.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.200.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.200.101' + netmask: '255.255.255.0' + linux02: + ip: '172.17.200.102' + netmask: '255.255.255.0' + pfel8dev: + ip: '172.17.200.10' + netmask: '255.255.255.0' + el8dev: + ip: '172.17.200.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.200.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.200.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: '172.17.200.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.200.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.200.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.200.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.200.18' + netmask: '255.255.255.0' + # local dev + _nqb: + vars: + rhel_sub_secret_id: '7ecb993e-5179-4eb5-a686-7d7943bf65eb' + fingerbank_api_key: + secret_id: '98090324-0b00-4877-a0b2-e8a215350c72' + email: 'nqb+git@azyx.fr' + networks: + - name: mgmt_nqb + subnet: '172.17.140.0/24' + forward_mode: 'route' + - name: reg_nqb + subnet: '172.17.141.0/24' + forward_mode: 'route' + - name: iso_nqb + subnet: '172.17.142.0/24' + forward_mode: 'route' + - name: inline_nqb + subnet: '172.17.143.0/24' + forward_mode: 'route' + - name: inline_l3_nqb + subnet: '172.17.144.0/24' + vms: + switch01: + ip: '172.17.140.201' + netmask: '255.255.255.0' + inline_ip: '172.17.143.3' + inline_netmask: '255.255.255.0' + inline_l3_ip: '172.17.144.4' + inline_l3_netmask: '255.255.255.0' + node01: + ip: '172.17.140.251' + netmask: '255.255.255.0' + node02: + ip: '172.17.140.252' + netmask: '255.255.255.0' + ad: + ip: '172.17.140.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.140.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.140.101' + netmask: '255.255.255.0' + linux02: + ip: '172.17.140.102' + netmask: '255.255.255.0' + pfel8dev: + ip: '172.17.140.10' + netmask: '255.255.255.0' + el8dev: + ip: '172.17.140.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.140.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.140.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: '172.17.140.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.140.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.140.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.140.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.140.18' + netmask: '255.255.255.0' + _jrouzier: + vars: + rhel_sub_secret_id: '' + fingerbank_api_key: + secret_id: '' + email: '' + networks: + - name: mgmt_jrouzier + subnet: '172.17.115.0/24' + forward_mode: 'route' + - name: reg_jrouzier + subnet: '172.17.116.0/24' + 
forward_mode: 'route' + - name: iso_jrouzier + subnet: '172.17.117.0/24' + forward_mode: 'route' + - name: inline_jrouzier + subnet: '172.17.118.0/24' + forward_mode: 'route' + - name: inline_l3_jrouzier + subnet: '172.17.119.0/24' + vms: + switch01: + ip: '172.17.115.201' + netmask: '255.255.255.0' + inline_ip: '172.17.118.3' + inline_netmask: '255.255.255.0' + inline_l3_ip: '172.17.119.4' + inline_l3_netmask: '255.255.255.0' + node01: + ip: '172.17.115.251' + netmask: '255.255.255.0' + node02: + ip: '172.17.115.252' + netmask: '255.255.255.0' + ad: + ip: '172.17.115.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.115.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.115.101' + netmask: '255.255.255.0' + linux02: + ip: '172.17.115.102' + netmask: '255.255.255.0' + pfel8dev: + ip: '172.17.115.10' + netmask: '255.255.255.0' + el8dev: + ip: '172.17.115.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.115.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.115.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: '172.17.115.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.115.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.115.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.115.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.115.18' + netmask: '255.255.255.0' + _lzammit: + vars: + rhel_sub_secret_id: '' + fingerbank_api_key: + secret_id: '' + email: '' + networks: + - name: mgmt_lzammit + subnet: '172.17.145.0/24' + forward_mode: 'route' + - name: reg_lzammit + subnet: '172.17.146.0/24' + forward_mode: 'route' + - name: iso_lzammit + subnet: '172.17.147.0/24' + forward_mode: 'route' + - name: inline_lzammit + subnet: '172.17.148.0/24' + forward_mode: 'route' + - name: inline_l3_lzammit + subnet: '172.17.149.0/24' + vms: + switch01: + ip: '172.17.145.201' + netmask: '255.255.255.0' + inline_ip: '172.17.148.3' + inline_netmask: '255.255.255.0' + inline_l3_ip: '172.17.149.4' + inline_l3_netmask: '255.255.255.0' + node01: + ip: '172.17.145.251' + netmask: '255.255.255.0' + node02: + ip: '172.17.145.252' + netmask: '255.255.255.0' + ad: + ip: '172.17.145.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.145.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.145.101' + netmask: '255.255.255.0' + linux02: + ip: '172.17.145.102' + netmask: '255.255.255.0' + pfel8dev: + ip: '172.17.145.10' + netmask: '255.255.255.0' + el8dev: + ip: '172.17.145.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.145.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.145.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: '172.17.145.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.145.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.145.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.145.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.145.18' + netmask: '255.255.255.0' + _jgoimard: + vars: + rhel_sub_secret_id: '14e54e10-247a-4a38-b19c-b1f8209daf70' + fingerbank_api_key: + secret_id: 'ba503744-9ea6-4be3-bd73-d3d5f75d7092' + email: 'jgoimard@inverse.ca' + networks: + - name: mgmt_jgoimard + subnet: '172.17.120.0/24' + forward_mode: 'route' + - name: reg_jgoimard + subnet: '172.17.121.0/24' + forward_mode: 'route' + - name: iso_jgoimard + subnet: '172.17.122.0/24' + forward_mode: 'route' + - name: inline_jgoimard + subnet: '172.17.123.0/24' + 
forward_mode: 'route' + - name: inline_l3_jgoimard + subnet: '172.17.124.0/24' + vms: + switch01: + ip: '172.17.120.201' + netmask: '255.255.255.0' + inline_ip: '172.17.123.3' + inline_netmask: '255.255.255.0' + inline_l3_ip: '172.17.124.4' + inline_l3_netmask: '255.255.255.0' + node01: + ip: '172.17.120.251' + netmask: '255.255.255.0' + node02: + ip: '172.17.120.252' + netmask: '255.255.255.0' + ad: + ip: '172.17.120.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.120.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.120.101' + netmask: '255.255.255.0' + linux02: + ip: '172.17.120.102' + netmask: '255.255.255.0' + pfel8dev: + ip: '172.17.120.10' + netmask: '255.255.255.0' + el8dev: + ip: '172.17.120.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.120.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.120.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: '172.17.120.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.120.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.120.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.120.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.120.18' + netmask: '255.255.255.0' + _dsatkunas: + vars: + rhel_sub_secret_id: '' + fingerbank_api_key: + secret_id: '' + email: '' + networks: + - name: mgmt_dsatkunas + subnet: '172.17.125.0/24' + forward_mode: 'route' + - name: reg_dsatkunas + subnet: '172.17.126.0/24' + forward_mode: 'route' + - name: iso_dsatkunas + subnet: '172.17.127.0/24' + forward_mode: 'route' + - name: inline_dsatkunas + subnet: '172.17.128.0/24' + forward_mode: 'route' + - name: inline_l3_dsatkunas + subnet: '172.17.119.0/24' + vms: + switch01: + ip: '172.17.125.201' + netmask: '255.255.255.0' + inline_ip: '172.17.128.3' + inline_netmask: '255.255.255.0' + inline_l3_ip: '172.17.128.4' + inline_l3_netmask: '255.255.255.0' + node01: + ip: '172.17.125.251' + netmask: '255.255.255.0' + node02: + ip: '172.17.125.252' + netmask: '255.255.255.0' + ad: + ip: '172.17.125.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.125.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.125.101' + netmask: '255.255.255.0' + linux02: + ip: '172.17.125.102' + netmask: '255.255.255.0' + pfel8dev: + ip: '172.17.125.10' + netmask: '255.255.255.0' + el8dev: + ip: '172.17.125.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.125.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.125.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: '172.17.125.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.125.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.125.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.125.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.125.18' + netmask: '255.255.255.0' + _fdurand: + vars: + rhel_sub_secret_id: '' + fingerbank_api_key: + secret_id: '' + email: '' + networks: + - name: mgmt_fdurand + subnet: '172.17.135.0/24' + forward_mode: 'route' + - name: reg_fdurand + subnet: '172.17.136.0/24' + forward_mode: 'route' + - name: iso_fdurand + subnet: '172.17.137.0/24' + forward_mode: 'route' + - name: inline_fdurand + subnet: '172.17.138.0/24' + forward_mode: 'route' + - name: inline_l3_fdurand + subnet: '172.17.139.0/24' + vms: + switch01: + ip: '172.17.135.201' + netmask: '255.255.255.0' + inline_ip: '172.17.138.3' + inline_netmask: '255.255.255.0' + inline_l3_ip: '172.17.139.4' + 
inline_l3_netmask: '255.255.255.0' + node01: + ip: '172.17.135.251' + netmask: '255.255.255.0' + node02: + ip: '172.17.135.252' + netmask: '255.255.255.0' + ad: + ip: '172.17.135.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.135.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.135.101' + netmask: '255.255.255.0' + linux02: + ip: '172.17.135.102' + netmask: '255.255.255.0' + pfel8dev: + ip: '172.17.135.10' + netmask: '255.255.255.0' + el8dev: + ip: '172.17.135.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.135.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.135.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: '172.17.135.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.135.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.135.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.135.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.135.18' + netmask: '255.255.255.0' + _jsemaan: + vars: + rhel_sub_secret_id: '' + fingerbank_api_key: + secret_id: '' + email: '' + networks: + - name: mgmt_jsemaan + subnet: '172.17.155.0/24' + forward_mode: 'route' + - name: reg_jsemaan + subnet: '172.17.156.0/24' + forward_mode: 'route' + - name: iso_jsemaan + subnet: '172.17.157.0/24' + forward_mode: 'route' + - name: inline_jsemaan + subnet: '172.17.158.0/24' + forward_mode: 'route' + - name: inline_l3_jsemaan + subnet: '172.17.159.0/24' + vms: + switch01: + ip: '172.17.155.201' + netmask: '255.255.255.0' + inline_ip: '172.17.158.3' + inline_netmask: '255.255.255.0' + inline_l3_ip: '172.17.159.4' + inline_l3_netmask: '255.255.255.0' + node01: + ip: '172.17.155.251' + netmask: '255.255.255.0' + node02: + ip: '172.17.155.252' + netmask: '255.255.255.0' + ad: + ip: '172.17.155.100' + netmask: '255.255.255.0' + wireless01: + ip: '172.17.155.210' + netmask: '255.255.255.0' + linux01: + ip: '172.17.155.101' + netmask: '255.255.255.0' + linux02: + ip: '172.17.155.102' + netmask: '255.255.255.0' + pfel8dev: + ip: '172.17.155.10' + netmask: '255.255.255.0' + el8dev: + ip: '172.17.155.11' + netmask: '255.255.255.0' + pfdeb11dev: + ip: '172.17.155.12' + netmask: '255.255.255.0' + deb11dev: + ip: '172.17.155.13' + netmask: '255.255.255.0' + localhost: + ip: '' + netmask: '255.255.255.0' + pfel8localdev: + ip: '172.17.155.14' + netmask: '255.255.255.0' + pfdeb11localdev: + ip: '172.17.155.15' + netmask: '255.255.255.0' + pfel8stable: + ip: '172.17.155.16' + netmask: '255.255.255.0' + pfdeb9stable: + ip: '172.17.155.17' + netmask: '255.255.255.0' + pfdeb11stable: + ip: '172.17.155.18' + netmask: '255.255.255.0' diff --git a/addons/vagrant/linux_servers/Vagrantfile b/addons/vagrant/linux_servers/Vagrantfile index 6cb1592ad45e..3e0d5e6e7bb7 100644 --- a/addons/vagrant/linux_servers/Vagrantfile +++ b/addons/vagrant/linux_servers/Vagrantfile @@ -3,10 +3,17 @@ # Require YAML module require 'yaml' - -# Read YAML file with box details + +# Read YAML file with box and network details inventory = YAML.load_file('inventory/hosts') -networks = inventory['all']['vars']['networks'] +user = ENV['USER'] +users_vars = inventory['all']['vars']['users_vars'] +if users_vars[user] + dict_name = user +else + dict_name = 'gitlab-runner' +end +user_vars = inventory['all']['vars']['users_vars'][dict_name] Vagrant.configure("2") do |config| # loop on **all** host(s) in linux_servers group in inventory to create VM(s) @@ -22,11 +29,11 @@ Vagrant.configure("2") do |config| # only from our expected subnet. 
Allow traffic between guests. Deny # all other inbound. Deny all other outbound. srv.vm.network "private_network", - :libvirt__network_name => networks[0]['name'], - :ip => details['mgmt_ip'], - :netmask => details['mgmt_netmask'], + :libvirt__network_name => user_vars['networks'][0]['name'], + :ip => user_vars['vms'][server]['ip'], + :netmask => user_vars['vms'][server]['netmask'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[0]['forward_mode'] + :libvirt__forward_mode => user_vars['networks'][0]['forward_mode'] srv.vm.provider "libvirt" do |v| v.cpus = details['cpus'] diff --git a/addons/vagrant/pfservers/Vagrantfile b/addons/vagrant/pfservers/Vagrantfile index 1f00d243a854..b761250e132a 100644 --- a/addons/vagrant/pfservers/Vagrantfile +++ b/addons/vagrant/pfservers/Vagrantfile @@ -4,9 +4,18 @@ # Require YAML module require 'yaml' -# Read YAML file with box details +# Read YAML file with box and network details inventory = YAML.load_file('inventory/hosts') -networks = inventory['all']['vars']['networks'] +user = ENV['USER'] +users_vars = inventory['all']['vars']['users_vars'] +if users_vars[user] + puts "#{user} user present in inventory, apply user settings" + dict_name = user +else + puts "#{user} user absent in inventory, fallback to gitlab-runner settings" + dict_name = 'gitlab-runner' +end +user_vars = inventory['all']['vars']['users_vars'][dict_name] Vagrant.configure("2") do |config| # loop on **all** host(s) in pfservers group in inventory to create VM(s) @@ -24,34 +33,34 @@ Vagrant.configure("2") do |config| # only from our expected subnet. Allow traffic between guests. Deny # all other inbound. Deny all other outbound. srv.vm.network "private_network", - :libvirt__network_name => networks[0]['name'], - :ip => details['mgmt_ip'], - :netmask => details['mgmt_netmask'], + :libvirt__network_name => user_vars['networks'][0]['name'], + :ip => user_vars['vms'][server]['ip'], + :netmask => user_vars['vms'][server]['netmask'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[0]['forward_mode'] + :libvirt__forward_mode => user_vars['networks'][0]['forward_mode'] # registration srv.vm.network "private_network", - :libvirt__network_name => networks[1]['name'], - :ip => networks[1]['subnet'], + :libvirt__network_name => user_vars['networks'][1]['name'], + :ip => user_vars['networks'][1]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[1]['forward_mode'], + :libvirt__forward_mode => user_vars['networks'][1]['forward_mode'], auto_config: false # isolation srv.vm.network "private_network", - :libvirt__network_name => networks[2]['name'], - :ip => networks[2]['subnet'], + :libvirt__network_name => user_vars['networks'][2]['name'], + :ip => user_vars['networks'][2]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[2]['forward_mode'], + :libvirt__forward_mode => user_vars['networks'][2]['forward_mode'], auto_config: false # inline srv.vm.network "private_network", - :libvirt__network_name => networks[3]['name'], - :ip => networks[3]['subnet'], + :libvirt__network_name => user_vars['networks'][3]['name'], + :ip => user_vars['networks'][3]['subnet'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[3]['forward_mode'], + :libvirt__forward_mode => user_vars['networks'][3]['forward_mode'], auto_config: false srv.vm.provider "libvirt" do |v| diff --git a/addons/vagrant/playbooks/nodes/pre_prov/packages.yml b/addons/vagrant/playbooks/nodes/pre_prov/packages.yml index 
7ecddbd8cfb4..d2e1e56cd125 100644 --- a/addons/vagrant/playbooks/nodes/pre_prov/packages.yml +++ b/addons/vagrant/playbooks/nodes/pre_prov/packages.yml @@ -14,7 +14,9 @@ pf_minor_release: '{{ lookup("env", "PF_MINOR_RELEASE") | default("99.9", true) }}' # force value to simplify tests outside CI - gitlab_buildpkg_tools__ppa_enabled: True + # ppa will be disabled in local dev environment + gitlab_buildpkg_tools__ppa_enabled: '{{ True if lookup("env", "CI") + else False }}' # use repo generated by 'publish' stage gitlab_buildpkg_tools__ppa_url: 'http://inverse.ca/downloads/PacketFence/gitlab/{{ pipeline_id }}' @@ -26,10 +28,16 @@ baseurl: "{{ gitlab_buildpkg_tools__ppa_url_deb }} {{ ansible_distribution_release }} main" gpgkey: 'http://inverse.ca/downloads/GPG_PUBLIC_KEY' + # added for local dev environment where we only want devel packages + # **and** for dependencies in CI environment gitlab_buildpkg_tools__deb_deps_repos: - name: 'packetfence' baseurl: 'http://inverse.ca/downloads/PacketFence/debian/{{ pf_minor_release }} {{ ansible_distribution_release }} {{ ansible_distribution_release }}' + # added for local dev environment + gitlab_buildpkg_tools__deb_keys: + - 'http://inverse.ca/downloads/GPG_PUBLIC_KEY' + gitlab_buildpkg_tools__deb_pkgs: - packetfence-test - wpasupplicant diff --git a/addons/vagrant/requirements.yml b/addons/vagrant/requirements.yml index 0e2bbb09b4a1..9f3a8a76cd8a 100644 --- a/addons/vagrant/requirements.yml +++ b/addons/vagrant/requirements.yml @@ -13,7 +13,7 @@ roles: collections: - name: inverse_inc.packetfence - version: 1.2.1 + version: 1.2.1-1 - name: debops.debops version: 2.3.2 - name: inverse_inc.windows diff --git a/addons/vagrant/winservers/Vagrantfile b/addons/vagrant/winservers/Vagrantfile index 838aea4d77f7..86a885fa0584 100644 --- a/addons/vagrant/winservers/Vagrantfile +++ b/addons/vagrant/winservers/Vagrantfile @@ -3,10 +3,17 @@ # Require YAML module require 'yaml' - -# Read YAML file with box details + +# Read YAML file with box and network details inventory = YAML.load_file('inventory/hosts') -networks = inventory['all']['vars']['networks'] +user = ENV['USER'] +users_vars = inventory['all']['vars']['users_vars'] +if users_vars[user] + dict_name = user +else + dict_name = 'gitlab-runner' +end +user_vars = inventory['all']['vars']['users_vars'][dict_name] Vagrant.configure("2") do |config| inventory['all']['children']['winservers']['hosts'].each do |server,details| @@ -21,11 +28,11 @@ Vagrant.configure("2") do |config| # only from our expected subnet. Allow traffic between guests. Deny # all other inbound. Deny all other outbound. 
srv.vm.network "private_network", - :libvirt__network_name => networks[0]['name'], - :ip => details['mgmt_ip'], - :netmask => details['mgmt_netmask'], + :libvirt__network_name => user_vars['networks'][0]['name'], + :ip => user_vars['vms'][server]['ip'], + :netmask => user_vars['vms'][server]['netmask'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[0]['forward_mode'] + :libvirt__forward_mode => user_vars['networks'][0]['forward_mode'] srv.vm.provider "libvirt" do |v| v.cpus = details['cpus'] diff --git a/addons/vagrant/wireless/Vagrantfile b/addons/vagrant/wireless/Vagrantfile index f5246ba08f4e..77eaa80ce8d0 100644 --- a/addons/vagrant/wireless/Vagrantfile +++ b/addons/vagrant/wireless/Vagrantfile @@ -4,9 +4,16 @@ # Require YAML module require 'yaml' -# Read YAML file with box details +# Read YAML file with box and network details inventory = YAML.load_file('inventory/hosts') -networks = inventory['all']['vars']['networks'] +user = ENV['USER'] +users_vars = inventory['all']['vars']['users_vars'] +if users_vars[user] + dict_name = user +else + dict_name = 'gitlab-runner' +end +user_vars = inventory['all']['vars']['users_vars'][dict_name] Vagrant.configure("2") do |config| inventory['all']['children']['wireless']['hosts'].each do |server,details| @@ -27,11 +34,11 @@ Vagrant.configure("2") do |config| # only from our expected subnet. Allow traffic between guests. Deny # all other inbound. Deny all other outbound. srv.vm.network "private_network", - :libvirt__network_name => networks[0]['name'], - :ip => details['mgmt_ip'], - :netmask => details['mgmt_netmask'], + :libvirt__network_name => user_vars['networks'][0]['name'], + :ip => user_vars['vms'][server]['ip'], + :netmask => user_vars['vms'][server]['netmask'], :libvirt__dhcp_enabled => false, - :libvirt__forward_mode => networks[0]['forward_mode'] + :libvirt__forward_mode => user_vars['networks'][0]['forward_mode'] end end end diff --git a/t/venom/Makefile b/t/venom/Makefile index d74be7c34bbb..d848c233a8fc 100644 --- a/t/venom/Makefile +++ b/t/venom/Makefile @@ -6,6 +6,20 @@ SHELL=/bin/bash PFSERVERS_DIR=pfservers CI_PIPELINE_ID=123456789 +#============================================================================== +# Tests CI or localdev +#============================================================================== +# in localdev, we don't want to clean all VM created previously +ifeq ($(CI), true) + $(info CI environment detected) + MAKE_TARGET=run + DEV_ENV=dev +else + $(info localdev environment detected) + MAKE_TARGET=run_w_clean + DEV_ENV=localdev +endif + #============================================================================== # Targets #============================================================================== @@ -48,71 +62,76 @@ clean: unit_tests_el8: make \ - PF_VM_NAME=pfel8dev \ + PF_VM_NAME=pfel8$(DEV_ENV) \ SCENARIOS_TO_RUN=unit_tests \ $(MAKE_TARGET) configurator_el8: make \ - PF_VM_NAME=pfel8dev \ + PF_VM_NAME=pfel8$(DEV_ENV) \ SCENARIOS_TO_RUN=configurator \ $(MAKE_TARGET) configurator_deb11: make \ - PF_VM_NAME=pfdeb11dev \ + PF_VM_NAME=pfdeb11$(DEV_ENV) \ SCENARIOS_TO_RUN=configurator \ $(MAKE_TARGET) dot1x_eap_peap_el8: make \ - PF_VM_NAME=pfel8dev \ + PF_VM_NAME=pfel8$(DEV_ENV) \ INT_TEST_VM_NAMES="ad switch01 node01 wireless01" \ SCENARIOS_TO_RUN=dot1x_eap_peap \ $(MAKE_TARGET) dot1x_eap_peap_deb11: make \ - PF_VM_NAME=pfdeb11dev \ + PF_VM_NAME=pfdeb11$(DEV_ENV) \ INT_TEST_VM_NAMES="ad switch01 node01 wireless01" \ SCENARIOS_TO_RUN=dot1x_eap_peap \ $(MAKE_TARGET) mac_auth_el8: 
make \ - PF_VM_NAME=pfel8dev \ + PF_VM_NAME=pfel8$(DEV_ENV) \ INT_TEST_VM_NAMES="switch01 node01 wireless01" \ SCENARIOS_TO_RUN=mac_auth \ $(MAKE_TARGET) mac_auth_deb11: make \ - PF_VM_NAME=pfdeb11dev \ + PF_VM_NAME=pfdeb11$(DEV_ENV) \ INT_TEST_VM_NAMES="switch01 node01 wireless01" \ SCENARIOS_TO_RUN=mac_auth \ $(MAKE_TARGET) dot1x_eap_tls_el8: make \ - PF_VM_NAME=pfel8dev \ + PF_VM_NAME=pfel8$(DEV_ENV) \ INT_TEST_VM_NAMES="switch01 node01" \ SCENARIOS_TO_RUN=dot1x_eap_tls \ $(MAKE_TARGET) dot1x_eap_tls_deb11: make \ - PF_VM_NAME=pfdeb11dev \ + PF_VM_NAME=pfdeb11$(DEV_ENV) \ INT_TEST_VM_NAMES="switch01 node01" \ SCENARIOS_TO_RUN=dot1x_eap_tls \ $(MAKE_TARGET) -#============================================================================== -# Targets for local tests -#============================================================================== -# No clean -unit_tests_el8_w: +example_el8: make \ - PF_VM_NAME=pfel8dev \ - SCENARIOS_TO_RUN=unit_tests \ + PF_VM_NAME=pfel8$(DEV_ENV) \ + # if you want to start additional VMs + #INT_TEST_VM_NAMES="switch01 node01" \ + SCENARIOS_TO_RUN=example \ $(MAKE_TARGET) +example_deb11: + make \ + PF_VM_NAME=pfdeb11$(DEV_ENV) \ + # if you want to start additional VMs + #INT_TEST_VM_NAMES="switch01 node01" \ + SCENARIOS_TO_RUN=example \ + $(MAKE_TARGET) diff --git a/t/venom/requirements.yml b/t/venom/requirements.yml index 64070d467dbc..bf3ad9644b83 100644 --- a/t/venom/requirements.yml +++ b/t/venom/requirements.yml @@ -11,7 +11,7 @@ roles: collections: - name: inverse_inc.packetfence - version: 1.2.1 + version: 1.2.1-1 - name: debops.debops version: 2.3.2 - name: inverse_inc.windows diff --git a/t/venom/scenarios/configurator/playbooks/rsync.yml b/t/venom/scenarios/configurator/playbooks/rsync.yml new file mode 100644 index 000000000000..a592f83c0c79 --- /dev/null +++ b/t/venom/scenarios/configurator/playbooks/rsync.yml @@ -0,0 +1,18 @@ +--- +- hosts: pfservers, service_venom, nodes + name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only) + become: True + + tasks: + - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom + ansible.posix.synchronize: + # src is provided through test-wrapper.sh + # as $PWD/venom (no leading slash) + src: "{{ lookup('env', 'VENOM_ROOT_DIR') }}" + dest: '/usr/local/pf/t/' + archive: yes + delete: yes + # exclude files dynamically generated by Ansible + rsync_opts: + - "--exclude=vars/local.yml" + diff --git a/t/venom/scenarios/configurator/site.yml b/t/venom/scenarios/configurator/site.yml index 26debebc807a..7442a70f6840 100644 --- a/t/venom/scenarios/configurator/site.yml +++ b/t/venom/scenarios/configurator/site.yml @@ -1,5 +1,9 @@ --- -#- import_playbook: provision.yml +# - import_playbook: playbooks/provision.yml + +# rsync before tests when doing local development +- import_playbook: playbooks/rsync.yml + when: lookup("env", "CI") != 'true' #- import_playbook: playbooks/configure.yml diff --git a/t/venom/scenarios/dot1x_eap_peap/playbooks/rsync.yml b/t/venom/scenarios/dot1x_eap_peap/playbooks/rsync.yml new file mode 100644 index 000000000000..a592f83c0c79 --- /dev/null +++ b/t/venom/scenarios/dot1x_eap_peap/playbooks/rsync.yml @@ -0,0 +1,18 @@ +--- +- hosts: pfservers, service_venom, nodes + name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only) + become: True + + tasks: + - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom + ansible.posix.synchronize: + # src is provided through test-wrapper.sh + # as $PWD/venom (no leading slash) + src: "{{ 
lookup('env', 'VENOM_ROOT_DIR') }}"
+        dest: '/usr/local/pf/t/'
+        archive: yes
+        delete: yes
+        # exclude files dynamically generated by Ansible
+        rsync_opts:
+          - "--exclude=vars/local.yml"
+
diff --git a/t/venom/scenarios/dot1x_eap_peap/site.yml b/t/venom/scenarios/dot1x_eap_peap/site.yml
index 2522d0837f29..7442a70f6840 100644
--- a/t/venom/scenarios/dot1x_eap_peap/site.yml
+++ b/t/venom/scenarios/dot1x_eap_peap/site.yml
@@ -1,5 +1,9 @@
 ---
-# - import_playbook: provision.yml
+# - import_playbook: playbooks/provision.yml
+
+# rsync before tests when doing local development
+- import_playbook: playbooks/rsync.yml
+  when: lookup("env", "CI") != 'true'
 
 #- import_playbook: playbooks/configure.yml
 
diff --git a/t/venom/scenarios/dot1x_eap_tls/playbooks/rsync.yml b/t/venom/scenarios/dot1x_eap_tls/playbooks/rsync.yml
new file mode 100644
index 000000000000..a592f83c0c79
--- /dev/null
+++ b/t/venom/scenarios/dot1x_eap_tls/playbooks/rsync.yml
@@ -0,0 +1,18 @@
+---
+- hosts: pfservers, service_venom, nodes
+  name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only)
+  become: True
+
+  tasks:
+    - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom
+      ansible.posix.synchronize:
+        # src is provided through test-wrapper.sh
+        # as $PWD/venom (no leading slash)
+        src: "{{ lookup('env', 'VENOM_ROOT_DIR') }}"
+        dest: '/usr/local/pf/t/'
+        archive: yes
+        delete: yes
+        # exclude files dynamically generated by Ansible
+        rsync_opts:
+          - "--exclude=vars/local.yml"
+
diff --git a/t/venom/scenarios/dot1x_eap_tls/site.yml b/t/venom/scenarios/dot1x_eap_tls/site.yml
index 2522d0837f29..7442a70f6840 100644
--- a/t/venom/scenarios/dot1x_eap_tls/site.yml
+++ b/t/venom/scenarios/dot1x_eap_tls/site.yml
@@ -1,5 +1,9 @@
 ---
-# - import_playbook: provision.yml
+# - import_playbook: playbooks/provision.yml
+
+# rsync before tests when doing local development
+- import_playbook: playbooks/rsync.yml
+  when: lookup("env", "CI") != 'true'
 
 #- import_playbook: playbooks/configure.yml
 
diff --git a/t/venom/scenarios/example/playbooks/rsync.yml b/t/venom/scenarios/example/playbooks/rsync.yml
new file mode 100644
index 000000000000..a592f83c0c79
--- /dev/null
+++ b/t/venom/scenarios/example/playbooks/rsync.yml
@@ -0,0 +1,18 @@
+---
+- hosts: pfservers, service_venom, nodes
+  name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only)
+  become: True
+
+  tasks:
+    - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom
+      ansible.posix.synchronize:
+        # src is provided through test-wrapper.sh
+        # as $PWD/venom (no leading slash)
+        src: "{{ lookup('env', 'VENOM_ROOT_DIR') }}"
+        dest: '/usr/local/pf/t/'
+        archive: yes
+        delete: yes
+        # exclude files dynamically generated by Ansible
+        rsync_opts:
+          - "--exclude=vars/local.yml"
+
diff --git a/t/venom/scenarios/example/site.yml b/t/venom/scenarios/example/site.yml
index 2522d0837f29..7442a70f6840 100644
--- a/t/venom/scenarios/example/site.yml
+++ b/t/venom/scenarios/example/site.yml
@@ -1,5 +1,9 @@
 ---
-# - import_playbook: provision.yml
+# - import_playbook: playbooks/provision.yml
+
+# rsync before tests when doing local development
+- import_playbook: playbooks/rsync.yml
+  when: lookup("env", "CI") != 'true'
 
 #- import_playbook: playbooks/configure.yml
 
diff --git a/t/venom/scenarios/export_import/playbooks/rsync.yml b/t/venom/scenarios/export_import/playbooks/rsync.yml
new file mode 100644
index 000000000000..a592f83c0c79
--- /dev/null
+++ b/t/venom/scenarios/export_import/playbooks/rsync.yml
@@ -0,0 +1,18 @@
+---
+- hosts: pfservers, service_venom, nodes
+  name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only)
+  become: True
+
+  tasks:
+    - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom
+      ansible.posix.synchronize:
+        # src is provided through test-wrapper.sh
+        # as $PWD/venom (no leading slash)
+        src: "{{ lookup('env', 'VENOM_ROOT_DIR') }}"
+        dest: '/usr/local/pf/t/'
+        archive: yes
+        delete: yes
+        # exclude files dynamically generated by Ansible
+        rsync_opts:
+          - "--exclude=vars/local.yml"
+
diff --git a/t/venom/scenarios/export_import/site.yml b/t/venom/scenarios/export_import/site.yml
index d19c9d1672e6..7442a70f6840 100644
--- a/t/venom/scenarios/export_import/site.yml
+++ b/t/venom/scenarios/export_import/site.yml
@@ -1,6 +1,10 @@
 ---
-# - import_playbook: provision.yml
+# - import_playbook: playbooks/provision.yml
 
-- import_playbook: playbooks/configure.yml
+# rsync before tests when doing local development
+- import_playbook: playbooks/rsync.yml
+  when: lookup("env", "CI") != 'true'
+
+#- import_playbook: playbooks/configure.yml
 
 - import_playbook: playbooks/run_tests.yml
diff --git a/t/venom/scenarios/mac_auth/playbooks/rsync.yml b/t/venom/scenarios/mac_auth/playbooks/rsync.yml
new file mode 100644
index 000000000000..a592f83c0c79
--- /dev/null
+++ b/t/venom/scenarios/mac_auth/playbooks/rsync.yml
@@ -0,0 +1,18 @@
+---
+- hosts: pfservers, service_venom, nodes
+  name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only)
+  become: True
+
+  tasks:
+    - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom
+      ansible.posix.synchronize:
+        # src is provided through test-wrapper.sh
+        # as $PWD/venom (no leading slash)
+        src: "{{ lookup('env', 'VENOM_ROOT_DIR') }}"
+        dest: '/usr/local/pf/t/'
+        archive: yes
+        delete: yes
+        # exclude files dynamically generated by Ansible
+        rsync_opts:
+          - "--exclude=vars/local.yml"
+
diff --git a/t/venom/scenarios/mac_auth/site.yml b/t/venom/scenarios/mac_auth/site.yml
index 2522d0837f29..7442a70f6840 100644
--- a/t/venom/scenarios/mac_auth/site.yml
+++ b/t/venom/scenarios/mac_auth/site.yml
@@ -1,5 +1,9 @@
 ---
-# - import_playbook: provision.yml
+# - import_playbook: playbooks/provision.yml
+
+# rsync before tests when doing local development
+- import_playbook: playbooks/rsync.yml
+  when: lookup("env", "CI") != 'true'
 
 #- import_playbook: playbooks/configure.yml
 
diff --git a/t/venom/scenarios/template/playbooks/rsync.yml b/t/venom/scenarios/template/playbooks/rsync.yml
new file mode 100644
index 000000000000..a592f83c0c79
--- /dev/null
+++ b/t/venom/scenarios/template/playbooks/rsync.yml
@@ -0,0 +1,18 @@
+---
+- hosts: pfservers, service_venom, nodes
+  name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only)
+  become: True
+
+  tasks:
+    - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom
+      ansible.posix.synchronize:
+        # src is provided through test-wrapper.sh
+        # as $PWD/venom (no leading slash)
+        src: "{{ lookup('env', 'VENOM_ROOT_DIR') }}"
+        dest: '/usr/local/pf/t/'
+        archive: yes
+        delete: yes
+        # exclude files dynamically generated by Ansible
+        rsync_opts:
+          - "--exclude=vars/local.yml"
+
diff --git a/t/venom/scenarios/template/site.yml b/t/venom/scenarios/template/site.yml
index d19c9d1672e6..7442a70f6840 100644
--- a/t/venom/scenarios/template/site.yml
+++ b/t/venom/scenarios/template/site.yml
@@ -1,6 +1,10 @@
 ---
-# - import_playbook: provision.yml
+# - import_playbook: playbooks/provision.yml
 
-- import_playbook: playbooks/configure.yml
+# rsync before tests when doing local development
+- import_playbook: playbooks/rsync.yml
+  when: lookup("env", "CI") != 'true'
+
+#- import_playbook: playbooks/configure.yml
 
 - import_playbook: playbooks/run_tests.yml
diff --git a/t/venom/scenarios/unit_tests/playbooks/rsync.yml b/t/venom/scenarios/unit_tests/playbooks/rsync.yml
new file mode 100644
index 000000000000..a592f83c0c79
--- /dev/null
+++ b/t/venom/scenarios/unit_tests/playbooks/rsync.yml
@@ -0,0 +1,18 @@
+---
+- hosts: pfservers, service_venom, nodes
+  name: Rsync Git repository t/venom with /usr/local/pf/t/venom (localdev only)
+  become: True
+
+  tasks:
+    - name: Synchronize Git repository t/venom with /usr/local/pf/t/venom
+      ansible.posix.synchronize:
+        # src is provided through test-wrapper.sh
+        # as $PWD/venom (no leading slash)
+        src: "{{ lookup('env', 'VENOM_ROOT_DIR') }}"
+        dest: '/usr/local/pf/t/'
+        archive: yes
+        delete: yes
+        # exclude files dynamically generated by Ansible
+        rsync_opts:
+          - "--exclude=vars/local.yml"
+
diff --git a/t/venom/scenarios/unit_tests/site.yml b/t/venom/scenarios/unit_tests/site.yml
index d19c9d1672e6..7442a70f6840 100644
--- a/t/venom/scenarios/unit_tests/site.yml
+++ b/t/venom/scenarios/unit_tests/site.yml
@@ -1,6 +1,10 @@
 ---
-# - import_playbook: provision.yml
+# - import_playbook: playbooks/provision.yml
 
-- import_playbook: playbooks/configure.yml
+# rsync before tests when doing local development
+- import_playbook: playbooks/rsync.yml
+  when: lookup("env", "CI") != 'true'
+
+#- import_playbook: playbooks/configure.yml
 
 - import_playbook: playbooks/run_tests.yml
diff --git a/t/venom/test-wrapper.sh b/t/venom/test-wrapper.sh
index 19c6c35bd8bf..236fbbaf0ba7 100755
--- a/t/venom/test-wrapper.sh
+++ b/t/venom/test-wrapper.sh
@@ -58,12 +58,13 @@ configure_and_check() {
 
 
     declare -p VAGRANT_DIR VAGRANT_ANSIBLE_VERBOSE VAGRANT_PF_DOTFILE_PATH VAGRANT_COMMON_DOTFILE_PATH
-    declare -p ANSIBLE_INVENTORY
+    declare -p ANSIBLE_INVENTORY VENOM_ROOT_DIR
     declare -p CI_COMMIT_TAG CI_PIPELINE_ID PF_MINOR_RELEASE
     declare -p PF_VM_NAME INT_TEST_VM_NAMES ANSIBLE_VM_LIST
     declare -p SCENARIOS_TO_RUN DESTROY_ALL
 
     export ANSIBLE_INVENTORY
+    export VENOM_ROOT_DIR
 }
 
 run() {
@@ -77,15 +78,42 @@ run() {
     run_tests
 }
 
+# Start and provision a VM, with or without Vagrant
+start_vm() {
+    local vm=$1
+    local dotfile_path=$2
+    if [ -e "${dotfile_path}/machines/${vm}/libvirt/id" ]; then
+        echo "Machine $vm already exists"
+        machine_uuid=$(cat ${dotfile_path}/machines/${vm}/libvirt/id)
+        machine_state=$(virsh -c qemu:///system domstate --domain $machine_uuid)
+        if [ "${machine_state}" = "shut off" ]; then
+            echo "Starting $vm using libvirt, provisioning using Ansible (without Vagrant)"
+            virsh -c qemu:///system start --domain $machine_uuid
+            # let time for the VM to boot before using ansible
+            echo "Giving the VM time to boot before provisioning with Ansible..."
+            sleep 60
+        else
+            echo "Machine already started, Ansible provisioning only"
+        fi
+        ( cd ${VAGRANT_DIR}; \
+          ansible-playbook site.yml -l $vm )
+    else
+        echo "Machine $vm doesn't exist, start and provision with Vagrant"
+        ( cd ${VAGRANT_DIR} ; \
+          VAGRANT_DOTFILE_PATH=${dotfile_path} \
+          vagrant up \
+          ${vm} \
+          ${VAGRANT_UP_OPTS} )
+    fi
+}
+
 start_and_provision_pf_vm() {
     local vm_names=${@:-vmname}
-    log_subsection "Start and provision $vm_names"
+    log_subsection "Start and provision PacketFence $vm_names"
+    for vm in ${vm_names}; do
+        start_vm ${vm} ${VAGRANT_PF_DOTFILE_PATH}
+    done
 
-    ( cd ${VAGRANT_DIR} ; \
-      VAGRANT_DOTFILE_PATH=${VAGRANT_PF_DOTFILE_PATH} \
-      vagrant up \
-      ${vm_names} \
-      ${VAGRANT_UP_OPTS} )
 }
 
 start_and_provision_other_vm() {
@@ -93,25 +121,7 @@ start_and_provision_other_vm() {
     log_subsection "Start and provision $vm_names"
 
     for vm in ${vm_names}; do
-        if [ -e "${VAGRANT_COMMON_DOTFILE_PATH}/machines/${vm}/libvirt/id" ]; then
-            echo "Machine $vm already exists"
-            machine_uuid=$(cat ${VAGRANT_COMMON_DOTFILE_PATH}/machines/${vm}/libvirt/id)
-            # hack to overcome the fact that node01 doesn't have IP address after first provisioning
-            # vagrant up will fail
-            echo "Starting $vm using libvirt, provisioning using Ansible (without Vagrant)"
-            virsh -c qemu:///system start --domain $machine_uuid
-            # let time for the VM to boot before using ansible
-            sleep 60
-            ( cd ${VAGRANT_DIR}; \
-              ansible-playbook site.yml -l $vm )
-        else
-            echo "Machine $vm doesn't exist, start and provision with Vagrant"
-            ( cd ${VAGRANT_DIR} ; \
-              VAGRANT_DOTFILE_PATH=${VAGRANT_COMMON_DOTFILE_PATH} \
-              vagrant up \
-              ${vm} \
-              ${VAGRANT_UP_OPTS} )
-        fi
+        start_vm ${vm} ${VAGRANT_COMMON_DOTFILE_PATH}
     done
 }
 
@@ -133,8 +143,14 @@ run_tests() {
 
 unconfigure() {
     log_subsection "Unconfigure virtual machines"
-    ( cd $VAGRANT_DIR ; \
-      ansible-playbook teardown.yml -l $ANSIBLE_VM_LIST )
+    # when we call "make halt" without options (localdev)
+    # no VMs are provided
+    if [ -n "${ANSIBLE_VM_LIST}" ]; then
+        ( cd $VAGRANT_DIR ; \
+          ansible-playbook teardown.yml -l $ANSIBLE_VM_LIST )
+    else
+        echo "No VM detected, nothing to unconfigure"
+    fi
 }
 
 halt() {
diff --git a/t/venom/vars/all.yml b/t/venom/vars/all.yml
index ee17d7e04922..56192356063e 100644
--- a/t/venom/vars/all.yml
+++ b/t/venom/vars/all.yml
@@ -28,7 +28,6 @@ pfserver_pfqueue_workers: 2
 pfserver_haproxy_admin_server_timeout: 120s
 
 # ad variables
-ad_mgmt_ip: 172.17.17.100
 ad_domain_id: example
 ad_domain_upper: EXAMPLE
 ad_dns_domain: example.lan
@@ -38,7 +37,6 @@ ad_base_dn: dc=example,dc=lan
 ad_domain_user: packetfence
 
 # switchs variables
-switch01_mgmt_ip: 172.17.17.201
 switch01.api.url: "https://{{.switch01_mgmt_ip}}:8080"
 switch01.api.user: cumulus
 switch01.api.password: CumulusLinux!
@@ -46,12 +44,10 @@ switch01.dot1x_interface.id: swp12
 switch01.dot1x_interface.mac: 44:38:39:00:00:12
 
 # nodes variables
-node01_mgmt_ip: 172.17.17.251
 node01_ens7_mac_address: 00:03:00:11:11:01
 node01_ens7_mac_address_url_encoded: 00%3A03%3A00%3A11%3A11%3A01
 
 # wireless01 variables
-wireless01_mgmt_ip: 172.17.17.210
 wireless01_wlan1_mac_address: 02:00:00:00:01:00
 wireless01_wlan1_mac_address_url_encoded: 02%3A00%3A00%3A00%3A01%3A00
 wireless01.dot1x_interface.mac: 02:00:00:00:00:00
@@ -71,10 +67,6 @@ access_level_user_and_node.description: Users and Nodes management
 roles.registration.vlan_id: 2
 roles.isolation.vlan_id: 3
 
-# Fingerbank
-fingerbank_api_key.email: support@inverse.ca
-fingerbank_api_key.secret_id: d2c4d4f8-c5b1-4281-a724-e4ade5c31fe1
-
 ### Maintenance tasks
 # Node cleanup
 node_cleanup.delete_window.interval: 1
@@ -238,14 +230,10 @@ configurator.interfaces.mgmt.netmask: '{{.pfserver_mgmt_netmask}}'
 
 # Interface registration
 configurator.interfaces.reg.index: 4
-configurator.interfaces.reg.ip: 172.17.2.2
-configurator.interfaces.reg.netmask: 255.255.255.0
 configurator.interfaces.reg.dhcpd_enabled: enabled
 
 # Interface isolation
 configurator.interfaces.iso.index: 5
-configurator.interfaces.iso.ip: 172.17.3.2
-configurator.interfaces.iso.netmask: 255.255.255.0
 configurator.interfaces.iso.dhcpd_enabled: enabled
 
 # DNS servers