diff --git a/.gitignore b/.gitignore index 713cead5c..05cbc5283 100644 --- a/.gitignore +++ b/.gitignore @@ -41,3 +41,11 @@ etc/kayobe/inventory/group_vars/seed/ansible-host # Ignore kolla configuration. etc/kolla + +# Ignore ceph generated config in AUFN env +etc/kayobe/environments/aufn-ceph/kolla/config/glance/ceph.conf +etc/kayobe/environments/aufn-ceph/kolla/config/glance/ceph.client.glance.keyring +etc/kayobe/environments/aufn-ceph/kolla/config/cinder/ceph.conf +etc/kayobe/environments/aufn-ceph/kolla/config/cinder/ceph.client.glance.keyring +etc/kayobe/environments/aufn-ceph/kolla/config/nova/ceph.conf +etc/kayobe/environments/aufn-ceph/kolla/config/nova/ceph.client.glance.keyring \ No newline at end of file diff --git a/etc/kayobe/environments/aufn-ceph/a-universe-from-nothing.sh b/etc/kayobe/environments/aufn-ceph/a-universe-from-nothing.sh new file mode 100755 index 000000000..e594ea388 --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/a-universe-from-nothing.sh @@ -0,0 +1,155 @@ +#!/bin/bash + +########################################### +# STACKHPC-KAYOBE-CONFIG AUFN ENV VERSION # +########################################### + +# Cheat script for a full deployment. +# This should be used for testing only. + +set -eu + +BASE_PATH=~ +KAYOBE_BRANCH=stackhpc/yoga +KAYOBE_CONFIG_BRANCH=stackhpc/yoga +KAYOBE_ENVIRONMENT=aufn-ceph + +PELICAN_HOST="10.0.0.34 pelican pelican.service.compute.sms-lab.cloud" +PULP_HOST="10.205.3.187 pulp-server pulp-server.internal.sms-cloud" + +# FIXME: Work around lack of DNS on SMS lab. +cat << EOF | sudo tee -a /etc/hosts +$PELICAN_HOST +$PULP_HOST +EOF + +# Install git and tmux. +if $(which dnf 2>/dev/null >/dev/null); then + sudo dnf -y install git tmux python3-virtualenv +else + sudo apt update + sudo apt -y install git tmux gcc libffi-dev python3-dev python-is-python3 python3-virtualenv +fi + +# Disable the firewall. +sudo systemctl is-enabled firewalld && sudo systemctl stop firewalld && sudo systemctl disable firewalld + +# Disable SELinux both immediately and permanently. +if $(which setenforce 2>/dev/null >/dev/null); then + sudo setenforce 0 + sudo sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config +fi + +# Prevent sudo from performing DNS queries. +echo 'Defaults !fqdn' | sudo tee /etc/sudoers.d/no-fqdn + +# Clone repositories +cd $BASE_PATH +mkdir -p src +pushd src +[[ -d kayobe ]] || git clone https://github.com/stackhpc/kayobe.git -b $KAYOBE_BRANCH +[[ -d kayobe-config ]] || git clone https://github.com/stackhpc/stackhpc-kayobe-config kayobe-config -b $KAYOBE_CONFIG_BRANCH +[[ -d kayobe/tenks ]] || (cd kayobe && git clone https://opendev.org/openstack/tenks.git) +popd + +# Create Kayobe virtualenv +mkdir -p venvs +pushd venvs +if [[ ! -d kayobe ]]; then + virtualenv kayobe +fi +# NOTE: Virtualenv's activate and deactivate scripts reference an +# unbound variable. +set +u +source kayobe/bin/activate +set -u +pip install -U pip +pip install ../src/kayobe +popd + +# Activate environment +pushd $BASE_PATH/src/kayobe-config +source kayobe-env --environment $KAYOBE_ENVIRONMENT + +# Configure host networking (bridge, routes & firewall) +$KAYOBE_CONFIG_PATH/environments/$KAYOBE_ENVIRONMENT/configure-local-networking.sh + +# Bootstrap the Ansible control host. +kayobe control host bootstrap + +# Configure the seed hypervisor host. +kayobe seed hypervisor host configure + +# Provision the seed VM. +kayobe seed vm provision + +# Configure the seed host, and deploy a local registry. 
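+# (Optional sanity check, not part of the deployment flow: the seed VM should
+# now be reachable on its static IP from network-allocation.yml, e.g.
+#   ssh stack@192.168.33.5 hostname
+# before continuing with host configuration.)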
+kayobe seed host configure
+
+# Deploy a local Pulp server as a container on the seed VM.
+kayobe seed service deploy --tags seed-deploy-containers --kolla-tags none
+
+# Deploying the seed restarts the network interfaces; run configure-local-networking.sh again to re-add the routes.
+$KAYOBE_CONFIG_PATH/environments/$KAYOBE_ENVIRONMENT/configure-local-networking.sh
+
+# Add the SMS lab test Pulp server to /etc/hosts in the seed VM's Pulp container.
+SEED_IP=192.168.33.5
+REMOTE_COMMAND="docker exec pulp sh -c 'echo $PULP_HOST | tee -a /etc/hosts'"
+ssh stack@$SEED_IP $REMOTE_COMMAND
+
+# Sync package & container repositories.
+kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/pulp-repo-sync.yml
+kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/pulp-repo-publish.yml
+kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/pulp-container-sync.yml
+kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/pulp-container-publish.yml
+
+# Re-run the full seed service deployment to set up bifrost_deploy etc. using the newly-populated Pulp repositories.
+kayobe seed service deploy
+
+# NOTE: Make sure to use ./tenks, since just ‘tenks’ will install via PyPI.
+(export TENKS_CONFIG_PATH=$KAYOBE_CONFIG_PATH/environments/$KAYOBE_ENVIRONMENT/tenks.yml && \
+    export KAYOBE_CONFIG_SOURCE_PATH=$BASE_PATH/src/kayobe-config && \
+    export KAYOBE_VENV_PATH=$BASE_PATH/venvs/kayobe && \
+    cd $BASE_PATH/src/kayobe && \
+    ./dev/tenks-deploy-overcloud.sh ./tenks)
+
+# Inspect and provision the overcloud hardware:
+kayobe overcloud inventory discover
+kayobe overcloud hardware inspect
+kayobe overcloud provision
+kayobe overcloud host configure
+kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/cephadm.yml
+kayobe playbook run $KAYOBE_CONFIG_PATH/ansible/cephadm-gather-keys.yml
+kayobe overcloud container image pull
+kayobe overcloud service deploy
+source $KOLLA_CONFIG_PATH/public-openrc.sh
+kayobe overcloud post configure
+source $KOLLA_CONFIG_PATH/public-openrc.sh
+
+
+# Use openstack-config-multinode here instead of the init-runonce.sh script from the standard a-universe-from-nothing.
+
+# Deactivate the current Kayobe virtualenv.
+set +u
+deactivate
+set -u
+$KAYOBE_CONFIG_PATH/environments/$KAYOBE_ENVIRONMENT/configure-openstack.sh $BASE_PATH
+
+# Create a test VM.
+VENV_DIR=$BASE_PATH/venvs/openstack
+if [[ ! -d $VENV_DIR ]]; then
+    python3 -m venv $VENV_DIR
+fi
+source $VENV_DIR/bin/activate
+pip install -U pip
+pip install python-openstackclient
+source $KOLLA_CONFIG_PATH/public-openrc.sh
+echo "Creating openstack key:"
+openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
+echo "Creating test vm:"
+openstack server create --key-name mykey --flavor m1.tiny --image cirros --network admin-tenant test-vm-1
+echo "Attaching floating IP:"
+openstack floating ip create external
+openstack server add floating ip test-vm-1 `openstack floating ip list -c ID -f value`
+echo -e "Done! \nopenstack server list:"
+openstack server list
diff --git a/etc/kayobe/environments/aufn-ceph/cephadm.yml b/etc/kayobe/environments/aufn-ceph/cephadm.yml
new file mode 100644
index 000000000..72be305a3
--- /dev/null
+++ b/etc/kayobe/environments/aufn-ceph/cephadm.yml
@@ -0,0 +1,66 @@
+---
+###############################################################################
+# Cephadm deployment configuration.
+
+# Ceph container image.
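+# (v16.2.5 is a Ceph Pacific release; another quay.io/ceph/ceph tag could be
+# substituted here if a different release is required.)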
+cephadm_image: "quay.io/ceph/ceph:v16.2.5" + +# List of additional cephadm commands to run before deployment +# cephadm_commands: +# - "config set global osd_pool_default_size {{ [3, groups['osds'] | length] | min }}" +# - "config set global osd_pool_default_min_size {{ [3, groups['osds'] | length] | min }}" + +# Ceph OSD specification. +cephadm_osd_spec: + service_type: osd + service_id: osd_spec_default + placement: + host_pattern: "*" + data_devices: + all: true + +############################################################################### +# Ceph post-deployment configuration. + +# List of Ceph erasure coding profiles. See stackhpc.cephadm.ec_profiles role +# for format. +cephadm_ec_profiles: [] + +# List of Ceph CRUSH rules. See stackhpc.cephadm.crush_rules role for format. +cephadm_crush_rules: [] + +# List of Ceph pools. See stackhpc.cephadm.pools role for format. +cephadm_pools: + - name: backups + application: rbd + state: present + - name: images + application: rbd + state: present + - name: volumes + application: rbd + state: present + - name: vms + application: rbd + state: present + +# List of Cephx keys. See stackhpc.cephadm.keys role for format. +cephadm_keys: + - name: client.cinder + caps: + mon: "profile rbd" + osd: "profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images" + mgr: "profile rbd pool=volumes, profile rbd pool=vms" + state: present + - name: client.cinder-backup + caps: + mon: "profile rbd" + osd: "profile rbd pool=volumes, profile rbd pool=backups" + mgr: "profile rbd pool=volumes, profile rbd pool=backups" + state: present + - name: client.glance + caps: + mon: "profile rbd" + osd: "profile rbd pool=images" + mgr: "profile rbd pool=images" + state: present diff --git a/etc/kayobe/environments/aufn-ceph/configure-local-networking.sh b/etc/kayobe/environments/aufn-ceph/configure-local-networking.sh new file mode 100755 index 000000000..ab3602d2a --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/configure-local-networking.sh @@ -0,0 +1,81 @@ +#!/bin/bash + +set -e +set -o pipefail + +# This should be run on the seed hypervisor. + +# IP addresses on the all-in-one Kayobe cloud network. +# These IP addresses map to those statically configured in +# etc/kayobe/network-allocation.yml and etc/kayobe/networks.yml. +controller_vip=192.168.39.2 +seed_hv_ip=192.168.33.4 + +iface=$(ip route | awk '$1 == "default" {print $5; exit}') + +# Private IP address by which the seed hypervisor is accessible in the cloud +# hosting the VM. +seed_hv_private_ip=$(ip a show dev $iface | awk '$1 == "inet" { gsub(/\/[0-9]*/,"",$2); print $2; exit }') + +# Forward the following ports to the controller. +# 80: Horizon +# 6080: VNC console +forwarded_ports="80 6080" + +# Install iptables. +if $(which dnf >/dev/null 2>&1); then + sudo dnf -y install iptables +else + sudo apt update + sudo apt -y install iptables +fi + +# Configure local networking. +# Add bridges for the Kayobe networks. +if ! sudo ip l show brprov >/dev/null 2>&1; then + sudo ip l add brprov type bridge + sudo ip l set brprov up + sudo ip a add $seed_hv_ip/24 dev brprov +fi + +if ! sudo ip l show brcloud >/dev/null 2>&1; then + sudo ip l add brcloud type bridge + sudo ip l set brcloud up +fi + +# On CentOS 8, bridges without a port are DOWN, which causes network +# configuration to fail. Add a dummy interface and plug it into the bridge. +for i in mgmt prov cloud; do + if ! 
sudo ip l show dummy-$i >/dev/null 2>&1; then
+        sudo ip l add dummy-$i type dummy
+    fi
+done
+
+# Configure IP routing and NAT to allow the seed VM and overcloud hosts to
+# route via this host to the outside world.
+sudo iptables -A POSTROUTING -t nat -o $iface -j MASQUERADE
+sudo sysctl -w net.ipv4.conf.all.forwarding=1
+
+# FIXME: IP MASQUERADE from the control plane fails without this on Ubuntu.
+if ! $(which dnf >/dev/null 2>&1); then
+    sudo modprobe br_netfilter
+    echo 0 | sudo tee /proc/sys/net/bridge/bridge-nf-call-iptables
+fi
+
+# Configure port forwarding from the hypervisor to the Horizon GUI on the
+# controller.
+sudo iptables -A FORWARD -i $iface -o brprov -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
+sudo iptables -A FORWARD -i brprov -o $iface -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
+for port in $forwarded_ports; do
+    # Allow new connections.
+    sudo iptables -A FORWARD -i $iface -o brcloud -p tcp --syn --dport $port -m conntrack --ctstate NEW -j ACCEPT
+    # Destination NAT.
+    sudo iptables -t nat -A PREROUTING -i $iface -p tcp --dport $port -j DNAT --to-destination $controller_vip
+    # Source NAT.
+    sudo iptables -t nat -A POSTROUTING -o brcloud -p tcp --dport $port -d $controller_vip -j SNAT --to-source $seed_hv_private_ip
+done
+
+echo
+echo "NOTE: The network configuration applied by this script is not"
+echo "persistent across reboots."
+echo "If you reboot the system, please re-run this script."
diff --git a/etc/kayobe/environments/aufn-ceph/configure-openstack.sh b/etc/kayobe/environments/aufn-ceph/configure-openstack.sh
new file mode 100755
index 000000000..2129bc122
--- /dev/null
+++ b/etc/kayobe/environments/aufn-ceph/configure-openstack.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+BASE_PATH=$1
+VENV_DIR=$BASE_PATH/venvs/ansible
+cd $BASE_PATH/src/
+[[ -d openstack-config ]] || git clone https://github.com/stackhpc/openstack-config-multinode.git openstack-config
+cd openstack-config
+if [[ ! -d $VENV_DIR ]]; then
+    # Using virtualenv causes a strange bug with python3.6 where
+    # nested virtual env creation leads to envs without pip...
+    # virtualenv $VENV_DIR
+    python3 -m venv $VENV_DIR
+fi
+
+# NOTE: Virtualenv's activate and deactivate scripts reference an unbound variable.
+set +u
+source $VENV_DIR/bin/activate
+set -u
+
+pip install -U pip
+pip install -r requirements.txt
+ansible-galaxy role install -p ansible/roles -r requirements.yml
+ansible-galaxy collection install -p ansible/collections -r requirements.yml
+
+source $BASE_PATH/src/kayobe-config/etc/kolla/public-openrc.sh
+
+# Run the script to configure the OpenStack cloud.
+tools/openstack-config
\ No newline at end of file
diff --git a/etc/kayobe/environments/aufn-ceph/globals.yml b/etc/kayobe/environments/aufn-ceph/globals.yml
new file mode 100644
index 000000000..2eb8232c6
--- /dev/null
+++ b/etc/kayobe/environments/aufn-ceph/globals.yml
@@ -0,0 +1,13 @@
+---
+# Kayobe global configuration.
+
+###############################################################################
+# OS distribution.
+
+# OS distribution name. Valid options are "centos", "ubuntu". Default is
+# "centos".
+os_distribution: "{{ lookup('pipe', '. /etc/os-release && echo $ID') | trim }}"
+
+# OS release. Valid options are "8-stream" when os_distribution is "centos", or
+# "focal" when os_distribution is "ubuntu".
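+# Left unset here so that Kayobe's default for the detected distribution
+# applies; an explicit pin would look like, for example:
+#   os_release: "8-stream"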
+#os_release: diff --git a/etc/kayobe/environments/aufn-ceph/inventory/group_vars/compute/network-interfaces b/etc/kayobe/environments/aufn-ceph/inventory/group_vars/compute/network-interfaces new file mode 100644 index 000000000..b44b1b048 --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/inventory/group_vars/compute/network-interfaces @@ -0,0 +1,19 @@ +--- +############################################################################### +# Network interface definitions for the compute group. + +provision_oc_interface: "{{ 'ens2' if os_distribution == 'ubuntu' else 'eth0' }}" +# Route via the seed-hypervisor to the outside world. +provision_oc_gateway: 192.168.33.4 + +internal_interface: "{{ 'ens3' if os_distribution == 'ubuntu' else 'eth1' }}.{{ internal_vlan }}" + +storage_interface: "{{ 'ens3' if os_distribution == 'ubuntu' else 'eth1' }}.{{ storage_vlan }}" + +tunnel_interface: "{{ 'ens3' if os_distribution == 'ubuntu' else 'eth1' }}.{{ tunnel_vlan }}" + +external_interface: "{{ 'ens3' if os_distribution == 'ubuntu' else 'eth1' }}.{{ external_vlan }}" + +############################################################################### +# Dummy variable to allow Ansible to accept this file. +workaround_ansible_issue_8743: yes diff --git a/etc/kayobe/environments/aufn-ceph/inventory/group_vars/controllers/network-interfaces b/etc/kayobe/environments/aufn-ceph/inventory/group_vars/controllers/network-interfaces new file mode 100644 index 000000000..d608a62e7 --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/inventory/group_vars/controllers/network-interfaces @@ -0,0 +1,27 @@ +--- +############################################################################### +# Network interface definitions for the controller group. + +provision_oc_interface: "{{ 'ens2' if os_distribution == 'ubuntu' else 'eth0' }}" +# Route via the seed-hypervisor to the outside world. +provision_oc_gateway: 192.168.33.4 + +mgmt_interface: "{{ 'ens3' if os_distribution == 'ubuntu' else 'eth1' }}" + +provision_wl_interface: "br{{ 'ens4' if os_distribution == 'ubuntu' else 'eth2' }}" +provision_wl_bridge_ports: + - "{{ 'ens4' if os_distribution == 'ubuntu' else 'eth2' }}" + +internal_interface: "{{ 'ens4' if os_distribution == 'ubuntu' else 'eth2' }}.{{ internal_vlan }}" + +external_interface: "br{{ 'ens4' if os_distribution == 'ubuntu' else 'eth2' }}.{{ external_vlan }}" + +public_interface: "{{ 'ens4' if os_distribution == 'ubuntu' else 'eth2' }}.{{ public_vlan }}" + +storage_interface: "{{ 'ens4' if os_distribution == 'ubuntu' else 'eth2' }}.{{ storage_vlan }}" + +tunnel_interface: "{{ 'ens4' if os_distribution == 'ubuntu' else 'eth2' }}.{{ tunnel_vlan }}" + +############################################################################### +# Dummy variable to allow Ansible to accept this file. 
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/environments/aufn-ceph/inventory/group_vars/seed-hypervisor/network-interfaces b/etc/kayobe/environments/aufn-ceph/inventory/group_vars/seed-hypervisor/network-interfaces
new file mode 100644
index 000000000..61d0a836b
--- /dev/null
+++ b/etc/kayobe/environments/aufn-ceph/inventory/group_vars/seed-hypervisor/network-interfaces
@@ -0,0 +1,18 @@
+---
+mgmt_interface: brmgmt
+mgmt_bridge_ports:
+  - dummy-mgmt
+
+provision_oc_interface: brprov
+provision_oc_bridge_ports:
+  - dummy-prov
+
+provision_wl_interface: brcloud
+provision_wl_bridge_ports:
+  - dummy-cloud
+
+internal_interface: "{{ provision_wl_interface }}.{{ internal_vlan }}"
+
+public_interface: "{{ provision_wl_interface }}.{{ public_vlan }}"
+
+external_interface: "{{ provision_wl_interface }}.{{ external_vlan }}"
diff --git a/etc/kayobe/environments/aufn-ceph/inventory/group_vars/seed/network-interfaces b/etc/kayobe/environments/aufn-ceph/inventory/group_vars/seed/network-interfaces
new file mode 100644
index 000000000..ca04864d4
--- /dev/null
+++ b/etc/kayobe/environments/aufn-ceph/inventory/group_vars/seed/network-interfaces
@@ -0,0 +1,13 @@
+---
+###############################################################################
+# Network interface definitions for the seed group.
+
+mgmt_interface: "{{ 'ens2' if os_distribution == 'ubuntu' else 'eth0' }}"
+
+provision_oc_interface: "{{ 'ens3' if os_distribution == 'ubuntu' else 'eth1' }}"
+# Route via the seed-hypervisor to the outside world.
+provision_oc_gateway: 192.168.33.4
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/environments/aufn-ceph/inventory/group_vars/storage/network-interfaces b/etc/kayobe/environments/aufn-ceph/inventory/group_vars/storage/network-interfaces
new file mode 100644
index 000000000..c2b0dac84
--- /dev/null
+++ b/etc/kayobe/environments/aufn-ceph/inventory/group_vars/storage/network-interfaces
@@ -0,0 +1,17 @@
+---
+###############################################################################
+# Network interface definitions for the storage group.
+
+provision_oc_interface: "{{ 'ens2' if os_distribution == 'ubuntu' else 'eth0' }}"
+# Route via the seed-hypervisor to the outside world.
+provision_oc_gateway: 192.168.33.4
+
+internal_interface: "{{ 'ens3' if os_distribution == 'ubuntu' else 'eth1' }}.{{ internal_vlan }}"
+
+storage_interface: "{{ 'ens3' if os_distribution == 'ubuntu' else 'eth1' }}.{{ storage_vlan }}"
+
+storage_mgmt_interface: "{{ 'ens3' if os_distribution == 'ubuntu' else 'eth1' }}.{{ storage_mgmt_vlan }}"
+
+###############################################################################
+# Dummy variable to allow Ansible to accept this file.
+workaround_ansible_issue_8743: yes
diff --git a/etc/kayobe/environments/aufn-ceph/inventory/groups b/etc/kayobe/environments/aufn-ceph/inventory/groups
new file mode 100644
index 000000000..2834d37ba
--- /dev/null
+++ b/etc/kayobe/environments/aufn-ceph/inventory/groups
@@ -0,0 +1,27 @@
+# Kayobe groups inventory file. This file should generally not be modified.
+# It declares the top-level groups and sub-groups.
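+# In this environment, hosts are placed in the storage-ceph group below via
+# overcloud_group_hosts_map in overcloud.yml (storage0-2).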
+ +############################################################################### +# Ceph groups + +# Empty group with hosts added in overcloud.yml +[storage-ceph] + +[mons:children] +storage-ceph + +[mgrs:children] +storage-ceph + +[osds:children] +storage-ceph + +[rgws:children] +storage-ceph + + +############################################################################### +# Monitoring groups + +[monitoring:children] +controllers \ No newline at end of file diff --git a/etc/kayobe/environments/aufn-ceph/inventory/hosts b/etc/kayobe/environments/aufn-ceph/inventory/hosts new file mode 100644 index 000000000..c0d1b51fd --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/inventory/hosts @@ -0,0 +1,27 @@ +# This host acts as the configuration management Ansible control host. This must be +# localhost. +localhost ansible_connection=local + +[seed-hypervisor] +seed-hypervisor + +[seed] +seed + +[controllers] +#controller0 + +[compute:children] +#controllers + +[baremetal-compute] +# Add baremetal compute nodes here if required. + +[mgmt-switches] +# Add management network switches here if required. + +[ctl-switches] +# Add control and provisioning switches here if required. + +[hs-switches] +# Add high speed switches here if required. diff --git a/etc/kayobe/environments/aufn-ceph/ipa.yml b/etc/kayobe/environments/aufn-ceph/ipa.yml new file mode 100644 index 000000000..65616cfe1 --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/ipa.yml @@ -0,0 +1,13 @@ +--- +# Ironic Python Agent (IPA) configuration. + +############################################################################### +# Ironic Python Agent (IPA) images configuration. + +# URL of Ironic deployment kernel image to download. +# yamllint disable-line rule:line-length +ipa_kernel_upstream_url: "https://tarballs.openstack.org/ironic-python-agent/tinyipa/files/tinyipa{{ ipa_images_upstream_url_suffix }}.vmlinuz" + +# URL of Ironic deployment ramdisk image to download. +# yamllint disable-line rule:line-length +ipa_ramdisk_upstream_url: "https://tarballs.openstack.org/ironic-python-agent/tinyipa/files/tinyipa{{ ipa_images_upstream_url_suffix }}.gz" diff --git a/etc/kayobe/environments/aufn-ceph/kolla.yml b/etc/kayobe/environments/aufn-ceph/kolla.yml new file mode 100644 index 000000000..155ac2ed1 --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/kolla.yml @@ -0,0 +1,17 @@ +--- +# Kayobe Kolla configuration. + +############################################################################### +# Kolla-ansible inventory configuration. + +# Custom overcloud inventory containing a mapping from services to components. +kolla_overcloud_inventory_custom_services: "{{ lookup('template', kayobe_env_config_path ~ '/kolla/inventory/overcloud-services.j2') }}" + +# Don't give storage nodes to kolla-ansible - we're using Ceph-ansible. +# kolla_overcloud_inventory_storage_groups: [] + +############################################################################### +# Kolla feature flag configuration. +kolla_enable_cinder: true +kolla_enable_ovn: true +kolla_enable_neutron_provider_networks: true diff --git a/etc/kayobe/environments/aufn-ceph/kolla/config/bifrost/bifrost.yml b/etc/kayobe/environments/aufn-ceph/kolla/config/bifrost/bifrost.yml new file mode 100644 index 000000000..5f981c201 --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/kolla/config/bifrost/bifrost.yml @@ -0,0 +1,13 @@ +# yamllint disable-file +--- +# Don't build an IPA deployment image, instead download upstream images. 
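+# (See also ipa.yml in this environment, which points the Ironic deployment
+# kernel and ramdisk at upstream TinyIPA images.)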
+create_ipa_image: false
+download_ipa: true
+
+# Deploy a full OS cloud image in place of the default CirrOS deployment image.
+use_cirros: true
+{% if os_distribution == 'ubuntu' %}
+cirros_deploy_image_upstream_url: "https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img"
+{% else %}
+cirros_deploy_image_upstream_url: "https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20220913.0.x86_64.qcow2"
+{% endif %}
diff --git a/etc/kayobe/environments/aufn-ceph/kolla/config/neutron.conf b/etc/kayobe/environments/aufn-ceph/kolla/config/neutron.conf
new file mode 100644
index 000000000..1014e6730
--- /dev/null
+++ b/etc/kayobe/environments/aufn-ceph/kolla/config/neutron.conf
@@ -0,0 +1,2 @@
+[DEFAULT]
+global_physnet_mtu = {{ tunnel_net_name | net_mtu }}
diff --git a/etc/kayobe/environments/aufn-ceph/kolla/config/neutron/ml2_conf.ini b/etc/kayobe/environments/aufn-ceph/kolla/config/neutron/ml2_conf.ini
new file mode 100644
index 000000000..7695e792b
--- /dev/null
+++ b/etc/kayobe/environments/aufn-ceph/kolla/config/neutron/ml2_conf.ini
@@ -0,0 +1,2 @@
+[ml2]
+path_mtu = {{ tunnel_net_name | net_mtu }}
diff --git a/etc/kayobe/environments/aufn-ceph/kolla/globals.yml b/etc/kayobe/environments/aufn-ceph/kolla/globals.yml
new file mode 100644
index 000000000..c1d90e7ab
--- /dev/null
+++ b/etc/kayobe/environments/aufn-ceph/kolla/globals.yml
@@ -0,0 +1,16 @@
+---
+# Most development environments will use nested virtualisation, and we can't
+# guarantee that nested KVM support is available. Use QEMU as a lowest common
+# denominator.
+nova_compute_virt_type: qemu
+
+# Reduce the control plane's memory footprint by limiting the number of worker
+# processes to one per service.
+openstack_service_workers: "1"
+
+glance_backend_ceph: "yes"
+cinder_backend_ceph: "yes"
+nova_backend_ceph: "yes"
+
+# Elasticsearch memory tuning.
+es_heap_size: 1g
diff --git a/etc/kayobe/environments/aufn-ceph/kolla/inventory/overcloud-services.j2 b/etc/kayobe/environments/aufn-ceph/kolla/inventory/overcloud-services.j2
new file mode 100644
index 000000000..313c3ba58
--- /dev/null
+++ b/etc/kayobe/environments/aufn-ceph/kolla/inventory/overcloud-services.j2
@@ -0,0 +1,561 @@
+# This inventory section provides a mapping of services to components.
+#
+# Top level groups define the roles of hosts, e.g. controller or compute.
+# Components define groups of services, e.g. nova or ironic.
+# Services define single containers, e.g. nova-compute or ironic-api.
+
+# Additional control is implemented here. These groups allow you to control
+# which services run on which hosts at a per-service level.
+#
+# Word of caution: Some services are required to run on the same host to
+# function appropriately. For example, neutron-metadata-agent must run on the
+# same host as the l3-agent and (depending on configuration) the dhcp-agent.
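+#
+# For example, a service can be pinned to a single host instead of being
+# mapped to a group, as done for ironic-inspector further down; a hypothetical
+# override would look like:
+#   [cinder-volume]
+#   controller0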
+ +# Common +[cron:children] +common + +[fluentd:children] +common + +[kolla-logs:children] +common + +[kolla-toolbox:children] +common + +# Elasticsearch Curator +[elasticsearch-curator:children] +elasticsearch + +# Glance +[glance-api:children] +glance + +# Nova +[nova-api:children] +nova + +[nova-conductor:children] +{% if 'cell-control' in kolla_overcloud_inventory_top_level_group_map %} +cell-control +{% else %} +nova +{% endif %} + +[nova-super-conductor:children] +nova + +[nova-novncproxy:children] +{% if 'cell-control' in kolla_overcloud_inventory_top_level_group_map %} +cell-control +{% else %} +nova +{% endif %} + +[nova-scheduler:children] +nova + +[nova-spicehtml5proxy:children] +{% if 'cell-control' in kolla_overcloud_inventory_top_level_group_map %} +cell-control +{% else %} +nova +{% endif %} + +# NOTE: HA for nova-compute services with ironic is still an experimental +# feature. Provide the option to use a single compute host, even when multiple +# controllers are in use. +{% if kolla_nova_compute_ironic_host is not none %} +[nova-compute-ironic] +{{ kolla_nova_compute_ironic_host }} +{% else %} +[nova-compute-ironic:children] +nova +{% endif %} + +[nova-serialproxy:children] +{% if 'cell-control' in kolla_overcloud_inventory_top_level_group_map %} +cell-control +{% else %} +nova +{% endif %} + +# Neutron +[neutron-server:children] +control + +[neutron-dhcp-agent:children] +neutron + +[neutron-l3-agent:children] +neutron + +[neutron-metadata-agent:children] +neutron + +[neutron-ovn-metadata-agent:children] +compute +network + +[neutron-bgp-dragent:children] +neutron + +[neutron-infoblox-ipam-agent:children] +neutron + +[neutron-metering-agent:children] +neutron + +[ironic-neutron-agent:children] +neutron + +# Cinder +[cinder-api:children] +cinder + +[cinder-backup:children] +cinder + +[cinder-scheduler:children] +cinder + +[cinder-volume:children] +cinder + +# Cloudkitty +[cloudkitty-api:children] +cloudkitty + +[cloudkitty-processor:children] +cloudkitty + +# Freezer +[freezer-api:children] +freezer + +[freezer-scheduler:children] +freezer + +# iSCSI +[iscsid:children] +compute +storage +ironic + +[tgtd:children] +storage + +# Manila +[manila-api:children] +manila + +[manila-scheduler:children] +manila + +[manila-share:children] +network + +[manila-data:children] +manila + +# Swift +[swift-proxy-server:children] +swift + +[swift-account-server:children] +storage + +[swift-container-server:children] +storage + +[swift-object-server:children] +storage + +# Barbican +[barbican-api:children] +barbican + +[barbican-keystone-listener:children] +barbican + +[barbican-worker:children] +barbican + +# Heat +[heat-api:children] +heat + +[heat-api-cfn:children] +heat + +[heat-engine:children] +heat + +# Murano +[murano-api:children] +murano + +[murano-engine:children] +murano + +# Monasca +[monasca-agent-collector:children] +monasca-agent + +[monasca-agent-forwarder:children] +monasca-agent + +[monasca-agent-statsd:children] +monasca-agent + +[monasca-api:children] +monasca + +[monasca-log-persister:children] +monasca + +[monasca-log-metrics:children] +monasca + +[monasca-thresh:children] +monasca + +[monasca-notification:children] +monasca + +[monasca-persister:children] +monasca + +# Storm +[storm-worker:children] +storm + +[storm-nimbus:children] +storm + +# Ironic +[ironic-api:children] +ironic + +[ironic-conductor:children] +ironic + +#[ironic-inspector:children] +#ironic + +[ironic-inspector] +# FIXME: Ideally we wouldn't reference controllers in here directly, but only +# one 
inspector service should exist, and groups can't be indexed in an +# inventory (e.g. ironic[0]). +{% if groups.get('controllers', []) | length > 0 %} +{{ groups['controllers'][0] }} +{% endif %} + +[ironic-tftp:children] +ironic + +[ironic-http:children] +ironic + +# Magnum +[magnum-api:children] +magnum + +[magnum-conductor:children] +magnum + +# Sahara +[sahara-api:children] +sahara + +[sahara-engine:children] +sahara + +# Solum +[solum-api:children] +solum + +[solum-worker:children] +solum + +[solum-deployer:children] +solum + +[solum-conductor:children] +solum + +[solum-application-deployment:children] +solum + +[solum-image-builder:children] +solum + +# Mistral +[mistral-api:children] +mistral + +[mistral-executor:children] +mistral + +[mistral-engine:children] +mistral + +[mistral-event-engine:children] +mistral + +# Ceilometer +[ceilometer-central:children] +ceilometer + +[ceilometer-notification:children] +ceilometer + +[ceilometer-compute:children] +compute + +[ceilometer-ipmi:children] +compute + +# Aodh +[aodh-api:children] +aodh + +[aodh-evaluator:children] +aodh + +[aodh-listener:children] +aodh + +[aodh-notifier:children] +aodh + +# Cyborg +[cyborg-api:children] +cyborg + +[cyborg-agent:children] +compute + +[cyborg-conductor:children] +cyborg + +# Gnocchi +[gnocchi-api:children] +gnocchi + +[gnocchi-statsd:children] +gnocchi + +[gnocchi-metricd:children] +gnocchi + +# Trove +[trove-api:children] +trove + +[trove-conductor:children] +trove + +[trove-taskmanager:children] +trove + +# Multipathd +[multipathd:children] +compute +storage + +# Watcher +[watcher-api:children] +watcher + +[watcher-engine:children] +watcher + +[watcher-applier:children] +watcher + +# Senlin +[senlin-api:children] +senlin + +[senlin-conductor:children] +senlin + +[senlin-engine:children] +senlin + +[senlin-health-manager:children] +senlin + +# Octavia +[octavia-api:children] +octavia + +[octavia-driver-agent:children] +octavia + +[octavia-health-manager:children] +octavia + +[octavia-housekeeping:children] +octavia + +[octavia-worker:children] +octavia + +# Designate +[designate-api:children] +designate + +[designate-central:children] +designate + +[designate-producer:children] +designate + +[designate-mdns:children] +network + +[designate-worker:children] +designate + +[designate-sink:children] +designate + +[designate-backend-bind9:children] +designate + +# Placement +[placement-api:children] +placement + +# Zun +[zun-api:children] +zun + +[zun-wsproxy:children] +zun + +[zun-compute:children] +compute + +[zun-cni-daemon:children] +compute + +# Skydive +[skydive-analyzer:children] +skydive + +[skydive-agent:children] +compute +network + +# Tacker +[tacker-server:children] +tacker + +[tacker-conductor:children] +tacker + +# Vitrage +[vitrage-api:children] +vitrage + +[vitrage-notifier:children] +vitrage + +[vitrage-graph:children] +vitrage + +[vitrage-ml:children] +vitrage + +[vitrage-persistor:children] +vitrage + +# Blazar +[blazar-api:children] +blazar + +[blazar-manager:children] +blazar + +# Prometheus +[prometheus-node-exporter:children] +monitoring +control +compute +network +storage + +[prometheus-mysqld-exporter:children] +mariadb + +[prometheus-haproxy-exporter:children] +loadbalancer + +[prometheus-memcached-exporter:children] +memcached + +[prometheus-cadvisor:children] +monitoring +control +compute +network +storage + +[prometheus-alertmanager:children] +monitoring + +[prometheus-msteams:children] +monitoring + +[prometheus-openstack-exporter:children] +monitoring + 
+[prometheus-elasticsearch-exporter:children] +elasticsearch + +[prometheus-blackbox-exporter:children] +monitoring + +[prometheus-libvirt-exporter:children] +compute + +[prometheus-msteams:children] +prometheus-alertmanager + +[masakari-api:children] +control + +[masakari-engine:children] +control + +[masakari-hostmonitor:children] +control + +[masakari-instancemonitor:children] +compute + +[ovn-controller:children] +ovn-controller-compute +ovn-controller-network + +[ovn-controller-compute:children] +compute + +[ovn-controller-network:children] +network + +[ovn-database:children] +control + +[ovn-northd:children] +ovn-database + +[ovn-nb-db:children] +ovn-database + +[ovn-sb-db:children] +ovn-database \ No newline at end of file diff --git a/etc/kayobe/environments/aufn-ceph/kolla/kolla-build.conf b/etc/kayobe/environments/aufn-ceph/kolla/kolla-build.conf new file mode 100644 index 000000000..0a665e1bd --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/kolla/kolla-build.conf @@ -0,0 +1,4 @@ +[DEFAULT] +# This is necessary for network connectivity of kolla-build, when Docker +# default iptables rules are disabled. +network_mode = host diff --git a/etc/kayobe/environments/aufn-ceph/network-allocation.yml b/etc/kayobe/environments/aufn-ceph/network-allocation.yml new file mode 100644 index 000000000..ba8a1241c --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/network-allocation.yml @@ -0,0 +1,13 @@ +--- +provision_oc_ips: + compute0: 192.168.33.6 + compute1: 192.168.33.7 + compute2: 192.168.33.8 + controller0: 192.168.33.3 + controller1: 192.168.33.9 + controller2: 192.168.33.10 + seed: 192.168.33.5 + seed-hypervisor: 192.168.33.4 + storage0: 192.168.33.11 + storage1: 192.168.33.12 + storage2: 192.168.33.13 diff --git a/etc/kayobe/environments/aufn-ceph/networks.yml b/etc/kayobe/environments/aufn-ceph/networks.yml new file mode 100644 index 000000000..4b1178110 --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/networks.yml @@ -0,0 +1,117 @@ +--- +# Kayobe network configuration. + +############################################################################### +# Network role to network mappings. + +# Name of the network used by the seed to manage the bare metal overcloud +# hosts via their out-of-band management controllers. +oob_oc_net_name: mgmt + +# Name of the network used by the seed to provision the bare metal overcloud +# hosts. +provision_oc_net_name: provision_oc + +# Name of the network used by the overcloud hosts to manage the bare metal +# compute hosts via their out-of-band management controllers. +oob_wl_net_name: mgmt + +# Name of the network used by the overcloud hosts to provision the bare metal +# workload hosts. +provision_wl_net_name: provision_wl + +# Name of the network used to expose the internal OpenStack API endpoints. +internal_net_name: internal + +# List of names of networks used to provide external network access via +# Neutron. +# Deprecated name: external_net_name +# If external_net_name is defined, external_net_names will default to a list +# containing one item, external_net_name. +external_net_names: + - external + +# Name of the network used to expose the public OpenStack API endpoints. +public_net_name: public + +# Name of the network used by Neutron to carry tenant overlay network traffic. +tunnel_net_name: tunnel + +# Name of the network used to carry storage data traffic. +storage_net_name: storage + +# Name of the network used to carry storage management traffic. 
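+# (Carried on the storage hosts' storage_mgmt_interface in this environment;
+# conventionally used as the Ceph cluster/replication network.)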
+storage_mgmt_net_name: storage_mgmt + +# Name of the network used to perform hardware introspection on the bare metal +# workload hosts. +inspection_net_name: provision_wl + +# Name of the network used to perform cleaning on the bare metal workload +# hosts +cleaning_net_name: provision_wl + +############################################################################### +# Network definitions. + +mgmt_cidr: 192.168.35.0/24 +mgmt_mtu: 1450 +# Native VLAN +mgmt_physical_network: mgmt + +provision_oc_cidr: 192.168.33.0/24 +provision_oc_mtu: 1450 +provision_oc_inspection_allocation_pool_start: 192.168.33.128 +provision_oc_inspection_allocation_pool_end: 192.168.33.254 +# Native VLAN +provision_oc_physical_network: provision + +provision_wl_cidr: 192.168.36.0/24 +provision_wl_mtu: 1450 +provision_wl_inspection_allocation_pool_start: 192.168.36.128 +provision_wl_inspection_allocation_pool_end: 192.168.36.254 +provision_wl_neutron_allocation_pool_start: 192.168.36.2 +provision_wl_neutron_allocation_pool_end: 192.168.36.127 +# Native VLAN +provision_wl_physical_network: cloud + +internal_cidr: 192.168.37.0/24 +internal_mtu: 1450 +internal_allocation_pool_start: 192.168.37.3 +internal_allocation_pool_end: 192.168.37.254 +internal_vip_address: 192.168.37.2 +internal_vlan: 101 +internal_physical_network: cloud + +external_cidr: 192.168.38.0/24 +external_mtu: 1392 +external_vlan: 102 +external_physical_network: cloud + +public_cidr: 192.168.39.0/24 +public_mtu: 1450 +public_allocation_pool_start: 192.168.39.3 +public_allocation_pool_end: 192.168.39.254 +public_vip_address: 192.168.39.2 +public_vlan: 103 +public_physical_network: cloud + +tunnel_cidr: 192.168.40.0/24 +tunnel_mtu: 1450 +tunnel_vlan: 104 +tunnel_physical_network: cloud + +storage_cidr: 192.168.41.0/24 +storage_mtu: 1450 +storage_vlan: 105 +storage_physical_network: cloud + +storage_mgmt_cidr: 192.168.42.0/24 +storage_mgmt_mtu: 1450 +storage_mgmt_vlan: 106 +storage_mgmt_physical_network: cloud + +############################################################################### +# MichaelRigart interfaces configuration. + +interfaces_pause_time: 5 diff --git a/etc/kayobe/environments/aufn-ceph/neutron.yml b/etc/kayobe/environments/aufn-ceph/neutron.yml new file mode 100644 index 000000000..258c0a15f --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/neutron.yml @@ -0,0 +1,13 @@ +--- +kolla_neutron_ml2_network_vlan_ranges: + - physical_network: "physnet1" + +kolla_neutron_ml2_type_drivers: + - flat + - vlan + - geneve + +kolla_neutron_ml2_tenant_network_types: + - flat + - vlan + - geneve diff --git a/etc/kayobe/environments/aufn-ceph/overcloud.yml b/etc/kayobe/environments/aufn-ceph/overcloud.yml new file mode 100644 index 000000000..1530c401d --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/overcloud.yml @@ -0,0 +1,21 @@ +--- +############################################################################### +# Overcloud configuration. + +# Dict mapping overcloud Ansible group names to lists of hosts in the group. +# As a special case, the group 'ignore' can be used to specify hosts that +# should not be added to the inventory. 
+#overcloud_group_hosts_map: +overcloud_group_hosts_map: + controllers: + - controller0 + - controller1 + - controller2 + compute: + - compute0 + - compute1 + - compute2 + storage-ceph: + - storage0 + - storage1 + - storage2 diff --git a/etc/kayobe/environments/aufn-ceph/seed-hypervisor.yml b/etc/kayobe/environments/aufn-ceph/seed-hypervisor.yml new file mode 100644 index 000000000..6a1b7ffdf --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/seed-hypervisor.yml @@ -0,0 +1,14 @@ +--- + +############################################################################### +# Seed hypervisor network interface configuration. + +# List of extra networks to which seed hypervisor nodes are attached. +seed_hypervisor_extra_network_interfaces: + - "{{ provision_wl_net_name }}" + - "{{ internal_net_name }}" + - "{{ public_net_name }}" + - "{{ external_net_names[0] }}" + +# Workaround change to cloud-user default login name on CentOS-Stream8 +seed_hypervisor_bootstrap_user: "{{ lookup('env', 'USER') }}" diff --git a/etc/kayobe/environments/aufn-ceph/seed-vm.yml b/etc/kayobe/environments/aufn-ceph/seed-vm.yml new file mode 100644 index 000000000..ddf82f9de --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/seed-vm.yml @@ -0,0 +1,9 @@ +--- +############################################################################### +# Seed node VM configuration. + +# Memory in MB. +seed_vm_memory_mb: "{{ 4 * 1024 }}" + +# Number of vCPUs. +seed_vm_vcpus: 1 diff --git a/etc/kayobe/environments/aufn-ceph/stackhpc.yml b/etc/kayobe/environments/aufn-ceph/stackhpc.yml new file mode 100644 index 000000000..0f31b2851 --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/stackhpc.yml @@ -0,0 +1,15 @@ +--- +############################################################################### +# Kolla configuration. +# +# Docker namespace to use for Kolla images. Default is 'kolla'. +kolla_docker_namespace: stackhpc-dev + +############################################################################### +# StackHPC configuration. + +# Base URL of the StackHPC Test Pulp service. +stackhpc_release_pulp_url: "http://pulp-server.internal.sms-cloud:8080" + +pulp_username: admin +pulp_password: 9e4bfa04-9d9d-493d-9473-ba92e4361dae diff --git a/etc/kayobe/environments/aufn-ceph/storage.yml b/etc/kayobe/environments/aufn-ceph/storage.yml new file mode 100644 index 000000000..2d2302b7c --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/storage.yml @@ -0,0 +1,11 @@ +--- + +############################################################################### +# Storage node LVM configuration. + +# List of storage volume groups. See mrlesmithjr.manage-lvm role for +# format. +# storage_lvm_groups: + +# Avoid undefined var which would result in 'LVM physical disks have not been configured' error +storage_lvm_groups: [] diff --git a/etc/kayobe/environments/aufn-ceph/tenks.yml b/etc/kayobe/environments/aufn-ceph/tenks.yml new file mode 100644 index 000000000..9b0e9e9f4 --- /dev/null +++ b/etc/kayobe/environments/aufn-ceph/tenks.yml @@ -0,0 +1,89 @@ +--- +# This file holds the config given to Tenks when running `tenks-deploy.sh`. It +# assumes the existence of the bridges `brmgmt` and `brcloud`. 
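+# In this environment the config is consumed by dev/tenks-deploy-overcloud.sh,
+# as driven from a-universe-from-nothing.sh, e.g.:
+#   export TENKS_CONFIG_PATH=$KAYOBE_CONFIG_PATH/environments/aufn-ceph/tenks.yml
+#   cd ~/src/kayobe && ./dev/tenks-deploy-overcloud.sh ./tenks
+# (KAYOBE_CONFIG_SOURCE_PATH and KAYOBE_VENV_PATH are also exported there.)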
+ +node_types: + controller: + memory_mb: 8192 + vcpus: 4 + volumes: + # There is a minimum disk space capacity requirement of 4GiB when using Ironic Python Agent: + # https://github.com/openstack/ironic-python-agent/blob/master/ironic_python_agent/utils.py#L290 + - capacity: 20GiB + physical_networks: + - provision-net + - mgmt-net + - cloud-net + console_log_enabled: true + storage: + memory_mb: 8192 + vcpus: 4 + volumes: + # There is a minimum disk space capacity requirement of 4GiB when using Ironic Python Agent: + # https://github.com/openstack/ironic-python-agent/blob/master/ironic_python_agent/utils.py#L290 + - capacity: 10GiB + # Ceph volume + - capacity: 10GiB + physical_networks: + - provision-net + - cloud-net + console_log_enabled: true + compute: + memory_mb: 8192 + vcpus: 4 + volumes: + # There is a minimum disk space capacity requirement of 4GiB when using Ironic Python Agent: + # https://github.com/openstack/ironic-python-agent/blob/master/ironic_python_agent/utils.py#L290 + - capacity: 10GiB + physical_networks: + - provision-net + - cloud-net + console_log_enabled: true + baremetal: + memory_mb: 4096 + vcpus: 1 + volumes: + # There is a minimum disk space capacity requirement of 4GiB when using Ironic Python Agent: + # https://github.com/openstack/ironic-python-agent/blob/master/ironic_python_agent/utils.py#L290 + - capacity: 5GiB + physical_networks: + - cloud-net + console_log_enabled: true + +specs: + - type: controller + count: 3 + node_name_prefix: controller + ironic_config: + resource_class: test-rc + network_interface: noop + - type: storage + count: 3 + node_name_prefix: storage + ironic_config: + resource_class: test-rc + network_interface: noop + - type: compute + count: 2 + node_name_prefix: compute + ironic_config: + resource_class: test-rc + network_interface: noop + - type: baremetal + count: 0 + node_name_prefix: baremetal + +ipmi_address: 192.168.33.4 +ipmi_port_range_end: 6250 + +nova_flavors: [] + +physnet_mappings: + mgmt-net: brmgmt + provision-net: brprov + cloud-net: brcloud + +bridge_type: linuxbridge + +# No placement service. +wait_for_placement: false
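
A rough post-deployment smoke test (a minimal sketch, assuming the public VIP
192.168.39.2, storage0 at 192.168.33.11, and the port forwards set up by
configure-local-networking.sh):

  # Horizon should answer via the forwarded public VIP.
  curl -sSf http://192.168.39.2/ >/dev/null && echo "Horizon reachable"
  # Ceph cluster health, run from one of the cephadm-deployed storage hosts.
  ssh stack@192.168.33.11 sudo cephadm shell -- ceph -s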