Skip to content
Merged
25 changes: 14 additions & 11 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,8 @@

Terraform for the following configuration:

* OpenStack virtualised instances
* OpenStack virtualised lab instances
* An OpenStack virtualised container registry instance
* Cinder volumes for instance storage
* Floating IPs for networking

Expand All @@ -12,32 +13,34 @@ OpenStack infrastructure.

## Prerequisites

* A Neutron network the instances can attach to, with router
* A Neutron network the instances can attach to, with a router
* Plenty of resource quota
* Terraform installed (see instructions
[here](https://developer.hashicorp.com/terraform/install))

## Software Components

[Kayobe](https://docs.openstack.org/kayobe/latest/) enables deployment of
[Kayobe](https://docs.openstack.org/kayobe/latest/) enables the deployment of
containerised OpenStack to bare metal.

# Instructions for deployment

After cloning this repo, source the regular OpenStack rc file with necessary
vars for accessing the *A Universe From Nothing* lab project.
After cloning this repo, source the regular OpenStack rc file with the
necessary vars for accessing the *A Universe From Nothing* lab project.

There are a various variables available for configuration. These can be seen
There are various variables available for configuration. These can be seen
in `vars.tf`, and can be set in `terraform.tfvars` (see sample file
`terraform.tfvars.sample`).

Next up is the `terraform` bit assuming it is already installed:
Create the resources using Terraform:

terraform init
terraform plan
terraform apply -auto-approve -parallelism=52

To reprovision a lab machine:

terraform taint openstack_compute_instance_v2.#
terraform taint openstack_compute_instance_v2.lab[#]
terraform apply -auto-approve

where `#` is the lab index which can be obtained from the web UI.
Expand All @@ -54,7 +57,7 @@ SSH in to your lab instance by running and entering the provided password:

ssh lab@<lab-ip-address> -o PreferredAuthentications=password

The default password is the id of the lab instance. As such, it is recommeded
The default password is the id of the lab instance. As such, it is recommended
that you run `passwd` immediately to change the default password.

## Nested virtualisation
Expand All @@ -75,7 +78,7 @@ When complete, it should report an elapsed time as follows:

[INFO] 22 minutes and 3 seconds elapsed.

## Inspect the bifrost container inside your seed VM:
## Inspect the Bifrost container inside your seed VM:

    ssh stack@192.168.33.5
docker ps
Expand All @@ -85,7 +88,7 @@ When complete, it should report an elapsed time as follows:

Look at the steps involved in deploying Kayobe control plane:

< a-universe-from-seed.sh
less a-universe-from-seed.sh

# Wrapping up

Expand Down
79 changes: 46 additions & 33 deletions a-seed-from-nothing.sh
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,6 @@
# Reset SECONDS
SECONDS=0

# Cloud User: cloud-user (CentOS) or ubuntu?
CLOUD_USER=cloud-user

ENABLE_OVN=true

# Registry IP
Expand All @@ -14,19 +11,20 @@ registry_ip=$1
echo "[INFO] Given docker registry IP: $registry_ip"

# Disable the firewall.
if [[ "${CLOUD_USER}" = "ubuntu" ]]
then
if type apt; then
grep -q $HOSTNAME /etc/hosts || (echo "$(ip r | grep -o '^default via.*src [0-9.]*' | awk '{print $NF}') $HOSTNAME" | sudo tee -a /etc/hosts)
dpkg -l ufw && sudo systemctl is-enabled ufw && sudo systemctl stop ufw && sudo systemctl disable ufw
else
rpm -q firewalld && sudo systemctl is-enabled firewalld && sudo systemctl stop firewalld && sudo systemctl disable firewalld
fi

# Disable SELinux.
sudo setenforce 0
# Disable SELinux.
sudo setenforce 0
fi

# Useful packages
if [[ "${CLOUD_USER}" = "ubuntu" ]]
then
if type apt; then
# Avoid the interactive dialog prompting for service restart: set policy to leave services unchanged
echo "\$nrconf{restart} = 'l';" | sudo tee /etc/needrestart/conf.d/90-aufn.conf
sudo apt update
sudo apt install -y git tmux lvm2 iptables
else
Expand All @@ -42,8 +40,7 @@ EOF
sudo sysctl --load /etc/sysctl.d/70-ipv6.conf

# CentOS Stream 8 requires network-scripts. Rocky Linux 9 and onwards use NetworkManager.
if [[ "${CLOUD_USER}" = "cloud-user" ]]
then
if type dnf; then
case $(grep -o "[89]\.[0-9]" /etc/redhat-release) in
"8.*")
sudo dnf install -y network-scripts
Expand All @@ -60,6 +57,18 @@ then
exit -1
;;
esac
elif type apt; then
# Prepare for disabling of Netplan and enabling of systemd-networkd.
# Netplan has an interaction with systemd and cloud-init to populate
# systemd-networkd files, but ephemerally. If /etc/systemd/network is
# empty and netplan config files are present in /run, copy them over.
persistent_netcfg=$(ls /etc/systemd/network)
ephemeral_netcfg=$(ls /run/systemd/network)
if [[ -z "$persistent_netcfg" && ! -z "$ephemeral_netcfg" ]]
then
echo "Creating persistent versions of Netplan ephemeral config"
sudo cp /run/systemd/network/* /etc/systemd/network
fi
fi

# Exit on error
Expand All @@ -68,6 +77,7 @@ fi
set -e

# Ensure an ssh key is generated
CLOUD_USER=$(ls /home | grep -v lab | grep -v stack | head -1)
# NOTE: you might think ~${CLOUD_USER} would work but apparently not
CLOUD_USER_DIR=/home/${CLOUD_USER}
keyfile="$HOME/.ssh/id_rsa"
Expand All @@ -86,43 +96,38 @@ then
sudo chown ${CLOUD_USER}.${CLOUD_USER} ${CLOUD_USER_DIR}/.ssh/authorized_keys
fi

# Clone Kayobe.
# Clone Beokay.
cd $HOME
[[ -d kayobe ]] || git clone https://opendev.org/openstack/kayobe.git -b stable/yoga
cd kayobe
git clone https://github.com/stackhpc/beokay.git -b master

# Use Beokay to bootstrap your control host.
[[ -d deployment ]] || beokay/beokay.py create --base-path ~/deployment --kayobe-repo https://opendev.org/openstack/kayobe.git --kayobe-branch stable/2023.1 --kayobe-config-repo https://github.com/stackhpc/a-universe-from-nothing.git --kayobe-config-branch stable/2023.1

# Bump the provisioning time - it can be lengthy on virtualised storage
sed -i.bak 's%^[# ]*wait_active_timeout:.*% wait_active_timeout: 5000%' ~/kayobe/ansible/overcloud-provision.yml
sed -i.bak 's%^[# ]*wait_active_timeout:.*% wait_active_timeout: 5000%' ~/deployment/src/kayobe/ansible/overcloud-provision.yml

# Clone the Tenks repository.
cd ~/deployment/src/
[[ -d tenks ]] || git clone https://opendev.org/openstack/tenks.git

# Clone this Kayobe configuration.
mkdir -p config/src
cd config/src/
[[ -d kayobe-config ]] || git clone https://github.com/stackhpc/a-universe-from-nothing.git -b stable/yoga kayobe-config
cd

# Set default registry name to the one we just created
sed -i.bak 's/^docker_registry.*/docker_registry: '$registry_ip':4000/' kayobe-config/etc/kayobe/docker.yml
sed -i.bak 's/^docker_registry:.*/docker_registry: '$registry_ip':4000/' ~/deployment/src/kayobe-config/etc/kayobe/docker.yml

# Configure host networking (bridge, routes & firewall)
./kayobe-config/configure-local-networking.sh

# Install kayobe.
cd ~/kayobe
./dev/install-dev.sh
~/deployment/src/kayobe-config/configure-local-networking.sh

# Enable OVN flags
if $ENABLE_OVN
then
cat <<EOF | sudo tee -a config/src/kayobe-config/etc/kayobe/bifrost.yml
cat <<EOF | sudo tee -a ~/deployment/src/kayobe-config/etc/kayobe/bifrost.yml
kolla_bifrost_extra_kernel_options:
- "console=ttyS0"
EOF
cat <<EOF | sudo tee -a config/src/kayobe-config/etc/kayobe/kolla.yml
cat <<EOF | sudo tee -a ~/deployment/src/kayobe-config/etc/kayobe/kolla.yml
kolla_enable_ovn: yes
EOF
cat <<EOF | sudo tee -a config/src/kayobe-config/etc/kayobe/neutron.yml
cat <<EOF | sudo tee -a ~/deployment/src/kayobe-config/etc/kayobe/neutron.yml
kolla_neutron_ml2_type_drivers:
- geneve
- vlan
Expand All @@ -134,17 +139,25 @@ kolla_neutron_ml2_tenant_network_types:
EOF
fi

# Set Environment variables for Kayobe dev scripts
export KAYOBE_CONFIG_SOURCE_PATH=~/deployment/src/kayobe-config
export KAYOBE_VENV_PATH=~/deployment/venvs/kayobe

# Deploy hypervisor services.
./dev/seed-hypervisor-deploy.sh
~/deployment/src/kayobe/dev/seed-hypervisor-deploy.sh

# Deploy a seed VM.
# NOTE: This should work the first time because the packet configuration uses a
# custom docker registry. However, there are sometimes issues with Docker starting up on the seed (FIXME)
if ! ./dev/seed-deploy.sh; then
if ! ~/deployment/src/kayobe/dev/seed-deploy.sh; then
# Deploy a seed VM. Should work this time.
./dev/seed-deploy.sh
~/deployment/src/kayobe/dev/seed-deploy.sh
fi

# Run TENKS
export TENKS_CONFIG_PATH=~/deployment/src/kayobe-config/tenks.yml
~/deployment/src/kayobe/dev/tenks-deploy-overcloud.sh ~/deployment/src/tenks

# Duration
duration=$SECONDS
echo "[INFO] $(($duration / 60)) minutes and $(($duration % 60)) seconds elapsed."
22 changes: 7 additions & 15 deletions a-universe-from-seed.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,19 +8,10 @@ SECONDS=0

# FIXME: IP on public1 subnet disappears for braio interface during the course
# of a-seed-from-nothing.sh script. Rerun the configuration script to re-add it.
cd ~/kayobe/config/src/
./kayobe-config/configure-local-networking.sh

# Change to kayobe directory
cd ~/kayobe

# Create some 'bare metal' VMs for the controller and compute node.
# NOTE: Make sure to use ./tenks, since just ‘tenks’ will install via PyPI.
export TENKS_CONFIG_PATH=config/src/kayobe-config/tenks.yml
./dev/tenks-deploy.sh ./tenks
~/deployment/src/kayobe-config/configure-local-networking.sh

# Activate the Kayobe environment, to allow running commands directly.
source dev/environment-setup.sh
source ~/deployment/env-vars.sh

# Inspect and provision the overcloud hardware:
kayobe overcloud inventory discover
Expand All @@ -32,7 +23,7 @@ kayobe overcloud provision
kayobe overcloud host configure
kayobe overcloud container image pull
kayobe overcloud service deploy
source config/src/kayobe-config/etc/kolla/public-openrc.sh
source ~/deployment/src/kayobe-config/etc/kolla/public-openrc.sh
kayobe overcloud post configure

# At this point it should be possible to access the Horizon GUI via the seed
Expand All @@ -49,12 +40,13 @@ kayobe overcloud host command run --command "iptables -P FORWARD ACCEPT" --becom

# The following script will register some resources in OpenStack to enable
# booting up a tenant VM.
source config/src/kayobe-config/etc/kolla/public-openrc.sh
./config/src/kayobe-config/init-runonce.sh
source ~/deployment/src/kayobe-config/etc/kolla/public-openrc.sh
~/deployment/src/kayobe-config/init-runonce.sh

# Following the instructions displayed by the above script, boot a VM.
# You'll need to have activated the ~/os-venv virtual environment.
source ~/os-venv/bin/activate
deactivate
source ~/deployment/venvs/os-venv/bin/activate
openstack server create --image cirros --flavor m1.tiny --key-name mykey --network demo-net demo1

# Assign a floating IP to the server to make it accessible.
Expand Down
64 changes: 63 additions & 1 deletion openstack-device.tf
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
resource "openstack_compute_keypair_v2" "ufn_lab_key" {
name = "ufn_lab_key"
name = "${var.lab_prefix}_lab_key"
public_key = tls_private_key.default.public_key_openssh
}

Expand Down Expand Up @@ -110,13 +110,75 @@ resource "null_resource" "registry" {
}
}

resource "openstack_compute_secgroup_v2" "AUFN" {
name = "${var.lab_prefix}-lab-rules"
description = "Access rules for AUFN lab deployment"

rule {
from_port = 22
to_port = 22
ip_protocol = "tcp"
cidr = "0.0.0.0/0"
}

rule {
from_port = 80
to_port = 80
ip_protocol = "tcp"
cidr = "0.0.0.0/0"
}

rule {
from_port = 3000
to_port = 3000
ip_protocol = "tcp"
cidr = "0.0.0.0/0"
}

rule {
from_port = 5601
to_port = 5601
ip_protocol = "tcp"
cidr = "0.0.0.0/0"
}

rule {
from_port = 9091
to_port = 9091
ip_protocol = "tcp"
cidr = "0.0.0.0/0"
}

rule {
from_port = 9093
to_port = 9093
ip_protocol = "tcp"
cidr = "0.0.0.0/0"
}
}

data "openstack_dns_zone_v2" "lab_zone" {
count = var.dns_zone_name != null ? 1 : 0
name = var.dns_zone_name
}

resource "openstack_dns_recordset_v2" "lab_dns" {
count = var.dns_zone_name != null ? var.lab_count : 0
zone_id = data.openstack_dns_zone_v2.lab_zone[0].id
name = format("%s-lab-%02d.%s", var.lab_prefix, count.index, var.dns_zone_name)
type = "A"
ttl = 300
records = [openstack_compute_instance_v2.lab[count.index].network[0].fixed_ip_v4]
}

resource "openstack_compute_instance_v2" "lab" {

count = var.lab_count
name = format("%s-lab-%02d", var.lab_prefix, count.index)
image_name = var.image_name
flavor_name = var.lab_flavor
key_pair = openstack_compute_keypair_v2.ufn_lab_key.name
security_groups = ["default", openstack_compute_secgroup_v2.AUFN.name ]

dynamic "block_device" {
for_each = var.boot_labs_from_volume ? [1] : []
Expand Down
Loading