Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 13 additions & 0 deletions ansible-collection-requirements.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Ansible collections required by genestack, pinned to exact released tags
# and fetched directly from their upstream git repositories.
collections:
  - name: https://opendev.org/openstack/ansible-collections-openstack
    version: 2.2.0
    type: git
  - name: https://github.com/ansible-collections/community.general
    version: 8.2.0
    type: git
  - name: https://opendev.org/openstack/ansible-config_template
    version: 2.1.0
    type: git
  - name: https://github.com/ansible-collections/kubernetes.core
    version: 3.0.0
    type: git
77 changes: 77 additions & 0 deletions bootstrap.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
#!/usr/bin/env bash

# Copyright 2024, Rackspace Technology, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Bootstrap a genestack deployment host: install base OS packages, seed
# /etc/genestack config, create the ansible virtualenv and pull in the
# required ansible collections.
export LC_ALL=C.UTF-8
mkdir -p ~/.venvs

# Provides success/error/message helpers used below.
source scripts/lib/functions.sh

# Which config to bootstrap: a previously persisted product selection wins,
# otherwise fall back to the environment or the enterprise default.
if test -f "${GENESTACK_CONFIG}/product" 2>/dev/null; then
  GENESTACK_PRODUCT=$(head -n1 "${GENESTACK_CONFIG}/product")
  export GENESTACK_PRODUCT
fi
export GENESTACK_PRODUCT=${GENESTACK_PRODUCT:-openstack-enterprise}

set -e

success "Environment variables:"
env | grep -E '^(SUDO|RPC_|ANSIBLE_|GENESTACK_|K8S|CONTAINER_|OPENSTACK_|OSH_)' | sort -u

success "Installing base packages (git):"
# Use apt-get: the 'apt' front-end warns it has no stable CLI for scripts.
apt-get update -q

# Capture the exit status explicitly; under 'set -e' the script would
# otherwise abort before the status check below ever runs. Redirect must be
# '> log 2>&1' so stderr lands in the log too, not on the terminal.
rv=0
DEBIAN_FRONTEND=noninteractive \
  apt-get -o "Dpkg::Options::=--force-confdef" \
          -o "Dpkg::Options::=--force-confold" \
          -qy install make git python3-pip python3-venv jq \
          > ~/genestack-base-package-install.log 2>&1 || rv=$?

if [ "$rv" -gt 1 ]; then
  error "Check for apt errors at ~/genestack-base-package-install.log"
else
  success "Local base OS packages installed"
fi

# Install project dependencies
success "Installing genestack dependencies"
# If the config dir is a symlink it already points somewhere; otherwise create it.
test -L "$GENESTACK_CONFIG" || mkdir -p "$GENESTACK_CONFIG"

# Persist provider/product selections so re-runs are idempotent.
test -f "$GENESTACK_CONFIG/provider" || echo "$K8S_PROVIDER" > "$GENESTACK_CONFIG/provider"
test -f "$GENESTACK_CONFIG/product" || echo "$GENESTACK_PRODUCT" > "$GENESTACK_CONFIG/product"
mkdir -p "$GENESTACK_CONFIG/inventory/group_vars" "$GENESTACK_CONFIG/inventory/credentials"

# Copy the default k8s config only when no inventory exists yet.
test -d "$GENESTACK_PRODUCT" || error "Product Config $GENESTACK_PRODUCT does not exist here"
if [ "$(find "$GENESTACK_CONFIG/inventory" -name '*.yml' 2>/dev/null | wc -l)" -eq 0 ]; then
  cp -r "${GENESTACK_PRODUCT}"/* "${GENESTACK_CONFIG}/inventory"
fi

# Prepare Ansible
python3 -m venv ~/.venvs/genestack
~/.venvs/genestack/bin/pip install --upgrade pip
source ~/.venvs/genestack/bin/activate && success "Switched to venv ~/.venvs/genestack"

pip install -r /opt/genestack/requirements.txt && success "Installed ansible package"

ansible-playbook scripts/get-ansible-collection-requirements.yml \
  -e collection_file="${ANSIBLE_COLLECTION_FILE}" -e user_collection_file="${USER_COLLECTION_FILE}"

source /opt/genestack/scripts/genestack.rc
success "Environment sourced per /opt/genestack/scripts/genestack.rc"

message "OpenStack Release: ${OPENSTACK_RELEASE}"
message "Target OS Distro: ${CONTAINER_DISTRO_NAME}:${CONTAINER_DISTRO_VERSION}"
message "Deploy Multinode: ${OSH_DEPLOY_MULTINODE}"

echo
1 change: 1 addition & 0 deletions dev-requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
reno==4.0.0
52 changes: 52 additions & 0 deletions docs/quickstart.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
# Quick Start Guide

Before you can do anything we need to get the code. Because we've sold our soul to the submodule devil, you're going to need to recursively clone the repo into your location.

> Throughout all of our documentation and examples, the genestack code base is assumed to be in `/opt`.

``` shell
git clone --recurse-submodules -j4 https://github.com/rackerlabs/genestack /opt/genestack
```

## Basic Setup

The basic setup requires ansible, ansible collection and helm installed to install Kubernetes and OpenStack Helm:

The environment variable `GENESTACK_PRODUCT` is used to bootstrap specific configurations and alters playbook handling.
It is persisted at `/etc/genestack/product` for subsequent executions, so it only has to be set once.

``` shell
GENESTACK_PRODUCT=openstack-enterprise
#GENESTACK_PRODUCT=openstack-flex

/opt/genestack/bootstrap.sh
```

Once the bootstrap is completed the default Kubernetes provider will be configured inside `/etc/genestack/provider`

The ansible inventory is expected at `/etc/genestack/inventory`

## Prepare hosts for installation

``` shell
source /opt/genestack/scripts/genestack.rc
cd /opt/genestack/playbooks

ansible-playbook host-setup.yml
```

## Installing Kubernetes

Currently only the k8s provider kubespray is supported, and it is included as a submodule in the code base.
A default inventory file for kubespray is provided at `/etc/genestack/inventory` and must be modified.
Existing OpenStack Ansible inventory can be converted using the `/opt/genestack/scripts/convert_osa_inventory.py`
script which provides a `hosts.yml`

Once the inventory is updated and configuration altered (networking etc), the Kubernetes cluster can be initialized with

``` shell
source /opt/genestack/scripts/genestack.rc
cd /opt/genestack/submodules/kubespray

ansible-playbook cluster.yml
```
87 changes: 87 additions & 0 deletions etc/netplan/openstack-enterprise.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
---
# Netplan layout for an openstack-enterprise host: two 9000-MTU NICs bonded
# with LACP, with per-function VLANs off the bond feeding OVS bridges.
network:
  version: 2
  ethernets:
    em49:
      mtu: 9000
    p4p1:
      mtu: 9000
  bonds:
    bond0:
      interfaces: [ em49, p4p1 ]
      parameters:
        mode: 802.3ad
        lacp-rate: fast
        transmit-hash-policy: layer2+3
        mii-monitor-interval: 100
      dhcp4: false
      mtu: 9000
  bridges:
    br-bond0:
      dhcp4: false
      mtu: 1500
      interfaces:
        - bond0
    br-host:
      dhcp4: false
      mtu: 1500
      interfaces:
        - vlan1000
      addresses: [ 10.240.0.51/22 ]
      nameservers:
        addresses: [ 1.1.1.1, 1.0.0.1 ]
      routes:
        - to: 0.0.0.0/0
          via: 10.240.0.1
          metric: 500
    br-storage:
      dhcp4: false
      mtu: 9000
      interfaces:
        - vlan1030
      addresses: [ 172.29.244.51/22 ]
    br-repl:
      dhcp4: false
      mtu: 9000
      interfaces:
        - vlan1040
      addresses: [ 172.29.248.51/22 ]
    br-ovs:
      dhcp4: false
      mtu: 9000
      interfaces:
        - vlan1020
      addresses: [ 172.29.240.51/22 ]
    br-pxe:
      dhcp4: false
      mtu: 1500
      interfaces:
        - vlan1050
      addresses: [ 172.23.208.5/22 ]
  openvswitch: {}
  vlans:
    vlan1000:
      id: 1000
      link: bond0
      dhcp4: false
      mtu: 1500
    vlan1020:
      id: 1020
      link: bond0
      dhcp4: false
      mtu: 9000
    vlan1030:
      id: 1030
      link: bond0
      dhcp4: false
      mtu: 9000
    vlan1040:
      id: 1040
      link: bond0
      dhcp4: false
      mtu: 9000
    vlan1050:
      id: 1050
      link: bond0
      dhcp4: false
      # br-pxe rides on this VLAN at MTU 1500, so the VLAN must be at least
      # 1500 too; the previous value (1050) appeared to be a copy of the id.
      mtu: 1500
138 changes: 138 additions & 0 deletions openstack-enterprise/group_vars/all/all.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
## Directory where the binaries will be installed
bin_dir: /usr/local/bin

## The access_ip variable is used to define how other nodes should access
## the node. This is used in flannel to allow other flannel nodes to see
## this node for example. The access_ip is really useful in AWS and Google
## environments where the nodes are accessed remotely by the "public" ip,
## but don't know about that address themselves.
# access_ip: 1.1.1.1


## External LB example config
## apiserver_loadbalancer_domain_name: "elb.some.domain"
# loadbalancer_apiserver:
# address: 1.2.3.4
# port: 1234

## Internal loadbalancers for apiservers
# loadbalancer_apiserver_localhost: true
# valid options are "nginx" or "haproxy"
# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy"

## Local loadbalancer should use this port
## And must be set port 6443
loadbalancer_apiserver_port: 6443

## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx.
loadbalancer_apiserver_healthcheck_port: 8081

### OTHER OPTIONAL VARIABLES

## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries.
## If true, Kubespray does not include host nameservers in nameserverentries in dns_late stage. However, It uses the nameserver to make sure cluster installed safely in dns_early stage.
## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail.
# disable_host_nameservers: false

## Upstream dns servers
upstream_dns_servers:
- 1.1.1.1
- 1.0.0.1

## There are some changes specific to the cloud providers
## for instance we need to encapsulate packets with some network plugins
## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external'
## When openstack is used make sure to source in the openstack credentials
## like you would do when using openstack-client before starting the playbook.
# cloud_provider:

## When cloud_provider is set to 'external', you can set the cloud controller to deploy
## Supported cloud controllers are: 'openstack', 'vsphere', 'huaweicloud' and 'hcloud'
## When openstack or vsphere are used make sure to source in the required fields
# external_cloud_provider:

## Set these proxy values in order to update package manager and docker daemon to use proxies and custom CA for https_proxy if needed
# http_proxy: ""
# https_proxy: ""
# https_proxy_cert_file: ""

## Refer to roles/kubespray-defaults/defaults/main/main.yml before modifying no_proxy
# no_proxy: ""

## Some problems may occur when downloading files over https proxy due to ansible bug
## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable
## SSL validation of get_url module. Note that kubespray will still be performing checksum validation.
# download_validate_certs: False

## If you need exclude all cluster nodes from proxy and other resources, add other resources here.
# additional_no_proxy: ""

## If you need to disable proxying of os package repositories but are still behind an http_proxy set
## skip_http_proxy_on_os_packages to true
## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu
## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish
# skip_http_proxy_on_os_packages: false

## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all
## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the
## no_proxy variable, set below to true:
no_proxy_exclude_workers: false

## Certificate Management
## This setting determines whether certs are generated via scripts.
## Choose 'none' if you provide your own certificates.
## Option is "script", "none"
# cert_management: script

## Set to true to allow pre-checks to fail and continue deployment
# ignore_assert_errors: false

## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable.
# kube_read_only_port: 10255

## Set true to download and cache container
# download_container: true

## Deploy container engine
# Set false if you want to deploy container engine manually.
# deploy_container_engine: true

## Red Hat Enterprise Linux subscription registration
## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination
## Update RHEL subscription purpose usage, role and SLA if necessary
# rh_subscription_username: ""
# rh_subscription_password: ""
# rh_subscription_org_id: ""
# rh_subscription_activation_key: ""
# rh_subscription_usage: "Development"
# rh_subscription_role: "Red Hat Enterprise Server"
# rh_subscription_sla: "Self-Support"

## Check if access_ip responds to ping. Set false if your firewall blocks ICMP.
# ping_access_ip: true

# sysctl_file_path to add sysctl conf to
# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf"

## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication
kube_webhook_token_auth: false
kube_webhook_token_auth_url_skip_tls_verify: false
# kube_webhook_token_auth_url: https://...
## base64-encoded string of the webhook's CA certificate
# kube_webhook_token_auth_ca_data: "LS0t..."

## NTP Settings
# Start the ntpd or chrony service and enable it at system boot.
ntp_enabled: false
ntp_manage_config: false
ntp_servers:
- "0.pool.ntp.org iburst"
- "1.pool.ntp.org iburst"
- "2.pool.ntp.org iburst"
- "3.pool.ntp.org iburst"

## Used to control no_log attribute
unsafe_show_logs: false

## If enabled it will allow kubespray to attempt setup even if the distribution is not supported. For unsupported distributions this can lead to unexpected failures in some cases.
allow_unsupported_distribution_setup: false
Loading