diff --git a/ansible-collection-requirements.yml b/ansible-collection-requirements.yml new file mode 100644 index 000000000..349b442d7 --- /dev/null +++ b/ansible-collection-requirements.yml @@ -0,0 +1,13 @@ +collections: + - name: https://opendev.org/openstack/ansible-collections-openstack + version: 2.2.0 + type: git + - name: https://github.com/ansible-collections/community.general + version: 8.2.0 + type: git + - name: https://opendev.org/openstack/ansible-config_template + version: 2.1.0 + type: git + - name: https://github.com/ansible-collections/kubernetes.core + version: 3.0.0 + type: git diff --git a/bootstrap.sh b/bootstrap.sh new file mode 100755 index 000000000..181329868 --- /dev/null +++ b/bootstrap.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash + +# Copyright 2024, Rackspace Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+export LC_ALL=C.UTF-8
+mkdir -p ~/.venvs
+
+source scripts/lib/functions.sh
+
+# Which config to bootstrap
+test -f "${GENESTACK_CONFIG}/product" 2>/dev/null && export GENESTACK_PRODUCT=$(head -n1 "${GENESTACK_CONFIG}/product")
+export GENESTACK_PRODUCT=${GENESTACK_PRODUCT:-openstack-enterprise}
+
+set -e
+
+success "Environment variables:"
+env | grep -E '^(SUDO|RPC_|ANSIBLE_|GENESTACK_|K8S|CONTAINER_|OPENSTACK_|OSH_)' | sort -u
+
+success "Installing base packages (git):"
+apt-get update  # apt(8) warns its CLI is unstable in scripts; use apt-get
+
+DEBIAN_FRONTEND=noninteractive \
+    apt-get -o "Dpkg::Options::=--force-confdef" \
+            -o "Dpkg::Options::=--force-confold" \
+            -qy install make git python3-pip python3-venv jq > ~/genestack-base-package-install.log 2>&1 || apt_rc=$?
+
+
+if [ "${apt_rc:-0}" -ne 0 ]; then
+  error "Check for ansible errors at ~/genestack-base-package-install.log"
+else
+  success "Local base OS packages installed"
+fi
+
+# Install project dependencies
+success "Installing genestack dependencies"
+test -L "$GENESTACK_CONFIG" || mkdir -p "$GENESTACK_CONFIG"
+
+# Set config
+test -f "$GENESTACK_CONFIG/provider" || echo "$K8S_PROVIDER" > "$GENESTACK_CONFIG/provider"
+test -f "$GENESTACK_CONFIG/product" || echo "$GENESTACK_PRODUCT" > "$GENESTACK_CONFIG/product"
+mkdir -p "$GENESTACK_CONFIG/inventory/group_vars" "$GENESTACK_CONFIG/inventory/credentials"
+
+# Copy default k8s config
+test -d "$GENESTACK_PRODUCT" || error "Product Config $GENESTACK_PRODUCT does not exist here"
+if [ "$(find "$GENESTACK_CONFIG/inventory" -name '*.yml' 2>/dev/null | wc -l)" -eq 0 ]; then
+  cp -r "${GENESTACK_PRODUCT}"/* "${GENESTACK_CONFIG}/inventory"
+fi
+
+# Prepare Ansible
+python3 -m venv ~/.venvs/genestack
+~/.venvs/genestack/bin/pip install pip --upgrade
+source ~/.venvs/genestack/bin/activate && success "Switched to venv ~/.venvs/genestack"
+
+pip install -r /opt/genestack/requirements.txt && success "Installed ansible package"
+
+ansible-playbook scripts/get-ansible-collection-requirements.yml \
+    -e collection_file="${ANSIBLE_COLLECTION_FILE}" -e 
user_collection_file="${USER_COLLECTION_FILE}" + +source /opt/genestack/scripts/genestack.rc +success "Environment sourced per /opt/genestack/scripts/genestack.rc" + +message "OpenStack Release: ${OPENSTACK_RELEASE}" +message "Target OS Distro: ${CONTAINER_DISTRO_NAME}:${CONTAINER_DISTRO_VERSION}" +message "Deploy Multinode: ${OSH_DEPLOY_MULTINODE}" + +echo diff --git a/dev-requirements.txt b/dev-requirements.txt new file mode 100644 index 000000000..178d3d12e --- /dev/null +++ b/dev-requirements.txt @@ -0,0 +1 @@ +reno==4.0.0 diff --git a/docs/quickstart.md b/docs/quickstart.md new file mode 100644 index 000000000..847b54b0d --- /dev/null +++ b/docs/quickstart.md @@ -0,0 +1,52 @@ +# Quick Start Guide + +Before you can do anything we need to get the code. Because we've sold our soul to the submodule devil, you're going to need to recursively clone the repo into your location. + +> Throughout all of our documentation and examples the genestack code base will be assumed to be in `/opt`. + +``` shell +git clone --recurse-submodules -j4 https://github.com/rackerlabs/genestack /opt/genestack +``` + +## Basic Setup + +The basic setup requires ansible, the ansible collections, and helm to be installed in order to install Kubernetes and OpenStack Helm: + +The environment variable `GENESTACK_PRODUCT` is used to bootstrap specific configurations and alters playbook handling. +It is persisted at `/etc/genestack/product` for subsequent executions, so it only has to be set once. 
+ +``` shell +GENESTACK_PRODUCT=openstack-enterprise +#GENESTACK_PRODUCT=openstack-flex + +/opt/genestack/bootstrap.sh +``` + +Once the bootstrap is completed the default Kubernetes provider will be configured inside `/etc/genestack/provider` + +The ansible inventory is expected at `/etc/genestack/inventory` + +## Prepare hosts for installation + +``` shell +source /opt/genestack/scripts/genestack.rc +cd /opt/genestack/playbooks + +ansible-playbook host-setup.yml +``` + +## Installing Kubernetes + +Currently only the k8s provider kubespray is supported and included as submodule into the code base. +A default inventory file for kubespray is provided at `/etc/genestack/inventory` and must be modified. +Existing OpenStack Ansible inventory can be converted using the `/opt/genestack/scripts/convert_osa_inventory.py` +script which provides a `hosts.yml` + +Once the inventory is updated and configuration altered (networking etc), the Kubernetes cluster can be initialized with + +``` shell +source /opt/genestack/scripts/genestack.rc +cd /opt/genestack/submodules/kubespray + +ansible-playbook cluster.yml +``` diff --git a/etc/netplan/openstack-enterprise.yaml b/etc/netplan/openstack-enterprise.yaml new file mode 100644 index 000000000..0642c0870 --- /dev/null +++ b/etc/netplan/openstack-enterprise.yaml @@ -0,0 +1,87 @@ +--- +network: + version: 2 + ethernets: + em49: + mtu: 9000 + p4p1: + mtu: 9000 + bonds: + bond0: + interfaces: [ em49, p4p1 ] + parameters: + mode: 802.3ad + lacp-rate: fast + transmit-hash-policy: layer2+3 + mii-monitor-interval: 100 + dhcp4: false + mtu: 9000 + bridges: + br-bond0: + dhcp4: false + mtu: 1500 + interfaces: + - bond0 + br-host: + dhcp4: false + mtu: 1500 + interfaces: + - vlan1000 + addresses: [ 10.240.0.51/22 ] + nameservers: + addresses: [ 1.1.1.1, 1.0.0.1 ] + routes: + - to: 0.0.0.0/0 + via: 10.240.0.1 + metric: 500 + br-storage: + dhcp4: false + mtu: 9000 + interfaces: + - vlan1030 + addresses: [ 172.29.244.51/22 ] + br-repl: + dhcp4: 
false + mtu: 9000 + interfaces: + - vlan1040 + addresses: [ 172.29.248.51/22 ] + br-ovs: + dhcp4: false + mtu: 9000 + interfaces: + - vlan1020 + addresses: [ 172.29.240.51/22 ] + br-pxe: + dhcp4: false + mtu: 1500 + interfaces: + - vlan1050 + addresses: [ 172.23.208.5/22 ] + openvswitch: {} + vlans: + vlan1000: + id: 1000 + link: bond0 + dhcp4: false + mtu: 1500 + vlan1020: + id: 1020 + link: bond0 + dhcp4: false + mtu: 9000 + vlan1030: + id: 1030 + link: bond0 + dhcp4: false + mtu: 9000 + vlan1040: + id: 1040 + link: bond0 + dhcp4: false + mtu: 9000 + vlan1050: + id: 1050 + link: bond0 + dhcp4: false + mtu: 1500 diff --git a/openstack-enterprise/group_vars/all/all.yml b/openstack-enterprise/group_vars/all/all.yml new file mode 100644 index 000000000..e88cc8c1d --- /dev/null +++ b/openstack-enterprise/group_vars/all/all.yml @@ -0,0 +1,138 @@ +## Directory where the binaries will be installed +bin_dir: /usr/local/bin + +## The access_ip variable is used to define how other nodes should access +## the node. This is used in flannel to allow other flannel nodes to see +## this node for example. The access_ip is really useful AWS and Google +## environments where the nodes are accessed remotely by the "public" ip, +## but don't know about that address themselves. +# access_ip: 1.1.1.1 + + +## External LB example config +## apiserver_loadbalancer_domain_name: "elb.some.domain" +# loadbalancer_apiserver: +# address: 1.2.3.4 +# port: 1234 + +## Internal loadbalancers for apiservers +# loadbalancer_apiserver_localhost: true +# valid options are "nginx" or "haproxy" +# loadbalancer_apiserver_type: nginx # valid values "nginx" or "haproxy" + +## Local loadbalancer should use this port +## And must be set port 6443 +loadbalancer_apiserver_port: 6443 + +## If loadbalancer_apiserver_healthcheck_port variable defined, enables proxy liveness check for nginx. 
+loadbalancer_apiserver_healthcheck_port: 8081 + +### OTHER OPTIONAL VARIABLES + +## By default, Kubespray collects nameservers on the host. It then adds the previously collected nameservers in nameserverentries. +## If true, Kubespray does not include host nameservers in nameserverentries in dns_late stage. However, It uses the nameserver to make sure cluster installed safely in dns_early stage. +## Use this option with caution, you may need to define your dns servers. Otherwise, the outbound queries such as www.google.com may fail. +# disable_host_nameservers: false + +## Upstream dns servers +upstream_dns_servers: + - 1.1.1.1 + - 1.0.0.1 + +## There are some changes specific to the cloud providers +## for instance we need to encapsulate packets with some network plugins +## If set the possible values are either 'gce', 'aws', 'azure', 'openstack', 'vsphere', 'oci', or 'external' +## When openstack is used make sure to source in the openstack credentials +## like you would do when using openstack-client before starting the playbook. +# cloud_provider: + +## When cloud_provider is set to 'external', you can set the cloud controller to deploy +## Supported cloud controllers are: 'openstack', 'vsphere', 'huaweicloud' and 'hcloud' +## When openstack or vsphere are used make sure to source in the required fields +# external_cloud_provider: + +## Set these proxy values in order to update package manager and docker daemon to use proxies and custom CA for https_proxy if needed +# http_proxy: "" +# https_proxy: "" +# https_proxy_cert_file: "" + +## Refer to roles/kubespray-defaults/defaults/main/main.yml before modifying no_proxy +# no_proxy: "" + +## Some problems may occur when downloading files over https proxy due to ansible bug +## https://github.com/ansible/ansible/issues/32750. Set this variable to False to disable +## SSL validation of get_url module. Note that kubespray will still be performing checksum validation. 
+# download_validate_certs: False + +## If you need exclude all cluster nodes from proxy and other resources, add other resources here. +# additional_no_proxy: "" + +## If you need to disable proxying of os package repositories but are still behind an http_proxy set +## skip_http_proxy_on_os_packages to true +## This will cause kubespray not to set proxy environment in /etc/yum.conf for centos and in /etc/apt/apt.conf for debian/ubuntu +## Special information for debian/ubuntu - you have to set the no_proxy variable, then apt package will install from your source of wish +# skip_http_proxy_on_os_packages: false + +## Since workers are included in the no_proxy variable by default, docker engine will be restarted on all nodes (all +## pods will restart) when adding or removing workers. To override this behaviour by only including master nodes in the +## no_proxy variable, set below to true: +no_proxy_exclude_workers: false + +## Certificate Management +## This setting determines whether certs are generated via scripts. +## Chose 'none' if you provide your own certificates. +## Option is "script", "none" +# cert_management: script + +## Set to true to allow pre-checks to fail and continue deployment +# ignore_assert_errors: false + +## The read-only port for the Kubelet to serve on with no authentication/authorization. Uncomment to enable. +# kube_read_only_port: 10255 + +## Set true to download and cache container +# download_container: true + +## Deploy container engine +# Set false if you want to deploy container engine manually. 
+# deploy_container_engine: true + +## Red Hat Enterprise Linux subscription registration +## Add either RHEL subscription Username/Password or Organization ID/Activation Key combination +## Update RHEL subscription purpose usage, role and SLA if necessary +# rh_subscription_username: "" +# rh_subscription_password: "" +# rh_subscription_org_id: "" +# rh_subscription_activation_key: "" +# rh_subscription_usage: "Development" +# rh_subscription_role: "Red Hat Enterprise Server" +# rh_subscription_sla: "Self-Support" + +## Check if access_ip responds to ping. Set false if your firewall blocks ICMP. +# ping_access_ip: true + +# sysctl_file_path to add sysctl conf to +# sysctl_file_path: "/etc/sysctl.d/99-sysctl.conf" + +## Variables for webhook token auth https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication +kube_webhook_token_auth: false +kube_webhook_token_auth_url_skip_tls_verify: false +# kube_webhook_token_auth_url: https://... +## base64-encoded string of the webhook's CA certificate +# kube_webhook_token_auth_ca_data: "LS0t..." + +## NTP Settings +# Start the ntpd or chrony service and enable it at system boot. +ntp_enabled: false +ntp_manage_config: false +ntp_servers: + - "0.pool.ntp.org iburst" + - "1.pool.ntp.org iburst" + - "2.pool.ntp.org iburst" + - "3.pool.ntp.org iburst" + +## Used to control no_log attribute +unsafe_show_logs: false + +## If enabled it will allow kubespray to attempt setup even if the distribution is not supported. For unsupported distributions this can lead to unexpected failures in some cases. 
+allow_unsupported_distribution_setup: false diff --git a/openstack-enterprise/group_vars/all/containerd.yml b/openstack-enterprise/group_vars/all/containerd.yml new file mode 100644 index 000000000..b3be15c90 --- /dev/null +++ b/openstack-enterprise/group_vars/all/containerd.yml @@ -0,0 +1,44 @@ +--- +containerd_storage_dir: "/var/lib/containerd" +containerd_state_dir: "/run/containerd" +containerd_oom_score: 0 + +# containerd_default_runtime: "runc" +# containerd_snapshotter: "native" + +# containerd_runc_runtime: +# name: runc +# type: "io.containerd.runc.v2" +# engine: "" +# root: "" + +# containerd_additional_runtimes: +# Example for Kata Containers as additional runtime: +# - name: kata +# type: "io.containerd.kata.v2" +# engine: "" +# root: "" + +# containerd_grpc_max_recv_message_size: 16777216 +# containerd_grpc_max_send_message_size: 16777216 + +# containerd_debug_level: "info" + +# containerd_metrics_address: "" + +# containerd_metrics_grpc_histogram: false + +# Registries defined within containerd. 
+# containerd_registries_mirrors: +# - prefix: docker.io +# mirrors: +# - host: https://registry-1.docker.io +# capabilities: ["pull", "resolve"] +# skip_verify: false + +# containerd_max_container_log_line_size: -1 + +# containerd_registry_auth: +# - registry: 10.0.0.2:5000 +# username: user +# password: pass diff --git a/openstack-enterprise/group_vars/all/environment.yml b/openstack-enterprise/group_vars/all/environment.yml new file mode 100644 index 000000000..858f2b82f --- /dev/null +++ b/openstack-enterprise/group_vars/all/environment.yml @@ -0,0 +1,12 @@ +--- + +no_proxy_env: "localhost,127.0.0.1" +http_proxy_env: "{{ lookup('env', 'http_proxy') }}" +https_proxy_env: "{{ lookup('env', 'https_proxy') }}" +global_environment_variables: + HTTP_PROXY: "{{ http_proxy_env }}" + HTTPS_PROXY: "{{ https_proxy_env }}" + http_proxy: "{{ http_proxy_env }}" + https_proxy: "{{ https_proxy_env }}" + NO_PROXY: "{{ no_proxy_env }}" + no_proxy: "{{ no_proxy_env }}" diff --git a/openstack-enterprise/group_vars/all/etcd.yml b/openstack-enterprise/group_vars/all/etcd.yml new file mode 100644 index 000000000..39600c35f --- /dev/null +++ b/openstack-enterprise/group_vars/all/etcd.yml @@ -0,0 +1,16 @@ +--- +## Directory where etcd data stored +etcd_data_dir: /var/lib/etcd + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. +## Additionally you can set this to kubeadm if you want to install etcd using kubeadm +## Kubeadm etcd deployment is experimental and only available for new deployments +## If this is not set, container manager will be inherited from the Kubespray defaults +## and not from k8s_cluster/k8s-cluster.yml, which might not be what you want. +## Also this makes possible to use different container manager for etcd nodes. 
+# container_manager: containerd + +## Settings for etcd deployment type +# Set this to docker if you are using container_manager: docker +etcd_deployment_type: host diff --git a/openstack-enterprise/group_vars/all/openstack.yml b/openstack-enterprise/group_vars/all/openstack.yml new file mode 100644 index 000000000..0fec79ad5 --- /dev/null +++ b/openstack-enterprise/group_vars/all/openstack.yml @@ -0,0 +1,50 @@ +## When OpenStack is used, Cinder version can be explicitly specified if autodetection fails (Fixed in 1.9: https://github.com/kubernetes/kubernetes/issues/50461) +# openstack_blockstorage_version: "v1/v2/auto (default)" +# openstack_blockstorage_ignore_volume_az: yes +## When OpenStack is used, if LBaaSv2 is available you can enable it with the following 2 variables. +# openstack_lbaas_enabled: True +# openstack_lbaas_subnet_id: "Neutron subnet ID (not network ID) to create LBaaS VIP" +## To enable automatic floating ip provisioning, specify a subnet. +# openstack_lbaas_floating_network_id: "Neutron network ID (not subnet ID) to get floating IP from, disabled by default" +## Override default LBaaS behavior +# openstack_lbaas_use_octavia: False +# openstack_lbaas_method: "ROUND_ROBIN" +# openstack_lbaas_provider: "haproxy" +# openstack_lbaas_create_monitor: "yes" +# openstack_lbaas_monitor_delay: "1m" +# openstack_lbaas_monitor_timeout: "30s" +# openstack_lbaas_monitor_max_retries: "3" + +## Values for the external OpenStack Cloud Controller +# external_openstack_lbaas_enabled: true +# external_openstack_lbaas_floating_network_id: "Neutron network ID to get floating IP from" +# external_openstack_lbaas_floating_subnet_id: "Neutron subnet ID to get floating IP from" +# external_openstack_lbaas_method: ROUND_ROBIN +# external_openstack_lbaas_provider: amphora +# external_openstack_lbaas_subnet_id: "Neutron subnet ID to create LBaaS VIP" +# external_openstack_lbaas_network_id: "Neutron network ID to create LBaaS VIP" +# 
external_openstack_lbaas_manage_security_groups: false +# external_openstack_lbaas_create_monitor: false +# external_openstack_lbaas_monitor_delay: 5 +# external_openstack_lbaas_monitor_max_retries: 1 +# external_openstack_lbaas_monitor_timeout: 3 +# external_openstack_lbaas_internal_lb: false +# external_openstack_network_ipv6_disabled: false +# external_openstack_network_internal_networks: [] +# external_openstack_network_public_networks: [] +# external_openstack_metadata_search_order: "configDrive,metadataService" + +## Application credentials to authenticate against Keystone API +## Those settings will take precedence over username and password that might be set your environment +## All of them are required +# external_openstack_application_credential_name: +# external_openstack_application_credential_id: +# external_openstack_application_credential_secret: + +## The tag of the external OpenStack Cloud Controller image +# external_openstack_cloud_controller_image_tag: "latest" + +## To use Cinder CSI plugin to provision volumes set this value to true +## Make sure to source in the openstack credentials +# cinder_csi_enabled: true +# cinder_csi_controller_replicas: 1 diff --git a/openstack-enterprise/group_vars/etcd.yml b/openstack-enterprise/group_vars/etcd.yml new file mode 100644 index 000000000..e49f3de1f --- /dev/null +++ b/openstack-enterprise/group_vars/etcd.yml @@ -0,0 +1,35 @@ +--- +## Etcd auto compaction retention for mvcc key value store in hour +# etcd_compaction_retention: 0 + +## Set level of detail for etcd exported metrics, specify 'extensive' to include histogram metrics. +# etcd_metrics: basic + +## Etcd is restricted by default to 512M on systems under 4GB RAM, 512MB is not enough for much more than testing. +## Set this if your etcd nodes have less than 4GB but you want more RAM for etcd. Set to 0 for unrestricted RAM. 
+## This value is only relevant when deploying etcd with `etcd_deployment_type: docker` +etcd_memory_limit: "0" + +## Etcd has a default of 2G for its space quota. If you put a value in etcd_memory_limit which is less than +## etcd_quota_backend_bytes, you may encounter out of memory terminations of the etcd cluster. Please check +## etcd documentation for more information. +# 8G is a suggested maximum size for normal environments and etcd warns at startup if the configured value exceeds it. +# etcd_quota_backend_bytes: "2147483648" + +# Maximum client request size in bytes the server will accept. +# etcd is designed to handle small key value pairs typical for metadata. +# Larger requests will work, but may increase the latency of other requests +# etcd_max_request_bytes: "1572864" + +### ETCD: disable peer client cert authentication. +# This affects ETCD_PEER_CLIENT_CERT_AUTH variable +# etcd_peer_client_auth: true + +## Enable distributed tracing +## To enable this experimental feature, set the etcd_experimental_enable_distributed_tracing: true, along with the +## etcd_experimental_distributed_tracing_sample_rate to choose how many samples to collect per million spans, +## the default sampling rate is 0 https://etcd.io/docs/v3.5/op-guide/monitoring/#distributed-tracing +# etcd_experimental_enable_distributed_tracing: false +# etcd_experimental_distributed_tracing_sample_rate: 100 +# etcd_experimental_distributed_tracing_address: "localhost:4317" +# etcd_experimental_distributed_tracing_service_name: etcd diff --git a/openstack-enterprise/group_vars/k8s_cluster/addons.yml b/openstack-enterprise/group_vars/k8s_cluster/addons.yml new file mode 100644 index 000000000..f531406a3 --- /dev/null +++ b/openstack-enterprise/group_vars/k8s_cluster/addons.yml @@ -0,0 +1,261 @@ +--- +# Kubernetes dashboard +# RBAC required. see docs/getting-started.md for access details. 
+# dashboard_enabled: false + +# Helm deployment +helm_enabled: true + +# Registry deployment +registry_enabled: true +registry_port: 5010 #Don't overlap with keystone on port 5000 +registry_namespace: kube-system +# registry_storage_class: "" +registry_disk_size: "50Gi" + +# Metrics Server deployment +metrics_server_enabled: true +metrics_server_container_port: 10250 +# metrics_server_kubelet_insecure_tls: true +metrics_server_metric_resolution: 60s +metrics_server_kubelet_preferred_address_types: "InternalIP" +# metrics_server_host_network: false +# metrics_server_replicas: 1 + +# Rancher Local Path Provisioner +local_path_provisioner_enabled: true +local_path_provisioner_namespace: "local-path-storage" +local_path_provisioner_storage_class: "local-path" +local_path_provisioner_reclaim_policy: Delete +local_path_provisioner_claim_root: /var/lib/local-path-provisioner +# local_path_provisioner_debug: false +# local_path_provisioner_image_repo: "rancher/local-path-provisioner" +# local_path_provisioner_image_tag: "v0.0.24" +# local_path_provisioner_helper_image_repo: "busybox" +# local_path_provisioner_helper_image_tag: "latest" + +# Local volume provisioner deployment +local_volume_provisioner_enabled: false +# local_volume_provisioner_namespace: kube-system +# local_volume_provisioner_nodelabels: +# - kubernetes.io/hostname +# - topology.kubernetes.io/region +# - topology.kubernetes.io/zone +# local_volume_provisioner_storage_classes: +# local-storage: +# host_dir: /mnt/disks +# mount_dir: /mnt/disks +# volume_mode: Filesystem +# fs_type: ext4 +# fast-disks: +# host_dir: /mnt/fast-disks +# mount_dir: /mnt/fast-disks +# block_cleaner_command: +# - "/scripts/shred.sh" +# - "2" +# volume_mode: Filesystem +# fs_type: ext4 +# local_volume_provisioner_tolerations: +# - effect: NoSchedule +# operator: Exists + +# CSI Volume Snapshot Controller deployment, set this to true if your CSI is able to manage snapshots +# currently, setting cinder_csi_enabled=true would 
automatically enable the snapshot controller +# Longhorn is an extenal CSI that would also require setting this to true but it is not included in kubespray +# csi_snapshot_controller_enabled: false +# csi snapshot namespace +# snapshot_controller_namespace: kube-system + +# CephFS provisioner deployment +cephfs_provisioner_enabled: false +# cephfs_provisioner_namespace: "cephfs-provisioner" +# cephfs_provisioner_cluster: ceph +# cephfs_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# cephfs_provisioner_admin_id: admin +# cephfs_provisioner_secret: secret +# cephfs_provisioner_storage_class: cephfs +# cephfs_provisioner_reclaim_policy: Delete +# cephfs_provisioner_claim_root: /volumes +# cephfs_provisioner_deterministic_names: true + +# RBD provisioner deployment +rbd_provisioner_enabled: false +# rbd_provisioner_namespace: rbd-provisioner +# rbd_provisioner_replicas: 2 +# rbd_provisioner_monitors: "172.24.0.1:6789,172.24.0.2:6789,172.24.0.3:6789" +# rbd_provisioner_pool: kube +# rbd_provisioner_admin_id: admin +# rbd_provisioner_secret_name: ceph-secret-admin +# rbd_provisioner_secret: ceph-key-admin +# rbd_provisioner_user_id: kube +# rbd_provisioner_user_secret_name: ceph-secret-user +# rbd_provisioner_user_secret: ceph-key-user +# rbd_provisioner_user_secret_namespace: rbd-provisioner +# rbd_provisioner_fs_type: ext4 +# rbd_provisioner_image_format: "2" +# rbd_provisioner_image_features: layering +# rbd_provisioner_storage_class: rbd +# rbd_provisioner_reclaim_policy: Delete + +# Nginx ingress controller deployment +ingress_nginx_enabled: false +# ingress_nginx_host_network: false +ingress_publish_status_address: "" +# ingress_nginx_nodeselector: +# kubernetes.io/os: "linux" +# ingress_nginx_tolerations: +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# ingress_nginx_namespace: "ingress-nginx" +# ingress_nginx_insecure_port: 80 +# ingress_nginx_secure_port: 443 +# 
ingress_nginx_configmap: +# map-hash-bucket-size: "128" +# ssl-protocols: "TLSv1.2 TLSv1.3" +# ingress_nginx_configmap_tcp_services: +# 9000: "default/example-go:8080" +# ingress_nginx_configmap_udp_services: +# 53: "kube-system/coredns:53" +# ingress_nginx_extra_args: +# - --default-ssl-certificate=default/foo-tls +# ingress_nginx_termination_grace_period_seconds: 300 +# ingress_nginx_class: nginx +# ingress_nginx_without_class: true +# ingress_nginx_default: false + +# ALB ingress controller deployment +ingress_alb_enabled: false +# alb_ingress_aws_region: "us-east-1" +# alb_ingress_restrict_scheme: "false" +# Enables logging on all outbound requests sent to the AWS API. +# If logging is desired, set to true. +# alb_ingress_aws_debug: "false" + +# Cert manager deployment +cert_manager_enabled: false +# cert_manager_namespace: "cert-manager" +# cert_manager_tolerations: +# - key: node-role.kubernetes.io/control-plane +# effect: NoSchedule +# cert_manager_affinity: +# nodeAffinity: +# preferredDuringSchedulingIgnoredDuringExecution: +# - weight: 100 +# preference: +# matchExpressions: +# - key: node-role.kubernetes.io/control-plane +# operator: In +# values: +# - "" +# cert_manager_nodeselector: +# kubernetes.io/os: "linux" + +# cert_manager_trusted_internal_ca: | +# -----BEGIN CERTIFICATE----- +# [REPLACE with your CA certificate] +# -----END CERTIFICATE----- +# cert_manager_leader_election_namespace: kube-system + +# cert_manager_dns_policy: "ClusterFirst" +# cert_manager_dns_config: +# nameservers: +# - "1.1.1.1" +# - "8.8.8.8" + +# cert_manager_controller_extra_args: +# - "--dns01-recursive-nameservers-only=true" +# - "--dns01-recursive-nameservers=1.1.1.1:53,8.8.8.8:53" + +# MetalLB deployment +metallb_enabled: false +metallb_speaker_enabled: "{{ metallb_enabled }}" +# metallb_version: v0.13.9 +# metallb_protocol: "layer2" +# metallb_port: "7472" +# metallb_memberlist_port: "7946" +# metallb_config: +# speaker: +# nodeselector: +# kubernetes.io/os: "linux" +# 
tollerations: +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# controller: +# nodeselector: +# kubernetes.io/os: "linux" +# tolerations: +# - key: "node-role.kubernetes.io/control-plane" +# operator: "Equal" +# value: "" +# effect: "NoSchedule" +# address_pools: +# primary: +# ip_range: +# - 10.5.0.0/16 +# auto_assign: true +# pool1: +# ip_range: +# - 10.6.0.0/16 +# auto_assign: true +# pool2: +# ip_range: +# - 10.10.0.0/16 +# auto_assign: true +# layer2: +# - primary +# layer3: +# defaults: +# peer_port: 179 +# hold_time: 120s +# communities: +# vpn-only: "1234:1" +# NO_ADVERTISE: "65535:65282" +# metallb_peers: +# peer1: +# peer_address: 10.6.0.1 +# peer_asn: 64512 +# my_asn: 4200000000 +# communities: +# - vpn-only +# address_pool: +# - pool1 +# peer2: +# peer_address: 10.10.0.1 +# peer_asn: 64513 +# my_asn: 4200000000 +# communities: +# - NO_ADVERTISE +# address_pool: +# - pool2 + +argocd_enabled: false +# argocd_version: v2.8.4 +# argocd_namespace: argocd +# Default password: +# - https://argo-cd.readthedocs.io/en/stable/getting_started/#4-login-using-the-cli +# --- +# The initial password is autogenerated and stored in `argocd-initial-admin-secret` in the argocd namespace defined above. 
+# Using the argocd CLI the generated password can be automatically be fetched from the current kubectl context with the command: +# argocd admin initial-password -n argocd +# --- +# Use the following var to set admin password +# argocd_admin_password: "password" + +# The plugin manager for kubectl +krew_enabled: false +krew_root_dir: "/usr/local/krew" + +# Kube VIP +kube_vip_enabled: false +# kube_vip_arp_enabled: true +# kube_vip_controlplane_enabled: true +# kube_vip_address: 192.168.56.120 +# loadbalancer_apiserver: +# address: "{{ kube_vip_address }}" +# port: 6443 +# kube_vip_interface: eth0 +# kube_vip_services_enabled: false diff --git a/openstack-enterprise/group_vars/k8s_cluster/k8s-cluster.yml b/openstack-enterprise/group_vars/k8s_cluster/k8s-cluster.yml new file mode 100644 index 000000000..1b0069dde --- /dev/null +++ b/openstack-enterprise/group_vars/k8s_cluster/k8s-cluster.yml @@ -0,0 +1,373 @@ +--- +# Kubernetes configuration dirs and system namespace. +# Those are where all the additional config stuff goes +# the kubernetes normally puts in /srv/kubernetes. +# This puts them in a sane location and namespace. +# Editing those values will almost surely break something. +kube_config_dir: /etc/kubernetes +kube_script_dir: "{{ bin_dir }}/kubernetes-scripts" +kube_manifest_dir: "{{ kube_config_dir }}/manifests" + +# This is where all the cert scripts and certs will be located +kube_cert_dir: "{{ kube_config_dir }}/ssl" + +# This is where all of the bearer tokens will be stored +kube_token_dir: "{{ kube_config_dir }}/tokens" + +kube_api_anonymous_auth: true + +## Change this to use another Kubernetes version, e.g. a current beta release +kube_version: v1.26.10 + +# Where the binaries will be downloaded. +# Note: ensure that you've enough disk space (about 1G) +local_release_dir: "/tmp/releases" +# Random shifts for retrying failed ops like pushing/downloading +retry_stagger: 5 + +# This is the user that owns tha cluster installation. 
+kube_owner: kube + +# This is the group that the cert creation scripts chgrp the +# cert files to. Not really changeable... +kube_cert_group: kube-cert + +# Cluster Loglevel configuration +kube_log_level: 2 + +# Directory where credentials will be stored +credentials_dir: "{{ inventory_dir }}/credentials" + +## It is possible to activate / deactivate selected authentication methods (oidc, static token auth) +# kube_oidc_auth: false +# kube_token_auth: false + + +## Variables for OpenID Connect Configuration https://kubernetes.io/docs/admin/authentication/ +## To use OpenID you have to deploy additional an OpenID Provider (e.g Dex, Keycloak, ...) + +# kube_oidc_url: https:// ... +# kube_oidc_client_id: kubernetes +## Optional settings for OIDC +# kube_oidc_ca_file: "{{ kube_cert_dir }}/ca.pem" +# kube_oidc_username_claim: sub +# kube_oidc_username_prefix: 'oidc:' +# kube_oidc_groups_claim: groups +# kube_oidc_groups_prefix: 'oidc:' + +## Variables to control webhook authn/authz +# kube_webhook_token_auth: false +# kube_webhook_token_auth_url: https://... +# kube_webhook_token_auth_url_skip_tls_verify: false + +## For webhook authorization, authorization_modes must include Webhook +# kube_webhook_authorization: false +# kube_webhook_authorization_url: https://... +# kube_webhook_authorization_url_skip_tls_verify: false + +# Choose network plugin (cilium, calico, kube-ovn, weave or flannel. Use cni for generic cni plugin) +# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing +kube_network_plugin: calico + +# Setting multi_networking to true will install Multus: https://github.com/k8snetworkplumbingwg/multus-cni +kube_network_plugin_multus: false + +# Kubernetes internal network for services, unused block of space. +kube_service_addresses: 10.192.0.0/13 + +# internal network. When used, it will assign IP +# addresses from this range to individual pods. +# This network must be unused in your network infrastructure! 
+kube_pods_subnet: 10.200.0.0/13 + +# internal network node size allocation (optional). This is the size allocated +# to each node for pod IP address allocation. Note that the number of pods per node is +# also limited by the kubelet_max_pods variable which defaults to 110. +# +# Example: +# Up to 64 nodes and up to 254 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 24 +# - kubelet_max_pods: 110 +# +# Example: +# Up to 128 nodes and up to 126 or kubelet_max_pods (the lowest of the two) pods per node: +# - kube_pods_subnet: 10.233.64.0/18 +# - kube_network_node_prefix: 25 +# - kubelet_max_pods: 110 +kube_network_node_prefix: 24 + +# Configure Dual Stack networking (i.e. both IPv4 and IPv6) +enable_dual_stack_networks: false + +# Kubernetes internal network for IPv6 services, unused block of space. +# This is only used if enable_dual_stack_networks is set to true +# This provides 4096 IPv6 IPs +kube_service_addresses_ipv6: fd85:ee78:d8a6:8607::1000/116 + +# Internal network. When used, it will assign IPv6 addresses from this range to individual pods. +# This network must not already be in your network infrastructure! +# This is only used if enable_dual_stack_networks is set to true. +# This provides room for 256 nodes with 254 pods per node. +kube_pods_subnet_ipv6: fd85:ee78:d8a6:8607::1:0000/112 + +# IPv6 subnet size allocated to each for pods. +# This is only used if enable_dual_stack_networks is set to true +# This provides room for 254 pods per node. +kube_network_node_prefix_ipv6: 120 + +# The port the API Server will be listening on. +kube_apiserver_ip: "{{ kube_service_addresses | ipaddr('net') | ipaddr(1) | ipaddr('address') }}" +kube_apiserver_port: 6443 # (https) + +# Kube-proxy proxyMode configuration. 
+# non-critical pods to also terminate gracefully
+# remove_default_searchdomains: false +# Can be coredns, coredns_dual, manual or none +dns_mode: coredns +# Set manual server if using a custom cluster DNS server +# manual_dns_server: 10.x.x.x +# Enable nodelocal dns cache +enable_nodelocaldns: true +enable_nodelocaldns_secondary: false +nodelocaldns_ip: 169.254.25.10 +nodelocaldns_health_port: 9254 +nodelocaldns_second_health_port: 9256 +nodelocaldns_bind_metrics_host_ip: false +nodelocaldns_secondary_skew_seconds: 5 +# nodelocaldns_external_zones: +# - zones: +# - example.com +# - example.io:1053 +# nameservers: +# - 1.1.1.1 +# - 2.2.2.2 +# cache: 5 +# - zones: +# - https://mycompany.local:4453 +# nameservers: +# - 192.168.0.53 +# cache: 0 +# - zones: +# - mydomain.tld +# nameservers: +# - 10.233.0.3 +# cache: 5 +# rewrite: +# - name website.tld website.namespace.svc.cluster.local +# Enable k8s_external plugin for CoreDNS +enable_coredns_k8s_external: false +coredns_k8s_external_zone: k8s_external.local +# Enable endpoint_pod_names option for kubernetes plugin +enable_coredns_k8s_endpoint_pod_names: false +# Set forward options for upstream DNS servers in coredns (and nodelocaldns) config +# dns_upstream_forward_extra_opts: +# policy: sequential +# Apply extra options to coredns kubernetes plugin +# coredns_kubernetes_extra_opts: +# - 'fallthrough example.local' +# Forward extra domains to the coredns kubernetes plugin +# coredns_kubernetes_extra_domains: '' + +# Can be docker_dns, host_resolvconf or none +resolvconf_mode: host_resolvconf +# Deploy netchecker app to verify DNS resolve as an HTTP service +deploy_netchecker: false +# Ip address of the kubernetes skydns service +skydns_server: "{{ kube_service_addresses | ipaddr('net') | ipaddr(3) | ipaddr('address') }}" +skydns_server_secondary: "{{ kube_service_addresses | ipaddr('net') | ipaddr(4) | ipaddr('address') }}" +dns_domain: "{{ cluster_name }}" + +## Container runtime +## docker for docker, crio for cri-o and containerd for containerd. 
+## Default: containerd +container_manager: containerd + +# Additional container runtimes +kata_containers_enabled: false + +kubeadm_certificate_key: "{{ lookup('password', credentials_dir + '/kubeadm_certificate_key.creds length=64 chars=hexdigits') | lower }}" + +# K8s image pull policy (imagePullPolicy) +k8s_image_pull_policy: IfNotPresent + +# audit log for kubernetes +kubernetes_audit: false + +# define kubelet config dir for dynamic kubelet +# kubelet_config_dir: +default_kubelet_config_dir: "{{ kube_config_dir }}/dynamic_kubelet_dir" + +# Make a copy of kubeconfig on the host that runs Ansible in {{ inventory_dir }}/artifacts +# kubeconfig_localhost: false +# Use ansible_host as external api ip when copying over kubeconfig. +# kubeconfig_localhost_ansible_host: false +# Download kubectl onto the host that runs Ansible in {{ bin_dir }} +# kubectl_localhost: false + +# A comma separated list of levels of node allocatable enforcement to be enforced by kubelet. +# Acceptable options are 'pods', 'system-reserved', 'kube-reserved' and ''. Default is "". +# kubelet_enforce_node_allocatable: pods + +## Set runtime and kubelet cgroups when using systemd as cgroup driver (default) +# kubelet_runtime_cgroups: "/{{ kube_service_cgroups }}/{{ container_manager }}.service" +# kubelet_kubelet_cgroups: "/{{ kube_service_cgroups }}/kubelet.service" + +## Set runtime and kubelet cgroups when using cgroupfs as cgroup driver +# kubelet_runtime_cgroups_cgroupfs: "/system.slice/{{ container_manager }}.service" +# kubelet_kubelet_cgroups_cgroupfs: "/system.slice/kubelet.service" + +# Optionally reserve this space for kube daemons. 
+# kube_reserved: false +## Uncomment to override default values +## The following two items need to be set when kube_reserved is true +# kube_reserved_cgroups_for_service_slice: kube.slice +# kube_reserved_cgroups: "/{{ kube_reserved_cgroups_for_service_slice }}" +# kube_memory_reserved: 256Mi +# kube_cpu_reserved: 100m +# kube_ephemeral_storage_reserved: 2Gi +# kube_pid_reserved: "1000" +# Reservation for master hosts +# kube_master_memory_reserved: 512Mi +# kube_master_cpu_reserved: 200m +# kube_master_ephemeral_storage_reserved: 2Gi +# kube_master_pid_reserved: "1000" + +## Optionally reserve resources for OS system daemons. +# system_reserved: true +## Uncomment to override default values +## The following two items need to be set when system_reserved is true +# system_reserved_cgroups_for_service_slice: system.slice +# system_reserved_cgroups: "/{{ system_reserved_cgroups_for_service_slice }}" +# system_memory_reserved: 512Mi +# system_cpu_reserved: 500m +# system_ephemeral_storage_reserved: 2Gi +## Reservation for master hosts +# system_master_memory_reserved: 256Mi +# system_master_cpu_reserved: 250m +# system_master_ephemeral_storage_reserved: 2Gi + +## Eviction Thresholds to avoid system OOMs +# https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/#eviction-thresholds +# eviction_hard: {} +# eviction_hard_control_plane: {} + +# An alternative flexvolume plugin directory +# kubelet_flexvolumes_plugins_dir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec + +## Supplementary addresses that can be added in kubernetes ssl keys. +## That can be useful for example to setup a keepalived virtual IP +# supplementary_addresses_in_ssl_keys: [10.0.0.1, 10.0.0.2, 10.0.0.3] + +## Running on top of openstack vms with cinder enabled may lead to unschedulable pods due to NoVolumeZoneConflict restriction in kube-scheduler. 
+## Nvidia GPU driver install. Install will be done by a (init) pod running as a daemonset.
+## Important: if you use Ubuntu then you should set in all.yml 'docker_storage_options: -s overlay2'
+## Array with nvidia_gpu_nodes, leave empty or comment if you don't want to install drivers.
+# tls_cipher_suites: {} +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_ECDSA_WITH_RC4_128_SHA +# - TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA +# - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 +# - TLS_ECDHE_RSA_WITH_RC4_128_SHA +# - TLS_RSA_WITH_3DES_EDE_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA +# - TLS_RSA_WITH_AES_128_CBC_SHA256 +# - TLS_RSA_WITH_AES_128_GCM_SHA256 +# - TLS_RSA_WITH_AES_256_CBC_SHA +# - TLS_RSA_WITH_AES_256_GCM_SHA384 +# - TLS_RSA_WITH_RC4_128_SHA + +## Amount of time to retain events. (default 1h0m0s) +event_ttl_duration: "1h0m0s" + +## Automatically renew K8S control plane certificates on first Monday of each month +auto_renew_certificates: false +# First Monday of each month +# auto_renew_certificates_systemd_calendar: "Mon *-*-1,2,3,4,5,6,7 03:{{ groups['kube_control_plane'].index(inventory_hostname) }}0:00" + +# kubeadm patches path +kubeadm_patches: + enabled: false + source_dir: "{{ inventory_dir }}/patches" + dest_dir: "{{ kube_config_dir }}/patches" diff --git a/openstack-enterprise/group_vars/k8s_cluster/k8s-net-calico.yml b/openstack-enterprise/group_vars/k8s_cluster/k8s-net-calico.yml new file mode 100644 index 000000000..cc0499d00 --- /dev/null +++ b/openstack-enterprise/group_vars/k8s_cluster/k8s-net-calico.yml @@ -0,0 +1,131 @@ +--- +# see roles/network_plugin/calico/defaults/main.yml + +# the default value of name +calico_cni_name: k8s-pod-network + +## With calico it is possible to distributed routes with border routers of the datacenter. 
+# If doing peering with node-assigned asn where the global AS does not match your nodes, you want this
+# IP in IP and VXLAN are mutually exclusive modes.
+    """Return the smallest power of 2 greater than or equal to a numeric value.
+ :param value: Number to find the smallest power of 2 + :type value: ``int`` + :returns: ``int`` + """ + return 2**(int(value) - 1).bit_length() + + +class FilterModule(object): + """Ansible jinja2 filters.""" + + @staticmethod + def filters(): + return { + 'bit_length_power_of_2': bit_length_power_of_2 + } diff --git a/playbooks/host-setup.yml b/playbooks/host-setup.yml new file mode 100644 index 000000000..2d58184d7 --- /dev/null +++ b/playbooks/host-setup.yml @@ -0,0 +1,36 @@ +--- +# Copyright 2024-Present, Rackspace Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+  Initial release of a genestack installer for Rackspace OpenStack environments
+# The set value has a max allowable value of 8192 unless set by the user.
+host_nf_conntrack_max: 262144 + +# System control kernel tuning +kernel_options: + - { key: 'fs.inotify.max_user_watches', value: 1048576 } + - { key: 'net.ipv4.conf.all.rp_filter', value: "{{ host_rp_filter_all }}" } + - { key: 'net.ipv4.conf.default.rp_filter', value: "{{ host_rp_filter_default }}" } + - { key: 'net.ipv4.ip_forward', value: 1 } + - { key: 'net.netfilter.nf_conntrack_max', value: "{{ host_nf_conntrack_max }}" } + - { key: 'net.core.netdev_max_backlog', value: 4096 } + - { key: 'net.core.somaxconn', value: 8192 } + - { key: 'vm.dirty_background_ratio', value: 5 } + - { key: 'vm.dirty_ratio', value: 10 } + - { key: 'vm.swappiness', value: 5 } + - { key: 'net.bridge.bridge-nf-call-ip6tables', value: 1 } + - { key: 'net.bridge.bridge-nf-call-iptables', value: 1 } + - { key: 'net.bridge.bridge-nf-call-arptables', value: 1 } + - { key: 'net.ipv4.igmp_max_memberships', value: 1024 } + - { key: 'net.ipv4.neigh.default.gc_thresh1', value: "{{ set_gc_val | int // 2 }}" } + - { key: 'net.ipv4.neigh.default.gc_thresh2', value: "{{ set_gc_val | int }}" } + - { key: 'net.ipv4.neigh.default.gc_thresh3', value: "{{ set_gc_val | int * 2 }}" } + - { key: 'net.ipv4.route.gc_thresh', value: "{{ set_gc_val | int * 2 }}" } + - { key: 'net.ipv4.neigh.default.gc_interval', value: 60 } + - { key: 'net.ipv4.neigh.default.gc_stale_time', value: 120 } + - { key: 'net.ipv6.neigh.default.gc_thresh1', value: "{{ set_gc_val | int // 2 }}" } + - { key: 'net.ipv6.neigh.default.gc_thresh2', value: "{{ set_gc_val | int }}" } + - { key: 'net.ipv6.neigh.default.gc_thresh3', value: "{{ set_gc_val | int * 2 }}" } + - { key: 'net.ipv6.route.gc_thresh', value: "{{ set_gc_val | int * 2 }}" } + - { key: 'net.ipv6.neigh.default.gc_interval', value: 60 } + - { key: 'net.ipv6.neigh.default.gc_stale_time', value: 120 } + - { key: 'net.ipv6.conf.lo.disable_ipv6', value: 0 } + - { key: 'fs.aio-max-nr', value: 131072 } + + +## kernel modules for specific group hosts +host_specific_kernel_modules: 
[] +# If you want to include some specific modules per group +# of hosts, override this with a group/host var, like below: +# host_specific_kernel_modules: +# - name: "ebtables" +# pattern: "CONFIG_BRIDGE_NF_EBTABLES" +## Where: +## :param name: name of the kernel module +## :param pattern: pattern to grep for in /boot/config-$kernel_version to check how module is configured inside kernel +## Our default overrides will be combined with your overrides. + +# Optional user defined list of sysctl options in the same dict item format as +# above. +user_kernel_options: [] diff --git a/roles/host_setup/handlers/main.yml b/roles/host_setup/handlers/main.yml new file mode 100644 index 000000000..3163e69e0 --- /dev/null +++ b/roles/host_setup/handlers/main.yml @@ -0,0 +1,34 @@ +--- +# Copyright 2024, Rackspace Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +- name: Restart sysstat + service: + name: "sysstat" + state: "restarted" + enabled: "yes" + +- name: Restart systemd-journald + service: + name: systemd-journald + state: restarted + enabled: yes + register: _restart + until: _restart is success + retries: 5 + delay: 2 + +- name: Systemd daemon reload + systemd: + daemon_reload: yes diff --git a/roles/host_setup/tasks/configure_hosts.yml b/roles/host_setup/tasks/configure_hosts.yml new file mode 100644 index 000000000..cb454b63c --- /dev/null +++ b/roles/host_setup/tasks/configure_hosts.yml @@ -0,0 +1,67 @@ +--- +# Copyright 2024, Rackspace Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Check Kernel Version + fail: + msg: > + Wrong kernel Version found + [ {{ ansible_facts['kernel'] }} < {{ host_required_kernel }} ] + Resolve this issue before continuing. 
+ when: + - ansible_facts['kernel'] is version(host_required_kernel, '<') + +- name: Check how kernel modules are implemented (statically builtin, dynamic, not set) + slurp: + src: "/boot/config-{{ ansible_facts['kernel'] }}" + register: modules + when: + - host_specific_kernel_modules | length > 0 + +- name: Fail fast if we can't load a module + fail: + msg: "{{ item.pattern }} is not set" + with_items: "{{ host_specific_kernel_modules }}" + when: + - item.pattern is defined + - (modules.content | b64decode).find(item.pattern + ' is not set') != -1 + +- name: "Load kernel module(s)" + modprobe: + name: "{{ item.name }}" + with_items: "{{ host_kernel_modules + host_specific_kernel_modules }}" + when: + - item.name | length > 0 + - item.pattern is undefined or (item.pattern is defined and (modules.content | b64decode).find(item.pattern + '=m') != -1) + +- name: Write list of modules to load at boot + template: + src: modprobe.conf.j2 + dest: "{{ host_module_file }}" + mode: "0644" + +- name: Adding new system tuning + sysctl: + name: "{{ item.key }}" + value: "{{ item.value }}" + sysctl_set: "{{ item.set | default('yes') }}" + state: "{{ item.state | default('present') }}" + reload: no + with_items: "{{ kernel_options + user_kernel_options }}" + failed_when: false + +- name: Configure sysstat + include_tasks: sysstat.yml + when: + - host_sysstat_enabled | bool diff --git a/roles/host_setup/tasks/main.yml b/roles/host_setup/tasks/main.yml new file mode 100644 index 000000000..2e5f16a03 --- /dev/null +++ b/roles/host_setup/tasks/main.yml @@ -0,0 +1,112 @@ +--- +# Copyright 2024, Rackspace Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+- name: Ensure environment is applied during sudo
group: "root" + mode: "0644" + config_overrides: "{{ systemd_global_overrides }}" + config_type: ini + notify: Systemd daemon reload + when: systemd_global_overrides is defined + +- name: Remove the blacklisted packages + package: + name: "{{ host_package_list | selectattr('state', 'equalto', 'absent') | map(attribute='name') | list }}" + state: absent + +# Configure bare metal nodes: Kernel, sysctl, sysstat, hosts files packages +- name: Including configure_hosts tasks + include_tasks: configure_hosts.yml + args: + apply: + tags: + - hosts-install + tags: + - always + +- name: Update package cache + apt: + update_cache: true + cache_valid_time: 600 + when: ansible_facts['os_family'] | lower == 'debian' + +- name: Install distro packages + package: + name: "{{ host_distro_packages }}" + state: "{{ host_package_state }}" + when: + - host_distro_packages | length > 0 + register: install_packages + until: install_packages is success + retries: 5 + delay: 2 + +- name: Install user defined extra distro packages + package: + name: "{{ host_extra_distro_packages }}" + state: "{{ host_package_state }}" + when: + - host_extra_distro_packages | length > 0 + register: install_packages + until: install_packages is success + retries: 5 + delay: 2 + diff --git a/roles/host_setup/tasks/sysstat.yml b/roles/host_setup/tasks/sysstat.yml new file mode 100644 index 000000000..720c2da55 --- /dev/null +++ b/roles/host_setup/tasks/sysstat.yml @@ -0,0 +1,38 @@ +--- +# Copyright 2024, Rackspace Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +- name: Enable sysstat config + template: + src: "sysstat.default.j2" + dest: "{{ host_sysstat_file }}" + mode: "0644" + when: + - ansible_facts['pkg_mgr'] == 'apt' + notify: Restart sysstat + +- name: Enable sysstat cron + template: + src: "{{ host_cron_template }}" + dest: "{{ host_sysstat_cron_file }}" + mode: "{{ host_sysstat_cron_mode }}" + setype: "{{ (ansible_facts['selinux']['status'] == 'enabled') | ternary('system_cron_spool_t', omit) }}" + +- name: Start and enable the sysstat service + service: + name: sysstat + state: started + enabled: yes + when: + - ansible_facts['pkg_mgr'] == 'dnf' diff --git a/roles/host_setup/templates/environment.j2 b/roles/host_setup/templates/environment.j2 new file mode 100644 index 000000000..3aab5a6a3 --- /dev/null +++ b/roles/host_setup/templates/environment.j2 @@ -0,0 +1,7 @@ +PATH="{{ host_environment_path | join(':') }}" +REQUESTS_CA_BUNDLE="{{ ca_bundle_path }}" +{% for key, value in global_environment_variables.items() %} +{% if value %} +{{ key }}={{ value }} +{% endif %} +{% endfor %} diff --git a/roles/host_setup/templates/modprobe.conf.j2 b/roles/host_setup/templates/modprobe.conf.j2 new file mode 100644 index 000000000..c719b4d39 --- /dev/null +++ b/roles/host_setup/templates/modprobe.conf.j2 @@ -0,0 +1,5 @@ +# {{ ansible_managed }} +# Modules from the genestack host_setup role +{% for module in host_kernel_modules + host_specific_kernel_modules %} +{{ module.name }} +{% endfor %} diff --git a/roles/host_setup/templates/sysstat.cron.debian.j2 b/roles/host_setup/templates/sysstat.cron.debian.j2 new file mode 100644 index 000000000..db046ebf2 --- /dev/null +++ b/roles/host_setup/templates/sysstat.cron.debian.j2 @@ -0,0 +1,10 @@ +# {{ ansible_managed }} + +# The first element of the path is a directory where the debian-sa1 script is located +PATH=/usr/lib/sysstat:/usr/sbin:/usr/sbin:/usr/bin:/sbin:/bin + +# 
Activity reports every 10 minutes everyday +*/{{ host_sysstat_interval }} * * * * root command -v debian-sa1 > /dev/null && debian-sa1 1 1 + +# Additional run at 23:59 to rotate the statistics file +59 {{ host_sysstat_statistics_hour }} * * * root command -v debian-sa1 > /dev/null && debian-sa1 60 2 diff --git a/roles/host_setup/templates/sysstat.default.j2 b/roles/host_setup/templates/sysstat.default.j2 new file mode 100644 index 000000000..ae9d42805 --- /dev/null +++ b/roles/host_setup/templates/sysstat.default.j2 @@ -0,0 +1,11 @@ +# {{ ansible_managed }} + +# +# Default settings for /etc/init.d/sysstat, /etc/cron.d/sysstat +# and /etc/cron.daily/sysstat files +# + +# Should sadc collect system activity informations? Valid values +# are "true" and "false". Please do not put other values, they +# will be overwritten by debconf! +ENABLED="{{ host_sysstat_enabled | bool | lower }}" diff --git a/roles/host_setup/templates/systemd-environment.j2 b/roles/host_setup/templates/systemd-environment.j2 new file mode 100644 index 000000000..4575cd60b --- /dev/null +++ b/roles/host_setup/templates/systemd-environment.j2 @@ -0,0 +1,4 @@ +# {{ ansible_managed }} + +[Manager] +DefaultEnvironment=REQUESTS_CA_BUNDLE={{ ca_bundle_path }} diff --git a/roles/host_setup/vars/ubuntu.yml b/roles/host_setup/vars/ubuntu.yml new file mode 100644 index 000000000..df3b0715c --- /dev/null +++ b/roles/host_setup/vars/ubuntu.yml @@ -0,0 +1,63 @@ +--- + +## Defined required kernel +host_required_kernel: 5.4.0-0-generic +host_sysstat_file: /etc/default/sysstat +host_sysstat_cron_file: /etc/cron.d/sysstat +host_cron_template: sysstat.cron.debian.j2 +host_module_file: /etc/modules + +## Kernel modules loaded on hosts +host_kernel_modules: + - name: 8021q + - name: br_netfilter + - name: dm_multipath + - name: dm_snapshot + - name: ebtables + - name: ip6table_filter + - name: ip6_tables + - name: ip_tables + - name: xt_MASQUERADE + - name: ipt_REJECT + - name: iptable_filter + - name: iptable_mangle 
+ - name: iptable_nat + - name: ip_vs + - name: iscsi_tcp + - name: nbd + - name: nf_conntrack + - name: nf_defrag_ipv4 + - name: nf_nat + - name: vhost_net + - name: x_tables + +## Bare metal base packages +_host_distro_packages: + - acl + - apt-utils + - apparmor-utils + - apt-transport-https + - bridge-utils + - cgroup-lite + - curl + - dmeventd + - dstat + - ebtables + - htop + - iptables + - irqbalance + - libkmod2 + - lvm2 + - rsync + - software-properties-common + - sysstat + - time + - vlan + - wget + +_hosts_package_list: + - name: ubuntu-cloud-keyring + state: "{{ host_package_state }}" + - name: ca-certificates + state: latest + diff --git a/scripts/convert_osa_inventory.py b/scripts/convert_osa_inventory.py new file mode 100755 index 000000000..435ab8129 --- /dev/null +++ b/scripts/convert_osa_inventory.py @@ -0,0 +1,198 @@ +#!/usr/bin/python3 + +# Copyright 2019-Present, Rackspace Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+# (C) 2019, Bjoern Teipel
+#
+
+from ruamel.yaml import YAML
+from pprint import pprint
+
+import argparse
+import copy
+import json
+import logging
+import sys
+
+
+logger = logging.getLogger('convert_osa_inventory')
+
+inventory_skel = { 'all': {
+        'hosts': {},
+        'children' : {},
+        'vars': {},
+    }
+}
+
+def parse_args(args):
+    parser = argparse.ArgumentParser(
+        usage='%(prog)s',
+        description='OSA Inventory Converter to k8s',
+        epilog='Generator Licensed "Apache 2.0"')
+
+    parser.add_argument(
+        '-d',
+        '--debug',
+        help=('Append output debug messages to log file.'),
+        action='store_true',
+        default=False,
+    )
+
+    parser.add_argument(
+        '--hosts_file',
+        help=('Defines the output file for the k8s hosts inventory'),
+        nargs='?',
+        default='/etc/genestack/inventory/hosts.yml',
+    )
+
+    parser.add_argument(
+        '--ansible_user',
+        help=('Set user for ansible logins with implies use of become directive'),
+        nargs='?',
+        default='root',
+    )
+
+    return vars(parser.parse_args(args))
+
+def load_osa_inventory(file='/etc/openstack_deploy/openstack_inventory.json'):
+    try:
+        with open(file) as fp:
+            inventory = json.load(fp)
+    except Exception as ex:
+        logger.debug('load_inventory: %s',ex)
+        raise SystemExit
+
+    return inventory
+
+def hosts_from_group(inventory=dict(), groups=list()):
+    hosts = list()
+
+    try:
+        for group in groups:
+            for host in inventory[group]['hosts']:
+                if host not in hosts:
+                    hosts.append(host)
+    except KeyError:
+        pass
+    finally:
+        return(hosts)
+
+
+def main(debug=False, **kwargs):
+    """Run the main application.
+    :param debug: ``bool`` enables debug logging
+    :param kwargs: ``dict`` for passing command line arguments
+    """
+
+    if debug:
+        log_fmt = "%(lineno)d - %(funcName)s: %(message)s"
+        logging.basicConfig(format=log_fmt, filename='convert_osa_inventory.log')
+        logger.setLevel(logging.DEBUG)
+
+    osa_inv = load_osa_inventory()
+    k8s_inv = copy.deepcopy(inventory_skel)
+    yaml_inv = YAML()
+
+    """ Determine basic hosts groups to build new inventory
+        and filter controller hosts from the nova_compute group
+        in situations where ironic compute services are deployed
+    """
+    controller_nodes = hosts_from_group(osa_inv, ['os-infra_hosts'])
+    #worker_nodes = [ h for h in hosts_from_group(osa_inv, ['nova_compute', 'storage_hosts', 'osds', 'mon'])
+    worker_nodes = [ h for h in hosts_from_group(osa_inv, ['nova_compute', 'storage_hosts'])
+                     if h not in hosts_from_group(osa_inv, ['os-infra_hosts']) ]
+
+    if (len(controller_nodes) < 3 or len(worker_nodes) < 1):
+        logger.debug('No controller_nodes/worker_nodes %s / %s',controller_nodes, worker_nodes)
+        raise SystemExit('Insufficient controller (os-infra_hosts) and '
+                         'worker hosts (compute etc) defined in OSA Inventory')
+
+    logger.debug('Controller Nodes: %s', controller_nodes)
+    logger.debug('Worker Nodes: %s', worker_nodes)
+
+    """ Constructing all group
+    """
+    for host in controller_nodes + worker_nodes:
+        try:
+            ansible_host = osa_inv['_meta']['hostvars'][host]['ansible_host']
+        except KeyError:
+            ansible_host = osa_inv['_meta']['hostvars'][host]['ansible_ssh_host']
+
+        become_user = kwargs['ansible_user']
+
+        k8s_inv['all']['hosts'][host] = { 'ansible_host': ansible_host,
+                                          'access_ip': ansible_host,
+                                          'become_user': become_user }
+        if become_user != "root":
+            k8s_inv['all']['hosts'][host]['ansible_become'] = 'yes'
+
+        logger.debug('Adding host (group all): %s %s', host, k8s_inv['all']['hosts'][host])
+
+    """ Constructing children
+    """
+    k8s_inv['all']['children'] = { 'kube-master': {
+                                       'hosts': {}
+                                   },
+                                   'kube-node': {
+                                       'hosts': {}
+                                   },
+                                   'etcd': {
+                                       'hosts': {}
+                                   },
+                                   'openstack_control_plane': {
+                                       'hosts': {}
+                                   },
+                                   'openstack-compute-node': {
+                                       'hosts': {}
+                                   },
+                                   'k8s-cluster': {
+                                       'children': {
+                                           'kube-master': {},
+                                           'kube-node': {},
+                                           'openstack_control_plane': {},
+                                           'openstack-compute-node': {},
+                                       }
+                                   },
+                                   'calico-rr': {
+                                       'hosts': {}
+                                   },
+                                 }
+
+    for group in ['kube-master', 'etcd', 'openstack_control_plane']:
+        for host in controller_nodes:
+            logger.debug('Adding host (group %s): %s', group, host)
+            k8s_inv['all']['children'][group]['hosts'][host] = {}
+
+    for group in ['kube-node','openstack-compute-node']:
+        for host in worker_nodes:
+            logger.debug('Adding host (group %s): %s', group, host)
+            k8s_inv['all']['children'][group]['hosts'][host] = {}
+
+    """ Dump dictionary to a human readable inventory
+        to stdout. This can be used to create the inventory file
+    """
+    try:
+        with open(kwargs['hosts_file'], 'w') as hosts_yaml:
+            yaml_inv.dump(k8s_inv, hosts_yaml)
+
+        logger.info('Inventory written to: %s', kwargs['hosts_file'])
+        hosts_yaml.close()
+    except Exception as ex:
+        logger.error('Could not dump YAML to file: %s', kwargs['hosts_file'])
+        raise SystemExit
+
+if __name__ == "__main__":
+    args = parse_args(sys.argv[1:])
+    main(**args)
diff --git a/scripts/genestack.rc b/scripts/genestack.rc
new file mode 100644
index 000000000..738ae3021
--- /dev/null
+++ b/scripts/genestack.rc
@@ -0,0 +1,26 @@
+
+# Automation variables
+export GENESTACK_CONFIG="${GENESTACK_CONFIG:-/etc/genestack}"
+export SKIP_PROMPTS="${SKIP_PROMPTS:-false}"
+export ANSIBLE_FORKS="${ANSIBLE_FORKS:-24}"
+export ANSIBLE_FILTER_PLUGINS="${ANSIBLE_FILTER_PLUGINS:-/opt/genestack/playbooks/filter_plugins}"
+export ANSIBLE_COLLECTION_FILE=${ANSIBLE_COLLECTION_FILE:-"ansible-collection-requirements.yml"}
+export ANSIBLE_INVENTORY="${ANSIBLE_INVENTORY:-${GENESTACK_CONFIG}/inventory}"
+export ANSIBLE_INVENTORY_IGNORE_REGEX="${ANSIBLE_INVENTORY_IGNORE_REGEX:-'artifacts,credentials'}"
+export
USER_COLLECTION_FILE=${USER_COLLECTION_FILE:-"user-collection-requirements.yml"}
+
+test -f "${GENESTACK_CONFIG}/provider" 2>/dev/null && export K8S_PROVIDER=$(head -n1 ${GENESTACK_CONFIG}/provider)
+export K8S_PROVIDER="${K8S_PROVIDER:-kubespray}"
+
+test -f "${GENESTACK_CONFIG}/product" 2>/dev/null && export GENESTACK_PRODUCT=$(head -n1 ${GENESTACK_CONFIG}/product)
+export GENESTACK_PRODUCT=${GENESTACK_PRODUCT:-openstack-enterprise}
+
+# Export OSH variables
+export CONTAINER_DISTRO_NAME=ubuntu
+export CONTAINER_DISTRO_VERSION=jammy
+export OSH_DEPLOY_MULTINODE=True
+
+## OpenStack Antelope 2023.1
+export OPENSTACK_RELEASE=2023.1
+
+test -f ~/.venvs/genestack/bin/activate 2>/dev/null && source ~/.venvs/genestack/bin/activate
diff --git a/scripts/get-ansible-collection-requirements.yml b/scripts/get-ansible-collection-requirements.yml
new file mode 100644
index 000000000..643450668
--- /dev/null
+++ b/scripts/get-ansible-collection-requirements.yml
@@ -0,0 +1,73 @@
+---
+# Copyright 2024-Present, Rackspace Technology, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +- name: Clone the role ansible-role-requirements + hosts: localhost + connection: local + user: root + gather_facts: false + tasks: + - name: Read the list of user collections + set_fact: + user_collection_names: "{{ user_collections.collections | default([]) | map(attribute='name') | list }}" + + - name: Generate a list of required collections excluding user overridden collections + set_fact: + galaxy_collections_list : "{{ galaxy_collections_list + [ item ] }}" + when: + - item.name not in user_collection_names + with_items: "{{ required_collections.collections }}" + + - name: Append user collections to filtered required collections + set_fact: + galaxy_collections_list: "{{ galaxy_collections_list + [ item ] }}" + with_items: "{{ user_collections.collections }}" + when: + - user_collections.collections is defined + + - name: Create temporary file for galaxy collection requirements + tempfile: + register: collection_requirements_tmpfile + + - name: Copy content into galaxy collection requirements temporary file + copy: + content: "{{ galaxy_collections | to_nice_yaml }}" + dest: "{{ collection_requirements_tmpfile.path }}" + + - name: Install collection requirements with ansible galaxy + command: > + ansible-galaxy collection install --force + -r "{{ collection_requirements_tmpfile.path }}" + register: collection_install + until: collection_install is success + retries: 5 + delay: 2 + + - name: Show collection install output + debug: msg="{{ collection_install.stdout.split('\n') }}" + + - name: Clean up temporary file + file: + path: "{{ collection_requirements_tmpfile.path }}" + state: absent + + vars: + galaxy_collections_list: [] + galaxy_collections: + collections: "{{ galaxy_collections_list }}" + collections_file: "/opt/genestack/ansible-collection-requirements.yml" + required_collections: "{{ lookup('file', collections_file) | from_yaml }}" + #user_collections: "{{ lookup('file', user_collections_path, errors='ignore')|default([], true) | from_yaml }}" 
+ #user_collections_path: "{{ lookup('env', 'OSA_CONFIG_DIR') | default('/etc/openstack_deploy', true) ~ '/' ~ (user_collection_file|default('')) }}" diff --git a/scripts/lib/functions.sh b/scripts/lib/functions.sh new file mode 100644 index 000000000..828061148 --- /dev/null +++ b/scripts/lib/functions.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +# Copyright 2024-Present, Rackspace Technology, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Globals +source /opt/genestack/scripts/genestack.rc + +export SUDO_CMD="" +sudo -l |grep -q NOPASSWD && SUDO_CMD="/usr/bin/sudo -n " + +test -f ~/.rackspace/datacenter && export RAX_DC="$(cat ~/.rackspace/datacenter |tr '[:upper:]' '[:lower:]')" +test -f /etc/openstack_deploy/openstack_inventory.json && export RPC_CONFIG_IN_PLACE=true || export RPC_CONFIG_IN_PLACE=false + + + # Global functions +function success { + echo -e "\n\n\x1B[32m>> $1\x1B[39m" +} + +function error { + >&2 echo -e "\n\n\x1B[31m>> $1\x1B[39m" + exit 1 +} + +function message { + echo -n -e "\n\x1B[32m$1\x1B[39m" +}