diff --git a/.ansible/.lock b/.ansible/.lock deleted file mode 100644 index e69de29bb..000000000 diff --git a/.github/workflows/enterprise-patching.yaml b/.github/workflows/enterprise-patching.yaml new file mode 100644 index 000000000..ff5ad6ed3 --- /dev/null +++ b/.github/workflows/enterprise-patching.yaml @@ -0,0 +1,129 @@ +name: Patch and Retag Images + +on: + workflow_dispatch: + workflow_run: + workflows: ["Migrate Images to QUAY"] + types: + - completed + branches: + - main + +jobs: + generate-matrix: + runs-on: ubuntu-latest + if: ${{ github.event.workflow_run.conclusion == 'success' || github.event_name == 'workflow_dispatch' }} + outputs: + images: ${{ steps.generate-matrix.outputs.images }} + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + + - name: Generate Matrix + id: generate-matrix + run: | + images=$(jq -r '.[]' .original-images.json | jq -R -s -c 'split("\n") | map(select(length > 0))') + echo "images=$images" >> $GITHUB_OUTPUT + + patch-and-retag: + needs: generate-matrix + runs-on: ubuntu-latest + strategy: + matrix: + image: ${{ fromJson(needs.generate-matrix.outputs.images) }} + fail-fast: false + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker-container + driver-opts: | + image=moby/buildkit:master + network=host + + - name: Install Copacetic + run: | + wget https://github.com/project-copacetic/copacetic/releases/download/v0.9.0/copa_0.9.0_linux_amd64.tar.gz + tar -xzf copa_0.9.0_linux_amd64.tar.gz + chmod +x copa + sudo mv copa /usr/local/bin/ + + - name: Install Trivy + run: | + TRIVY_VERSION="0.55.0" + wget https://github.com/aquasecurity/trivy/releases/download/v${TRIVY_VERSION}/trivy_${TRIVY_VERSION}_Linux-64bit.tar.gz + tar -xzf trivy_${TRIVY_VERSION}_Linux-64bit.tar.gz + chmod +x trivy + sudo mv trivy /usr/local/bin/ + + - name: Login to Quay.io + uses: docker/login-action@v3 + with: + registry: quay.io + username: ${{ secrets.QUAY_USER }} + password: ${{ secrets.QUAY_TOKEN }} + + - name: Process Image + run: | + sudo apt-get update && sudo apt-get install -y jq python3-pip + image="${{ matrix.image }}" + echo "Processing $image" + base_name=$(echo "$image" | awk -F'/' '{print $NF}' | cut -d':' -f1) + tag=$(echo "$image" | awk -F':' '{print $NF}') + new_image="quay.io/rackspace/rackerlabs-${base_name}:${tag}" + patched_tag="${tag}-enterprise" + patched_image="quay.io/rackspace/rackerlabs-${base_name}:${patched_tag}" + + # Pull the image + docker pull "$new_image" || { echo "Failed to pull $new_image"; exit 1; } + + # Scan all vulnerabilities (OS and language-specific) + trivy image -f json -o "report-${base_name}-${tag}.json" "$new_image" || { echo "Failed to scan $new_image"; exit 1; } + + # Scan OS vulnerabilities with fixes for Copacetic + trivy image --vuln-type os --ignore-unfixed -f json -o "os-report-${base_name}-${tag}.json" "$new_image" || { echo "Failed to scan OS vulnerabilities for $new_image"; exit 1; } + + # Attempt to patch OS vulnerabilities; set intermediate image + if copa patch -i "$new_image" -r "os-report-${base_name}-${tag}.json" -t "$patched_tag"; then + echo "Patched OS vulnerabilities in $new_image" + intermediate_image="$patched_image" + else + echo "No OS vulnerabilities patched for $new_image" + intermediate_image="$new_image" + fi + + # Filter cve/requirements.txt to only update installed packages + docker run --rm -v "$(pwd):/output" "$intermediate_image" sh -c "/var/lib/openstack/bin/pip3 list 
--format=json > /output/installed.json 2>/dev/null || echo '[]' > /output/installed.json" + python3 cve/filter.py + + if [ -s "filtered-requirements.txt" ]; then + echo "Applying Python package updates from cve/requirements.txt" + echo "FROM $intermediate_image" > Dockerfile.temp + echo "COPY filtered-requirements.txt /tmp/filtered-requirements.txt" >> Dockerfile.temp + echo "RUN /var/lib/openstack/bin/pip3 install -r /tmp/filtered-requirements.txt" >> Dockerfile.temp + docker build -f Dockerfile.temp -t "$patched_image" . || { echo "Failed to build $patched_image with Python patches"; exit 1; } + intermediate_image="$patched_image" + else + echo "No Python packages updated from cve/requirements.txt" + fi + + # Flatten the image + echo "Flattening $patched_image" + container_id=$(docker create "$intermediate_image") + docker export "$container_id" > "flattened-${base_name}-${patched_tag}.tar" + docker import "flattened-${base_name}-${patched_tag}.tar" "$patched_image" + docker rm "$container_id" + rm "flattened-${base_name}-${patched_tag}.tar" + + # Push the flattened image + docker push "$patched_image" || { echo "Failed to push $patched_image"; exit 1; } + echo "Pushed $patched_image" + + # Clean up + rm -f "report-${base_name}-${tag}.json" "os-report-${base_name}-${tag}.json" filtered-requirements.txt Dockerfile.temp installed.json requirements.txt + +env: + DOCKER_CLI_EXPERIMENTAL: enabled diff --git a/.github/workflows/helm-argocd.yaml b/.github/workflows/helm-argocd.yaml new file mode 100644 index 000000000..fed2a840e --- /dev/null +++ b/.github/workflows/helm-argocd.yaml @@ -0,0 +1,45 @@ +name: Helm GitHub Actions for argocd + +on: + pull_request: + paths: + - base-kustomize/argocd/** + - base-helm-configs/argocd/** + - .github/workflows/helm-argocd.yaml +jobs: + helm: + strategy: + matrix: + overlays: + - base + name: Helm + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - uses: azure/setup-helm@v3 + with: + version: latest + token: "${{ secrets.GITHUB_TOKEN }}" + id: helm + - name: Kubectl Install + working-directory: /usr/local/bin/ + run: | + if [ ! 
-f /usr/local/bin/kubectl ]; then + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + chmod +x ./kubectl + fi + - name: Run Helm Template + run: | + ${{ steps.helm.outputs.helm-path }} template argocd oci://registry-1.docker.io/bitnamicharts/argo-cd \ + --namespace=argocd \ + --wait \ + --timeout 120m \ + -f ${{ github.workspace }}/base-helm-configs/argocd/helm-argocd-overrides.yaml \ + --post-renderer ${{ github.workspace }}/base-kustomize/kustomize.sh \ + --post-renderer-args argocd/${{ matrix.overlays }} > /tmp/rendered.yaml + - name: Return helm Build + uses: actions/upload-artifact@v4 + with: + name: helm-argocd-artifact-${{ matrix.overlays }} + path: /tmp/rendered.yaml diff --git a/.github/workflows/helm-barbican.yaml b/.github/workflows/helm-barbican.yaml index 26b030316..f0755a430 100644 --- a/.github/workflows/helm-barbican.yaml +++ b/.github/workflows/helm-barbican.yaml @@ -30,18 +30,13 @@ jobs: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" chmod +x ./kubectl fi - - name: Pull OSH submodules + - name: Pull OSH repositories run: | - git submodule update --init submodules/openstack-helm - git submodule update --init submodules/openstack-helm-infra - - name: Make OSH submodule - run: | - cd submodules/openstack-helm - make barbican + helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm + helm repo update - name: Run Helm Template run: | - cd submodules/openstack-helm - ${{ steps.helm.outputs.helm-path }} template barbican ./barbican \ + ${{ steps.helm.outputs.helm-path }} template barbican openstack-helm/barbican \ --namespace=openstack \ --wait \ --timeout 120m \ diff --git a/.github/workflows/helm-cinder.yaml b/.github/workflows/helm-cinder.yaml index d1e646e57..dc5112eb1 100644 --- a/.github/workflows/helm-cinder.yaml +++ b/.github/workflows/helm-cinder.yaml @@ -30,18 +30,13 @@ jobs: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" chmod +x ./kubectl fi - - name: Pull OSH submodules + - name: Pull OSH repositories run: | - git submodule update --init submodules/openstack-helm - git submodule update --init submodules/openstack-helm-infra - - name: Make OSH submodule - run: | - cd submodules/openstack-helm - make cinder + helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm + helm repo update - name: Run Helm Template run: | - cd submodules/openstack-helm - ${{ steps.helm.outputs.helm-path }} template cinder ./cinder \ + ${{ steps.helm.outputs.helm-path }} template cinder openstack-helm/cinder \ --namespace=openstack \ --wait \ --timeout 120m \ diff --git a/.github/workflows/helm-glance.yaml b/.github/workflows/helm-glance.yaml index fc8482f7b..15b7e90fc 100644 --- a/.github/workflows/helm-glance.yaml +++ b/.github/workflows/helm-glance.yaml @@ -30,18 +30,13 @@ jobs: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" chmod +x ./kubectl fi - - name: Pull OSH submodules + - name: Pull OSH repositories run: | - git submodule update --init submodules/openstack-helm - git submodule update --init submodules/openstack-helm-infra - - name: Make OSH submodule - run: | - cd submodules/openstack-helm - make glance + helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm + helm repo update - name: Run Helm Template run: | - cd submodules/openstack-helm - ${{ 
steps.helm.outputs.helm-path }} template glance ./glance \ + ${{ steps.helm.outputs.helm-path }} template glance openstack-helm/glance \ --namespace=openstack \ --wait \ --timeout 120m \ diff --git a/.github/workflows/helm-heat.yaml b/.github/workflows/helm-heat.yaml index 18afda13b..fac14878e 100644 --- a/.github/workflows/helm-heat.yaml +++ b/.github/workflows/helm-heat.yaml @@ -30,18 +30,13 @@ jobs: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" chmod +x ./kubectl fi - - name: Pull OSH submodules + - name: Pull OSH repositories run: | - git submodule update --init submodules/openstack-helm - git submodule update --init submodules/openstack-helm-infra - - name: Make OSH submodule - run: | - cd submodules/openstack-helm - make heat + helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm + helm repo update - name: Run Helm Template run: | - cd submodules/openstack-helm - ${{ steps.helm.outputs.helm-path }} template heat ./heat \ + ${{ steps.helm.outputs.helm-path }} template heat openstack-helm/heat \ --namespace=openstack \ --wait \ --timeout 120m \ diff --git a/.github/workflows/helm-horizon.yaml b/.github/workflows/helm-horizon.yaml index a828e2ac8..95eb565f9 100644 --- a/.github/workflows/helm-horizon.yaml +++ b/.github/workflows/helm-horizon.yaml @@ -30,18 +30,13 @@ jobs: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" chmod +x ./kubectl fi - - name: Pull OSH submodules + - name: Pull OSH repositories run: | - git submodule update --init submodules/openstack-helm - git submodule update --init submodules/openstack-helm-infra - - name: Make OSH submodule - run: | - cd submodules/openstack-helm - make horizon + helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm + helm repo update - name: Run Helm Template run: | - cd submodules/openstack-helm - ${{ steps.helm.outputs.helm-path }} template horizon ./horizon \ + ${{ steps.helm.outputs.helm-path }} template horizon openstack-helm/horizon \ --namespace=openstack \ --wait \ --timeout 120m \ diff --git a/.github/workflows/helm-keystone.yaml b/.github/workflows/helm-keystone.yaml index c507ce209..1e02f751c 100644 --- a/.github/workflows/helm-keystone.yaml +++ b/.github/workflows/helm-keystone.yaml @@ -30,18 +30,13 @@ jobs: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" chmod +x ./kubectl fi - - name: Pull OSH submodules + - name: Pull OSH repositories run: | - git submodule update --init submodules/openstack-helm - git submodule update --init submodules/openstack-helm-infra - - name: Make OSH submodule - run: | - cd submodules/openstack-helm - make keystone + helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm + helm repo update - name: Run Helm Template run: | - cd submodules/openstack-helm - ${{ steps.helm.outputs.helm-path }} template keystone ./keystone \ + ${{ steps.helm.outputs.helm-path }} template keystone openstack-helm/keystone \ --namespace=openstack \ --wait \ --timeout 120m \ diff --git a/.github/workflows/helm-libvirt.yaml b/.github/workflows/helm-libvirt.yaml index cce0a7027..870d91c1b 100644 --- a/.github/workflows/helm-libvirt.yaml +++ b/.github/workflows/helm-libvirt.yaml @@ -28,18 +28,13 @@ jobs: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" chmod +x ./kubectl fi - - name: Pull 
OSH submodules + - name: Pull OSH repositories run: | - git submodule update --init submodules/openstack-helm - git submodule update --init submodules/openstack-helm-infra - - name: Make OSH submodule - run: | - cd submodules/openstack-helm-infra - make libvirt + helm repo add openstack-helm-infra https://tarballs.opendev.org/openstack/openstack-helm-infra + helm repo update - name: Run Helm Template run: | - cd submodules/openstack-helm-infra - ${{ steps.helm.outputs.helm-path }} template libvirt ./libvirt \ + ${{ steps.helm.outputs.helm-path }} template libvirt openstack-helm-infra/libvirt \ --namespace=openstack \ --wait \ --timeout 120m \ diff --git a/.github/workflows/helm-magnum.yaml b/.github/workflows/helm-magnum.yaml index e6fff7eae..022d4cf22 100644 --- a/.github/workflows/helm-magnum.yaml +++ b/.github/workflows/helm-magnum.yaml @@ -30,18 +30,13 @@ jobs: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" chmod +x ./kubectl fi - - name: Pull OSH submodules + - name: Pull OSH repositories run: | - git submodule update --init submodules/openstack-helm - git submodule update --init submodules/openstack-helm-infra - - name: Make OSH submodule - run: | - cd submodules/openstack-helm - make magnum + helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm + helm repo update - name: Run Helm Template run: | - cd submodules/openstack-helm - ${{ steps.helm.outputs.helm-path }} template magnum ./magnum \ + ${{ steps.helm.outputs.helm-path }} template magnum openstack-helm/magnum \ --namespace=openstack \ --wait \ --timeout 120m \ diff --git a/.github/workflows/helm-neutron.yaml b/.github/workflows/helm-neutron.yaml index 94749784b..e09469539 100644 --- a/.github/workflows/helm-neutron.yaml +++ b/.github/workflows/helm-neutron.yaml @@ -30,18 +30,13 @@ jobs: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" chmod +x ./kubectl fi - - name: Pull OSH submodules + - name: Pull OSH repositories run: | - git submodule update --init submodules/openstack-helm - git submodule update --init submodules/openstack-helm-infra - - name: Make OSH submodule - run: | - cd submodules/openstack-helm - make neutron + helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm + helm repo update - name: Run Helm Template run: | - cd submodules/openstack-helm - ${{ steps.helm.outputs.helm-path }} template neutron ./neutron \ + ${{ steps.helm.outputs.helm-path }} template neutron openstack-helm/neutron \ --namespace=openstack \ --wait \ --timeout 120m \ diff --git a/.github/workflows/helm-nova.yaml b/.github/workflows/helm-nova.yaml index d7e836e39..125b34a15 100644 --- a/.github/workflows/helm-nova.yaml +++ b/.github/workflows/helm-nova.yaml @@ -30,18 +30,13 @@ jobs: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" chmod +x ./kubectl fi - - name: Pull OSH submodules + - name: Pull OSH repositories run: | - git submodule update --init submodules/openstack-helm - git submodule update --init submodules/openstack-helm-infra - - name: Make OSH submodule - run: | - cd submodules/openstack-helm - make nova + helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm + helm repo update - name: Run Helm Template run: | - cd submodules/openstack-helm - ${{ steps.helm.outputs.helm-path }} template nova ./nova \ + ${{ steps.helm.outputs.helm-path }} template nova 
openstack-helm/nova \ --namespace=openstack \ --wait \ --timeout 120m \ diff --git a/.github/workflows/helm-octavia.yaml b/.github/workflows/helm-octavia.yaml index 21ef9128f..9589bee2e 100644 --- a/.github/workflows/helm-octavia.yaml +++ b/.github/workflows/helm-octavia.yaml @@ -30,18 +30,13 @@ jobs: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" chmod +x ./kubectl fi - - name: Pull OSH submodules + - name: Pull OSH repositories run: | - git submodule update --init submodules/openstack-helm - git submodule update --init submodules/openstack-helm-infra - - name: Make OSH submodule - run: | - cd submodules/openstack-helm - make octavia + helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm + helm repo update - name: Run Helm Template run: | - cd submodules/openstack-helm - ${{ steps.helm.outputs.helm-path }} template octavia ./octavia \ + ${{ steps.helm.outputs.helm-path }} template octavia openstack-helm/octavia \ --namespace=openstack \ --wait \ --timeout 120m \ diff --git a/.github/workflows/helm-placement.yaml b/.github/workflows/helm-placement.yaml index ceadb31a1..c7846c14c 100644 --- a/.github/workflows/helm-placement.yaml +++ b/.github/workflows/helm-placement.yaml @@ -30,18 +30,13 @@ jobs: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" chmod +x ./kubectl fi - - name: Pull OSH submodules + - name: Pull OSH repositories run: | - git submodule update --init submodules/openstack-helm - git submodule update --init submodules/openstack-helm-infra - - name: Make OSH submodule - run: | - cd submodules/openstack-helm - make placement + helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm + helm repo update - name: Run Helm Template run: | - cd submodules/openstack-helm - ${{ steps.helm.outputs.helm-path }} template placement ./placement \ + ${{ steps.helm.outputs.helm-path }} template placement openstack-helm/placement \ --namespace=openstack \ --wait \ --timeout 120m \ diff --git a/.github/workflows/helm-prometheus-snmp-exporter.yaml b/.github/workflows/helm-prometheus-snmp-exporter.yaml new file mode 100644 index 000000000..a124d4522 --- /dev/null +++ b/.github/workflows/helm-prometheus-snmp-exporter.yaml @@ -0,0 +1,41 @@ +name: Helm GitHub Actions for Prometheus SNMP Exporter + +on: + pull_request: + paths: + - base-helm-configs/prometheus-snmp-exporter/** + - base-kustomize/prometheus-snmp-exporter/** + - .github/workflows/helm-prometheus-snmp-exporter.yaml +jobs: + helm: + strategy: + matrix: + overlays: + - base + name: Helm + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - uses: azure/setup-helm@v3 + with: + version: latest + token: "${{ secrets.GITHUB_TOKEN }}" + id: helm + - name: Add prometheus-community repo to helm + run: | + ${{ steps.helm.outputs.helm-path }} repo add prometheus-community https://prometheus-community.github.io/helm-charts + ${{ steps.helm.outputs.helm-path }} repo update + - name: Run Helm Template + run: | + ${{ steps.helm.outputs.helm-path }} template prometheus-snmp-exporter prometheus-community/prometheus-snmp-exporter \ + --create-namespace \ + --namespace=prometheus \ + -f ${{ github.workspace }}//base-helm-configs/prometheus-snmp-exporter/values.yaml \ + --post-renderer ${{ github.workspace }}/base-kustomize/kustomize.sh \ + --post-renderer-args prometheus-snmp-exporter/${{ matrix.overlays }} > /tmp/rendered.yaml + - name: Return 
helm Build + uses: actions/upload-artifact@v4 + with: + name: helm-prometheus-artifact-${{ matrix.overlays }} + path: /tmp/rendered.yaml diff --git a/.github/workflows/helm-topolvm.yaml b/.github/workflows/helm-topolvm.yaml new file mode 100644 index 000000000..0855c97e6 --- /dev/null +++ b/.github/workflows/helm-topolvm.yaml @@ -0,0 +1,45 @@ +name: Helm GitHub Actions for topolvm + +on: + pull_request: + paths: + - base-kustomize/topolvm/** + - base-helm-configs/topolvm/** + - .github/workflows/helm-topolvm.yaml +jobs: + helm: + strategy: + matrix: + overlays: + - base + name: Helm + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - uses: azure/setup-helm@v3 + with: + version: latest + token: "${{ secrets.GITHUB_TOKEN }}" + id: helm + - name: Kubectl Install + working-directory: /usr/local/bin/ + run: | + if [ ! -f /usr/local/bin/kubectl ]; then + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + chmod +x ./kubectl + fi + - name: Run Helm Template + run: | + ${{ steps.helm.outputs.helm-path }} repo add topolvm https://topolvm.github.io/topolvm + ${{ steps.helm.outputs.helm-path }} repo update + ${{ steps.helm.outputs.helm-path }} template topolvm topolvm/topolvm \ + --create-namespace --namespace=topolvm-system --wait --timeout 120m \ + -f ${{ github.workspace }}/base-helm-configs/topolvm/helm-topolvm-overrides.yaml \ + --post-renderer ${{ github.workspace }}/base-kustomize/kustomize.sh \ + --post-renderer-args topolvm/${{ matrix.overlays }} > /tmp/rendered.yaml + - name: Return helm Build + uses: actions/upload-artifact@v4 + with: + name: helm-topolvm-artifact-${{ matrix.overlays }} + path: /tmp/rendered.yaml diff --git a/.github/workflows/kustomize-argocd.yaml b/.github/workflows/kustomize-argocd.yaml deleted file mode 100644 index 087b1c773..000000000 --- a/.github/workflows/kustomize-argocd.yaml +++ /dev/null @@ -1,37 +0,0 @@ -name: Kustomize GitHub Actions for argocd - -on: - pull_request: - paths: - - base-kustomize/argocd/** - - .github/workflows/kustomize-argocd.yaml -jobs: - kustomize: - strategy: - matrix: - overlays: - - base - name: Kustomize - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - uses: azure/setup-helm@v3 - with: - version: latest - token: "${{ secrets.GITHUB_TOKEN }}" - id: helm - - name: Kustomize Install - working-directory: /usr/local/bin/ - run: | - if [ ! 
-f /usr/local/bin/kustomize ]; then - curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | sudo bash - fi - - name: Run Kustomize Build - run: | - kustomize build base-kustomize/argocd/${{ matrix.overlays }} --enable-helm --helm-command ${{ steps.helm.outputs.helm-path }} > /tmp/rendered.yaml - - name: Return Kustomize Build - uses: actions/upload-artifact@v4 - with: - name: kustomize-argocd-artifact-${{ matrix.overlays }} - path: /tmp/rendered.yaml diff --git a/.github/workflows/kustomize-gateway-api-envoyproxy.yaml b/.github/workflows/kustomize-gateway-api-envoyproxy.yaml index 1f72f28cd..7ef814d39 100644 --- a/.github/workflows/kustomize-gateway-api-envoyproxy.yaml +++ b/.github/workflows/kustomize-gateway-api-envoyproxy.yaml @@ -3,7 +3,7 @@ name: Kustomize GitHub Actions for Gateway API(envoyproxy) on: pull_request: paths: - - base-kustomize/gateway/envoyproxy/** + - base-kustomize/gateway/envoyproxy-gateway/** - .github/workflows/kustomize-gateway-api-envoyproxy.yaml jobs: kustomize: @@ -25,7 +25,7 @@ jobs: fi - name: Run Kustomize Build run: | - kustomize build base-kustomize/gateway/envoyproxy/ --enable-helm --helm-command ${{ steps.helm.outputs.helm-path }} > /tmp/rendered.yaml + kustomize build base-kustomize/envoyproxy-gateway/base > /tmp/rendered.yaml - name: Return Kustomize Build uses: actions/upload-artifact@v4 with: diff --git a/.github/workflows/kustomize-topolvm.yaml b/.github/workflows/kustomize-topolvm.yaml deleted file mode 100644 index 39cda6ad5..000000000 --- a/.github/workflows/kustomize-topolvm.yaml +++ /dev/null @@ -1,37 +0,0 @@ -name: Kustomize GitHub Actions for topolvm - -on: - pull_request: - paths: - - base-kustomize/topolvm/** - - .github/workflows/kustomize-topolvm.yaml -jobs: - kustomize: - strategy: - matrix: - overlays: - - general - name: Kustomize - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - uses: azure/setup-helm@v3 - with: - version: latest - token: "${{ secrets.GITHUB_TOKEN }}" - id: helm - - name: Kustomize Install - working-directory: /usr/local/bin/ - run: | - if [ ! -f /usr/local/bin/kustomize ]; then - curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | sudo bash - fi - - name: Run Kustomize Build - run: | - kustomize build base-kustomize/topolvm/${{ matrix.overlays }} --enable-helm --helm-command ${{ steps.helm.outputs.helm-path }} > /tmp/rendered.yaml - - name: Return Kustomize Build - uses: actions/upload-artifact@v4 - with: - name: kustomize-topolvm-artifact-${{ matrix.overlays }} - path: /tmp/rendered.yaml diff --git a/.github/workflows/mkdocs.yaml b/.github/workflows/mkdocs.yaml index 0a942ea38..4c706aefb 100644 --- a/.github/workflows/mkdocs.yaml +++ b/.github/workflows/mkdocs.yaml @@ -8,11 +8,13 @@ on: - mkdocs.yml - "docs/**" - ".github/workflows/mkdocs.yml" + - "releasenotes/notes/**" pull_request: paths: - mkdocs.yml - "docs/**" - ".github/workflows/mkdocs.yml" + - "releasenotes/notes/**" workflow_dispatch: # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. 
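The releasenotes/notes/** trigger added above pairs with the reno and pandoc steps introduced in the next hunk. A minimal local sketch of producing a note that feeds that build (the slug fix-example-bug is hypothetical):

pip install reno
# creates releasenotes/notes/fix-example-bug-<suffix>.yaml for editing
reno new fix-example-bug
# mirror the CI steps: collect notes into RST, then convert to Markdown for mkdocs
reno report -o /tmp/reno.rst
pandoc /tmp/reno.rst -f rst -t markdown -o docs/release-notes.md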
@@ -27,10 +29,15 @@ jobs: steps: - uses: actions/checkout@v4 + with: + fetch-depth: 0 - uses: actions/setup-python@v5 with: python-version: 3.x - run: pip install -r doc-requirements.txt + - run: sudo wget https://github.com/jgm/pandoc/releases/download/3.6.3/pandoc-3.6.3-1-amd64.deb && sudo apt install -y ./pandoc-3.6.3-1-amd64.deb + - run: reno report -o /tmp/reno.rst + - run: pandoc /tmp/reno.rst -f rst -t markdown -o docs/release-notes.md - run: mkdocs build --strict - uses: actions/upload-pages-artifact@main with: diff --git a/.github/workflows/new-contributor.yaml b/.github/workflows/new-contributor.yaml new file mode 100644 index 000000000..7d5d8396e --- /dev/null +++ b/.github/workflows/new-contributor.yaml @@ -0,0 +1,37 @@ +on: pull_request_target + +jobs: + welcome: + runs-on: ubuntu-latest + steps: + - uses: actions/github-script@v7 + with: + script: | + // Get a list of all issues created by the PR opener + // See: https://octokit.github.io/rest.js/#pagination + const creator = context.payload.sender.login + const opts = github.rest.issues.listForRepo.endpoint.merge({ + ...context.issue, + creator, + state: 'all' + }) + const issues = await github.paginate(opts) + + for (const issue of issues) { + if (issue.number === context.issue.number) { + continue + } + + if (issue.pull_request) { + return // Creator is already a contributor. + } + } + + await github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: `**Welcome**, new contributor! + + Please make sure you've read our [contributing guide](CONTRIBUTING.md) and we look forward to reviewing your Pull request shortly ✨` + }) diff --git a/.github/workflows/pre-commit.yaml b/.github/workflows/pre-commit.yaml index a476473aa..e0d092b6b 100644 --- a/.github/workflows/pre-commit.yaml +++ b/.github/workflows/pre-commit.yaml @@ -1,6 +1,7 @@ name: Run pull-request syntax workflows on: - pull_request + jobs: pre_commit: runs-on: ubuntu-latest @@ -15,22 +16,17 @@ jobs: uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - - name: Get changed files + - name: Fetch all branches and tags + run: git fetch --prune --unshallow + - name: Determine changed files id: changed-files - uses: tj-actions/changed-files@v23.1 - - name: Run Check - uses: pre-commit/action@v3.0.0 - with: - extra_args: --files ${{ steps.changed-files.outputs.all_changed_files }} - - name: Checks failed, notification - if: failure() run: | - echo "Tests Failed" - echo "Run the following command to identify issues" - echo "pre-commit run --files ${{ steps.changed-files.outputs.all_changed_files }}" - - name: Upload log artifacts on failure - if: failure() - uses: actions/upload-artifact@v4 + CHANGED_FILES=$(git diff --name-only HEAD^ | xargs) + echo "Changed files: $CHANGED_FILES" + echo "CHANGED_FILES=$CHANGED_FILES" >> $GITHUB_ENV + - name: Run Check + uses: pre-commit/action@v3.0.1 with: - name: pre-commit-py${{ matrix.python-version }} - path: /home/runner/.cache/pre-commit/pre-commit.log + extra_args: >- + --files ${{ env.CHANGED_FILES }} + --hook-stage manual diff --git a/.github/workflows/testing-deploy-openstack.yaml b/.github/workflows/testing-deploy-openstack.yaml index b62358dfe..60517178f 100644 --- a/.github/workflows/testing-deploy-openstack.yaml +++ b/.github/workflows/testing-deploy-openstack.yaml @@ -10,6 +10,7 @@ on: jobs: deploy: + runs-on: ubuntu-latest steps: diff --git a/.gitmodules b/.gitmodules index ec787b589..5c7fcd2f8 100644 --- a/.gitmodules 
+++ b/.gitmodules @@ -1,21 +1,12 @@ [submodule "submodules/kubespray"] path = submodules/kubespray url = https://github.com/kubernetes-sigs/kubespray -[submodule "submodules/openstack-helm-infra"] - path = submodules/openstack-helm-infra - url = https://github.com/openstack/openstack-helm-infra -[submodule "submodules/openstack-helm"] - path = submodules/openstack-helm - url = https://github.com/openstack/openstack-helm -[submodule "submodules/rook"] - path = submodules/rook - url = https://github.com/rook/rook.git + ignore = all [submodule "submodules/openstack-exporter"] path = submodules/openstack-exporter url = https://github.com/openstack-exporter/helm-charts -[submodule "submodules/nginx-gateway-fabric"] - path = submodules/nginx-gateway-fabric - url = https://github.com/nginxinc/nginx-gateway-fabric.git + ignore = all [submodule "submodules/postgres-operator"] path = submodules/postgres-operator url = https://github.com/zalando/postgres-operator.git + ignore = all diff --git a/.original-images.json b/.original-images.json index 7189c0b4b..30442d5f2 100644 --- a/.original-images.json +++ b/.original-images.json @@ -1,18 +1,35 @@ [ + "quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy", + "docker.io/rabbitmq:3.13-management", + "docker.io/docker:17.07.0", + "docker.io/xrally/xrally-openstack:2.0.0", + "docker.io/openstackhelm/cinder:2024.1-ubuntu_jammy", + "docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_jammy", + "docker.io/openstackhelm/designate:2024.1-ubuntu_jammy", "docker.io/openstackhelm/barbican:2024.1-ubuntu_jammy", "docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_jammy", "docker.io/openstackhelm/glance:2024.1-ubuntu_jammy", + "docker.io/library/postgres:14.5", + "docker.io/openstackhelm/horizon:2023.1-ubuntu_jammy", + "docker.io/openstackhelm/osh-selenium:latest-ubuntu_jammy", + "docker.io/openstackhelm/ospurge:latest", + "ghcr.io/vexxhost/netoffload:v1.0.1", + "docker.io/kolla/ubuntu-source-nova-compute-ironic:master", + "gcr.io/google_containers/hyperkube-amd64:v1.11.6", + "docker.io/kolla/centos-source-openvswitch-vswitchd:master", + "docker.io/openstackhelm/placement:2024.1-ubuntu_jammy", + "docker.io/library/postgres:14.5", + "docker.io/wrouesnel/postgres_exporter:v0.4.6", + "quay.io/airshipit/porthole-postgresql-utility:latest-ubuntu_bionic", "docker.io/openstackhelm/magnum:2024.1-ubuntu_jammy", "docker.io/openstackhelm/neutron:2024.1-ubuntu_jammy", "ghcr.io/rackerlabs/genestack/ceilometer:2024.1-ubuntu_jammy-1738626813", "ghcr.io/rackerlabs/genestack/cinder-volume-rxt:2024.1-ubuntu_jammy-1731085441", - "ghcr.io/rackerlabs/genestack/cinder:2024.1-ubuntu_jammy-1738626871", + "ghcr.io/rackerlabs/genestack/glance:2024.1-ubuntu_jammy-1740121591", "ghcr.io/rackerlabs/genestack/gnocchi:2024.1-ubuntu_jammy-1738626728", "ghcr.io/rackerlabs/genestack/heat:2024.1-ubuntu_jammy-1738626724", - "ghcr.io/rackerlabs/genestack/horizon:2024.1-ubuntu_jammy-1738626972", "ghcr.io/rackerlabs/genestack/neutron-oslodb:2024.1-ubuntu_jammy-1738626982", "ghcr.io/rackerlabs/genestack/neutron-oslodb:2024.1-ubuntu_jammy-1739651767", - "ghcr.io/rackerlabs/genestack/nova-efi:2024.1-ubuntu_jammy-1723129048", "ghcr.io/rackerlabs/genestack/nova-efi:2024.1-ubuntu_jammy-1737928811", "ghcr.io/rackerlabs/genestack/octavia-ovn:2024.1-ubuntu_jammy-1737651745", "ghcr.io/rackerlabs/keystone-rxt:2024.1-ubuntu_jammy-1739377879", diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 132747f46..1273e5863 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ 
-1,33 +1,76 @@ --- +default_install_hook_types: + - pre-commit + - commit-msg + repos: + - repo: https://github.com/compilerla/conventional-pre-commit + rev: v4.0.0 + hooks: + - id: conventional-pre-commit + stages: + - commit-msg + args: + - "--strict" + - repo: https://github.com/jumanjihouse/pre-commit-hooks + rev: 3.0.0 + hooks: + - id: shellcheck + stages: + - commit-msg - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v5.0.0 hooks: - - id: end-of-file-fixer - - id: trailing-whitespace - - id: mixed-line-ending - - id: check-byte-order-marker - - id: check-executables-have-shebangs - - id: check-merge-conflict - - id: check-symlinks - - id: check-yaml - files: .*\.(yaml|yml)$ - args: - - "--allow-multiple-documents" - - "--unsafe" - - id: debug-statements + - id: end-of-file-fixer + stages: + - commit-msg + - id: trailing-whitespace + stages: + - commit-msg + - id: mixed-line-ending + stages: + - commit-msg + - id: check-byte-order-marker + stages: + - commit-msg + - id: check-executables-have-shebangs + stages: + - commit-msg + - id: check-merge-conflict + stages: + - commit-msg + - id: check-symlinks + stages: + - commit-msg + - id: check-yaml + stages: + - commit-msg + files: .*\.(yaml|yml)$ + args: + - "--allow-multiple-documents" + - "--unsafe" + - id: debug-statements - repo: https://github.com/psf/black - rev: 24.1.1 + rev: 25.1.0 hooks: - - id: black - - repo: https://github.com/ansible-community/ansible-lint - rev: v6.22.2 + - id: black + stages: + - manual + - repo: https://github.com/adrienverge/yamllint.git + rev: v1.35.1 hooks: - - id: ansible-lint - additional_dependencies: - - ansible - - yamllint - - repo: https://github.com/jumanjihouse/pre-commit-hooks - rev: 3.0.0 + - id: yamllint + stages: + - manual + args: + - >- + -d {extends: default, rules: {line-length: disable}, + ignore: [submodules/]} + - repo: https://github.com/ansible-community/ansible-lint + rev: v24.9.2 hooks: - - id: shellcheck + - id: ansible-lint + stages: + - manual + additional_dependencies: + - ansible diff --git a/Containerfiles/Glance-Containerfile b/Containerfiles/Glance-Containerfile index 38d97607f..a710b16b7 100644 --- a/Containerfiles/Glance-Containerfile +++ b/Containerfiles/Glance-Containerfile @@ -6,7 +6,10 @@ RUN apt update && apt install -y git RUN export ORIG_PLUGIN_VERSION="${PLUGIN_VERSION}"; \ if [ "${PLUGIN_VERSION}" != 'master' ]; then export PLUGIN_VERSION=stable/${PLUGIN_VERSION}; fi; \ . /var/lib/openstack/bin/activate; \ -/var/lib/openstack/bin/pip install boto3 git+https://github.com/openstack/oslo.db@${PLUGIN_VERSION}#egg=oslo_db +/var/lib/openstack/bin/pip install boto3 os-brick \ + git+https://github.com/openstack/python-cinderclient@${PLUGIN_VERSION}#egg=python-cinderclient \ + git+https://github.com/openstack/oslo.db@${PLUGIN_VERSION}#egg=oslo_db \ + git+https://github.com/openstack/glance@${PLUGIN_VERSION}#egg=glance FROM openstackhelm/glance:${VERSION} COPY --from=build /var/lib/openstack/. /var/lib/openstack/ diff --git a/Containerfiles/MagnumRXT-Containerfile b/Containerfiles/MagnumRXT-Containerfile new file mode 100644 index 000000000..b12e2ae5e --- /dev/null +++ b/Containerfiles/MagnumRXT-Containerfile @@ -0,0 +1,11 @@ +ARG VERSION=master-ubuntu_jammy +FROM openstackhelm/magnum:${VERSION} as build +RUN apt-get update && apt-get install -y git && apt clean +RUN . 
/var/lib/openstack/bin/activate; \ +/var/lib/openstack/bin/pip install git+https://github.com/openstack/oslo.db@${PLUGIN_VERSION}#egg=oslo_db \ + git+https://opendev.org/openstack/magnum-capi-helm@${PLUGIN_VERSION}#egg=magnum_capi_helm +RUN /var/lib/openstack/bin/pip install --upgrade --force-reinstall pip +RUN find /var/lib/openstack -regex '^.*\(__pycache__\|\.py[co]\)$' -delete + +FROM openstackhelm/magnum:${VERSION} +COPY --from=build /var/lib/openstack/. /var/lib/openstack/ diff --git a/ansible/playbooks/extra/custom_exporters/multipathd_info b/ansible/playbooks/extra/custom_exporters/multipathd_info index d904cc6f5..58c9c706e 100644 --- a/ansible/playbooks/extra/custom_exporters/multipathd_info +++ b/ansible/playbooks/extra/custom_exporters/multipathd_info @@ -6,4 +6,5 @@ echo '# HELP node_dmpath_info State info for dev-mapper path' echo '# TYPE node_dmpath_info gauge' -/sbin/multipathd show paths format '%s %S %z %m %d %D %i %a %P %I %M %t %T %o' | /usr/bin/awk '{ if ( NR > 1) {print "node_dmpath_info{vendor=\""$1"\"," "size=\""$2"\"," "volume_id=\""$3"\"," "multipath_id=\""$4"\"," "device=\""$5"\"," "dm_dev=\""$6"\"," "hcil=\""$7"\"," "interface_ip=\""$8"\"," "protocol=\""$9"\"," "init_st=\""$10"\"," "marginal_st=\""$11"\"," "device_mapper_state=\""$12"\"," "path_checker_state=\""$13"\"," "device_state=\""$14"\"}" " 1"}}' +VENDOR_CHECK=$(/sbin/multipathd show paths format '%s %S %z %m %d %D %i %a %P %I %M %t %T %o' | sed 's/LUN C-Mode/LUN_C-Mode/') +echo "${VENDOR_CHECK}" | /usr/bin/awk '{ if ( NR > 1) {print "node_dmpath_info{vendor=\""$1"\"," "size=\""$2"\"," "volume_id=\""$3"\"," "multipath_id=\""$4"\"," "device=\""$5"\"," "dm_dev=\""$6"\"," "hcil=\""$7"\"," "interface_ip=\""$8"\"," "protocol=\""$9"\"," "init_st=\""$10"\"," "marginal_st=\""$11"\"," "device_mapper_state=\""$12"\"," "path_checker_state=\""$13"\"," "device_state=\""$14"\"}" " 1"}}' diff --git a/ansible/playbooks/extra/custom_exporters/perccli.py b/ansible/playbooks/extra/custom_exporters/perccli.py index 2c6eb9590..8d386f73a 100644 --- a/ansible/playbooks/extra/custom_exporters/perccli.py +++ b/ansible/playbooks/extra/custom_exporters/perccli.py @@ -213,6 +213,8 @@ "state", "firmware", "serial", + "manufacturer", + "type", ], namespace=namespace, registry=registry, @@ -394,7 +396,7 @@ def create_metrics_of_physical_drive( physical_drive, detailed_info_array, controller_index ): enclosure, slot = physical_drive.get("EID:Slt").split(":")[:2] - + type_pd = physical_drive.get("Type") if enclosure == " ": drive_identifier = "Drive /c{0}/s{1}".format(controller_index, slot) enclosure = "" @@ -457,6 +459,8 @@ def create_metrics_of_physical_drive( physical_drive["State"], attributes["Firmware Revision"].strip(), attributes["SN"].strip(), + attributes["Manufacturer Id"].strip(), + type_pd, ).set(1) if "Drive Temperature" in state and state["Drive Temperature"] != "N/A": diff --git a/ansible/roles/host_setup/defaults/main.yml b/ansible/roles/host_setup/defaults/main.yml index c9882840d..f9b6bc1c5 100644 --- a/ansible/roles/host_setup/defaults/main.yml +++ b/ansible/roles/host_setup/defaults/main.yml @@ -42,7 +42,8 @@ host_rp_filter_all: 0 host_rp_filter_default: 0 # Set the maximum size of the connection tracking table. 
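The new values just below keep the widely used 4:1 ratio between nf_conntrack_max and nf_conntrack_buckets (1048576 = 4 x 262144). A quick sketch for verifying the result on a tuned host, assuming this role has already applied the sysctls:

sysctl net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets
# current number of tracked connections, handy when judging whether the larger table is warranted
sysctl net.netfilter.nf_conntrack_count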
-host_nf_conntrack_max: 262144 +host_nf_conntrack_max: 1048576 +host_nf_conntrack_buckets: 262144 # System control kernel tuning kernel_options: @@ -106,12 +107,24 @@ kernel_options: value: "{{ set_gc_val | int * 2 }}" - key: 'net.netfilter.nf_conntrack_max' value: "{{ host_nf_conntrack_max }}" + - key: 'net.netfilter.nf_conntrack_buckets' + value: "{{ host_nf_conntrack_buckets }}" - key: 'vm.dirty_background_ratio' value: 5 - key: 'vm.dirty_ratio' value: 10 - key: 'vm.swappiness' value: 5 + - key: 'net.ipv4.conf.all.secure_redirects' + value: 0 + - key: 'net.ipv4.conf.all.accept_redirects' + value: 0 + - key: 'net.ipv6.conf.all.accept_redirects' + value: 0 + - key: 'net.ipv4.conf.default.accept_redirects' + value: 0 + - key: 'net.ipv4.conf.default.secure_redirects' + value: 0 ## kernel modules for specific group hosts host_specific_kernel_modules: [] diff --git a/ansible/roles/host_setup/files/queue_max.sh b/ansible/roles/host_setup/files/queue_max.sh index af5aa0816..3f593c64b 100644 --- a/ansible/roles/host_setup/files/queue_max.sh +++ b/ansible/roles/host_setup/files/queue_max.sh @@ -25,7 +25,11 @@ function ethernetDevs () { } function functionSetMax () { - echo "Setting queue max $dev" + if grep -q "0x1af4" /sys/class/net/$1/device/vendor; then + echo "Skipping virtio device $1" + return + fi + echo "Setting queue max $1" # The RX value is set to 90% of the max value to avoid packet loss ethtool -G $1 rx $(ethtool --json -g $1 | jq '.[0] | ."rx-max" * .9 | round') # The TX value is set to the max value diff --git a/base-helm-configs/argocd/helm-argocd-overrides.yaml b/base-helm-configs/argocd/helm-argocd-overrides.yaml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/base-helm-configs/argocd/helm-argocd-overrides.yaml @@ -0,0 +1 @@ +--- diff --git a/base-helm-configs/barbican/barbican-helm-overrides.yaml b/base-helm-configs/barbican/barbican-helm-overrides.yaml index 39d5a8497..869771dcd 100644 --- a/base-helm-configs/barbican/barbican-helm-overrides.yaml +++ b/base-helm-configs/barbican/barbican-helm-overrides.yaml @@ -1,754 +1,101 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default values for barbican. -# This is a YAML-formatted file. -# Declare name/value pairs to be passed into your templates. 
-# name: value - --- -labels: - api: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - test: - node_selector_key: openstack-control-plane - node_selector_value: enabled - -release_group: null - -# NOTE(philsphicas): the pre-install hook breaks upgrade for helm2 -# Set to false to upgrade using helm2 -helm3_hook: true - images: tags: - bootstrap: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" - scripted_test: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + barbican_api: "quay.io/rackspace/rackerlabs-barbican:2024.1-ubuntu_jammy" barbican_db_sync: "quay.io/rackspace/rackerlabs-barbican:2024.1-ubuntu_jammy" + bootstrap: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" db_drop: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" + image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" ks_endpoints: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - barbican_api: "quay.io/rackspace/rackerlabs-barbican:2024.1-ubuntu_jammy" + ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" rabbit_init: "quay.io/rackspace/rackerlabs-rabbitmq:3.13-management" - image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync - -pod: - security_context: - barbican: - pod: - runAsUser: 42424 - container: - barbican_api: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - test: - pod: - runAsUser: 42424 - container: - barbican_test: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - tolerations: - barbican: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - mounts: - barbican_api: - init_container: null - barbican_api: - volumeMounts: - volumes: - barbican_bootstrap: - init_container: null - barbican_bootstrap: - volumeMounts: - volumes: - barbican_tests: - init_container: null - barbican_tests: - volumeMounts: - volumes: - barbican_db_sync: - barbican_db_sync: - volumeMounts: - volumes: - replicas: - api: 1 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - disruption_budget: - api: - min_available: 0 - resources: - enabled: true - api: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - jobs: - bootstrap: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - db_init: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - db_sync: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" 
- db_drop: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - rabbit_init: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - ks_endpoints: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - ks_service: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - ks_user: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - tests: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - image_repo_sync: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - -network: - api: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-cluster" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - external_policy_local: false - node_port: - enabled: false - port: 30486 - -network_policy: - barbican: - ingress: - - {} - egress: - - {} - -bootstrap: - enabled: true - ks_user: barbican - script: | - openstack role create --or-show creator + scripted_test: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" dependencies: - dynamic: - common: - local_image_registry: - jobs: - - barbican-image-repo-sync - services: - - endpoint: node - service: local_image_registry static: + db_sync: + jobs: null api: jobs: - barbican-db-sync - barbican-ks-user - barbican-ks-endpoints -# - barbican-rabbit-init - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: oslo_messaging - db_drop: - services: - - endpoint: internal - service: oslo_db -# db_init: -# services: -# - endpoint: internal -# service: oslo_db - db_sync: - jobs: -# - barbican-db-init - services: - - endpoint: internal - service: oslo_db - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry - ks_endpoints: - jobs: - - barbican-ks-service - services: - - endpoint: internal - service: identity - ks_service: - services: - - endpoint: internal - service: identity - ks_user: - services: - - endpoint: internal - service: identity -# rabbit_init: -# services: -# - endpoint: internal -# service: oslo_messaging conf: - paste: - composite:main: - use: egg:Paste#urlmap - /: barbican_version - /v1: barbican-api-keystone - pipeline:barbican_version: - pipeline: cors http_proxy_to_wsgi versionapp - pipeline:barbican_api: - pipeline: cors http_proxy_to_wsgi unauthenticated-context apiapp - pipeline:barbican-profile: - pipeline: cors http_proxy_to_wsgi unauthenticated-context egg:Paste#cgitb egg:Paste#httpexceptions profile apiapp - pipeline:barbican-api-keystone: - pipeline: cors http_proxy_to_wsgi authtoken context apiapp - pipeline:barbican-api-keystone-audit: - pipeline: http_proxy_to_wsgi authtoken context audit apiapp - app:apiapp: - paste.app_factory: barbican.api.app:create_main_app - app:versionapp: - paste.app_factory: barbican.api.app:create_version_app - filter:simple: - paste.filter_factory: barbican.api.middleware.simple:SimpleFilter.factory - filter:unauthenticated-context: - paste.filter_factory: barbican.api.middleware.context:UnauthenticatedContextMiddleware.factory - filter:context: - paste.filter_factory: barbican.api.middleware.context:ContextMiddleware.factory - filter:audit: - paste.filter_factory: keystonemiddleware.audit:filter_factory - audit_map_file: /etc/barbican/api_audit_map.conf - filter:authtoken: - paste.filter_factory: 
keystonemiddleware.auth_token:filter_factory - filter:profile: - use: egg:repoze.profile - log_filename: myapp.profile - cachegrind_filename: cachegrind.out.myapp - discard_first_request: true - path: /__profile__ - flush_at_shutdown: true - unwind: false - filter:cors: - paste.filter_factory: oslo_middleware.cors:filter_factory - oslo_config_project: barbican - filter:http_proxy_to_wsgi: - paste.filter_factory: oslo_middleware:HTTPProxyToWSGI.factory - policy: {} - audit_map: - DEFAULT: - # default target endpoint type - # should match the endpoint type defined in service catalog - target_endpoint_type: key-manager - custom_actions: - # map urls ending with specific text to a unique action - # Don't need custom mapping for other resource operations - # Note: action should match action names defined in CADF taxonomy - acl/get: read - path_keywords: - # path of api requests for CADF target typeURI - # Just need to include top resource path to identify class of resources - secrets: null - containers: null - orders: null - cas: "None" - quotas: null - project-quotas: null - service_endpoints: - # map endpoint type defined in service catalog to CADF typeURI - key-manager: service/security/keymanager - barbican_api: - uwsgi: - add-header: "Connection: close" - buffer-size: 65535 - die-on-term: true - enable-threads: true - exit-on-reload: false - hook-master-start: unix_signal:15 gracefully_kill_them_all - lazy-apps: true - log-x-forwarded-for: true - master: true - procname-prefix-spaced: "barbiacan-api:" - route-user-agent: '^kube-probe.* donotlog:' - thunder-lock: true - worker-reload-mercy: 80 - wsgi-file: /var/lib/openstack/bin/barbican-wsgi-api - processes: 4 barbican: DEFAULT: - host_href: http://barbican-api.openstack.svc.cluster.local:9311 - transport_url: null - log_config_append: /etc/barbican/logging.conf + host_href: "http://barbican-api.openstack.svc.cluster.local:9311" keystone_authtoken: service_token_roles: service service_token_roles_required: true - auth_version: v3 auth_type: password + auth_version: v3 memcache_security_strategy: ENCRYPT service_type: key-manager - database: - max_retries: -1 - barbican_api: - # NOTE(portdirect): the bind port should not be defined, and is manipulated - # via the endpoints section. - bind_port: null - oslo_policy: - policy_file: /etc/barbican/policy.yaml - # When using the simple_crypto_plugin, a kek must be provided as: - # .conf.barbican.simple_crypto_plugin.kek - # If no kek is provided, barbican will use a well-known default. - # If upgrading the chart with a new kek, the old kek must be provided as: - # .conf.simple_crypto_plugin_rewrap.old_kek - # Please refer to the .conf.simple_crypto_key_rewrap section below. - # The barbican defaults are included here as a reference: - # secretstore: - # enabled_secretstore_plugins: - # - store_crypto - # crypto: - # enabled_crypto_plugins: - # - simple_crypto - # simple_crypto_plugin: - # # The kek should be a 32-byte value which is base64 encoded. - # kek: "dGhpcnR5X3R3b19ieXRlX2tleWJsYWhibGFoYmxhaGg=" oslo_concurrency: lock_path: /tmp/barbican oslo_messaging_notifications: driver: messagingv2 - oslo_middleware: - enable_proxy_headers_parsing: true oslo_messaging_rabbit: amqp_durable_queues: false - # We define use of quorum queues via kustomize but this was enabling HA queues instead - # ha_queues are deprecated, explicitly set to false and set quorum_queue true rabbit_ha_queues: false rabbit_quorum_queue: true - # TODO: Not available until 2024.1, but once it is, we want to enable these! 
- # new feature ref; https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html rabbit_transient_quorum_queue: false use_queue_manager: false - # Reconnect after a node outage more quickly rabbit_interval_max: 10 # Send more frequent heartbeats and fail unhealthy nodes faster # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 heartbeat_rate: 3 - heartbeat_timeout_threshold: 30 - # Setting lower kombu_reconnect_delay should resolve isssue with HA failing when one node is down + heartbeat_timeout_threshold: 60 + # DEPRECIATION: (warning) heartbeat_in_pthread will be deprecated in 2024.2 + heartbeat_in_pthread: True + # Setting lower kombu_reconnect_delay should resolve issue with HA failing when one node is down # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 kombu_reconnect_delay: 0.5 - # KEK rotation for the simple_crypto plugin - simple_crypto_kek_rewrap: - - # To allow for chart upgrades when modifying the Key Encryption Key, the - # db-sync job can rewrap the existing project keys with the new kek, leaving - # each secret’s encrypted data unchanged. - - # This feature is enabled automatically, if a kek is specified at: - # .conf.barbican.simple_crypto_plugin.kek - # and the previous kek is also specified at: - # .conf.simple_crypto_kek_rewrap.old_kek - - # The project keys are decrypted with 'old_kek' and re-encrypted with the - # target kek (as defined in barbican.conf). - # This resembles the lightweight rotation described here, which was never - # implemented for the simple crypto plugin: - # https://specs.openstack.org/openstack/barbican-specs/specs/liberty/add-crypto-mkek-rotation-support-lightweight.html - - # The KEK value "dGhpcnR5X3R3b19ieXRlX2tleWJsYWhibGFoYmxhaGg=" matches the - # plugin default, and is retained here for convenience, in case the chart was - # previously installed without explicitly specifying a kek. 
- old_kek: "dGhpcnR5X3R3b19ieXRlX2tleWJsYWhibGFoYmxhaGg=" + oslo_middleware: + enable_proxy_headers_parsing: true + barbican_api_uwsgi: + uwsgi: + processes: 4 logging: - loggers: - keys: - - root - - barbican - handlers: - keys: - - stdout - - stderr - - "null" - formatters: - keys: - - context - - default logger_root: - level: INFO handlers: - stdout - logger_barbican: level: INFO - handlers: - - stdout - qualname: barbican - logger_amqp: - level: WARNING - handlers: stderr - qualname: amqp - logger_amqplib: - level: WARNING - handlers: stderr - qualname: amqplib - logger_eventletwsgi: - level: WARNING - handlers: stderr - qualname: eventlet.wsgi.server - logger_sqlalchemy: - level: WARNING - handlers: stderr - qualname: sqlalchemy - logger_boto: - level: WARNING - handlers: stderr - qualname: boto - handler_null: - class: logging.NullHandler - formatter: default - args: () - handler_stdout: - class: StreamHandler - args: (sys.stdout,) - formatter: context - handler_stderr: - class: StreamHandler - args: (sys.stderr,) - formatter: context - formatter_context: - class: oslo_log.formatters.ContextFormatter - datefmt: "%Y-%m-%d %H:%M:%S" - formatter_default: - format: "%(message)s" - datefmt: "%Y-%m-%d %H:%M:%S" - -# Names of secrets used by bootstrap and environmental checks -secrets: - identity: - admin: barbican-keystone-admin - barbican: barbican-keystone-user - oslo_db: - admin: barbican-db-admin - barbican: barbican-db-user - oslo_messaging: - admin: barbican-rabbitmq-admin - barbican: barbican-rabbitmq-user - tls: - key_manager: - api: - public: barbican-tls-public - internal: barbican-tls-internal - oci_image_registry: - barbican: barbican-oci-image-registry endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - barbican: - username: barbican - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null - identity: - name: keystone - auth: - admin: - region_name: RegionOne - username: admin - password: password - project_name: admin - user_domain_name: default - project_domain_name: default - barbican: - role: admin - region_name: RegionOne - username: barbican - password: password - project_name: service - user_domain_name: service - project_domain_name: service - hosts: - default: keystone - internal: keystone-api - host_fqdn_override: - default: null - path: - default: /v3 - scheme: - default: http - port: - api: - default: 80 - internal: 5000 + fluentd: + namespace: fluentbit key_manager: - name: barbican hosts: - default: barbican-api public: barbican-api - host_fqdn_override: - default: - tls: - secretName: barbican-tls-internal - issuerRef: - kind: ClusterIssuer - name: ca-clusterissuer - path: - default: / - scheme: - default: http - service: http port: api: - default: 9311 public: 9311 - service: 9311 oslo_db: - auth: - admin: - username: root - password: password - secret: - tls: - internal: mariadb-tls-direct - barbican: - username: barbican - password: password - hosts: - default: mariadb-cluster-primary host_fqdn_override: - default: null - path: /barbican - scheme: mysql+pymysql - port: - mysql: - default: 3306 - oslo_messaging: - auth: - admin: - username: rabbitmq - password: password 
- secret: - tls: - internal: rabbitmq-tls-direct - barbican: - username: barbican - password: password - statefulset: - replicas: 2 - name: rabbitmq-server + default: mariadb-cluster-primary.openstack.svc.cluster.local hosts: - default: rabbitmq-nodes - host_fqdn_override: - default: rabbitmq.openstack.svc.cluster.local - path: /barbican - scheme: rabbit - port: - amqp: - default: 5672 - http: - default: 15672 + default: mariadb-cluster-primary oslo_cache: - auth: - # NOTE(portdirect): this is used to define the value for keystone - # authtoken cache encryption key, if not set it will be populated - # automatically with a random value, but to take advantage of - # this feature all services should be set to use the same key, - # and memcache service. - memcache_secret_key: null - hosts: - default: memcached host_fqdn_override: - default: null - port: - memcache: - default: 11211 - fluentd: - namespace: fluentbit - name: fluentd - hosts: - default: fluentd-logging - host_fqdn_override: - default: null - path: - default: null - scheme: 'http' - port: - service: - default: 24224 - metrics: - default: 24220 - # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress - # They are using to enable the Egress K8s network policy. - kube_dns: - namespace: kube-system - name: kubernetes-dns + default: memcached.openstack.svc.cluster.local hosts: - default: kube-dns + default: memcached + oslo_messaging: host_fqdn_override: - default: null - path: - default: null - scheme: http - port: - dns: - default: 53 - protocol: UDP - ingress: - namespace: null - name: ingress + default: rabbitmq.openstack.svc.cluster.local hosts: - default: ingress - port: - ingress: - default: 80 - -tls: - identity: false - oslo_messaging: false - oslo_db: false + default: rabbitmq-nodes manifests: - certificates: false - configmap_bin: true - configmap_etc: true - deployment_api: true ingress_api: false - job_bootstrap: true job_db_init: false - job_db_sync: true - job_db_drop: false - job_image_repo_sync: true job_rabbit_init: false - job_ks_endpoints: true - job_ks_service: true - job_ks_user: true - pdb_api: true - pod_test: true - secret_db: true - network_policy: false secret_ingress_tls: false - secret_keystone: true - secret_rabbitmq: true - secret_registry: true service_ingress_api: false - service_api: true -... 
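Reviewer note: the barbican override above now pins the oslo_db, oslo_cache and oslo_messaging endpoints to the in-cluster services (mariadb-cluster-primary, memcached and rabbitmq-nodes) via host_fqdn_override instead of relying on chart defaults. A quick way to sanity-check the merged result is to render the chart locally with this values file; the chart reference, release name and namespace below are illustrative assumptions, not values taken from this change.

  # Minimal sketch: render barbican with the override file and inspect the
  # endpoints that end up in the generated configuration (chart ref and namespace assumed).
  helm template barbican openstack-helm/barbican \
    --namespace openstack \
    -f base-helm-configs/barbican/barbican-helm-overrides.yaml \
    | grep -E 'mariadb-cluster-primary|memcached|rabbitmq' | sort -u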
diff --git a/base-helm-configs/ceilometer/ceilometer-helm-overrides.yaml b/base-helm-configs/ceilometer/ceilometer-helm-overrides.yaml index dd9657333..e38cd4611 100644 --- a/base-helm-configs/ceilometer/ceilometer-helm-overrides.yaml +++ b/base-helm-configs/ceilometer/ceilometer-helm-overrides.yaml @@ -1,26 +1,4 @@ --- -release_group: null - -labels: - compute: - node_selector_key: openstack-compute-node - node_selector_value: enabled - central: - node_selector_key: openstack-control-plane - node_selector_value: enabled - ipmi: - node_selector_key: openstack-node - node_selector_value: enabled - notification: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - test: - node_selector_key: openstack-control-plane - node_selector_value: enabled - images: tags: test: "quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0" @@ -35,24 +13,19 @@ images: dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" pull_policy: "Always" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync - -ipmi_device: /dev/ipmi0 conf: ceilometer: DEFAULT: debug: "false" -# default_log_levels: >- -# amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO, -# oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=DEBUG, -# urllib3.connectionpool=DEBUG,websocket=WARN,requests.packages.urllib3.util.retry=DEBUG, -# urllib3.util.retry=DEBUG,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN, -# taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO + # NOTE: If you need to enable debug, it is highly recommended to uncomment the below lines + # to ensure ceilometer does not spam the logs + # default_log_levels: >- + # amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO, + # oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=DEBUG, + # urllib3.connectionpool=DEBUG,websocket=WARN,requests.packages.urllib3.util.retry=DEBUG, + # urllib3.util.retry=DEBUG,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN, + # taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO event_dispatchers: type: multistring values: @@ -62,26 +35,12 @@ conf: values: - gnocchi api: - aodh_is_enabled: "False" aodh_url: "NotUsed" - dispatcher_gnocchi: - filter_service_activity: False - archive_policy: low - resources_definition_file: /etc/ceilometer/gnocchi_resources.yaml database: connection: "NotUsed" event_connection: "NotUsed" metering_connection: "NotUsed" max_retries: -1 - dispatcher: - archive_policy: low - filter_project: service - keystone_authtoken: - auth_type: password - auth_version: v3 - service_credentials: - auth_type: password - interface: internal notification: messaging_urls: type: multistring @@ -93,6 +52,7 @@ conf: - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/keystone - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/neutron - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/heat + - rabbit://rabbitmq:password@rabbitmq.openstack.svc.cluster.local:5672/swift oslo_messaging_notifications: driver: messagingv2 topics: @@ -102,33 +62,24 @@ conf: lock_path: /tmp/ceilometer oslo_messaging_rabbit: amqp_durable_queues: false - # We define use of quorum queues via
kustomize but this was enabling HA queues instead - # ha_queues are deprecated, explicitly set to false and set quorum_queue true rabbit_ha_queues: false rabbit_quorum_queue: true - # TODO: Not available until 2024.1, but once it is, we want to enable these! - # new feature ref; https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html rabbit_transient_quorum_queue: false use_queue_manager: false - # Reconnect after a node outage more quickly rabbit_interval_max: 10 # Send more frequent heartbeats and fail unhealthy nodes faster # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 heartbeat_rate: 3 - heartbeat_timeout_threshold: 30 - # Setting lower kombu_reconnect_delay should resolve isssue with HA failing when one node is down + heartbeat_timeout_threshold: 60 + # DEPRECATION: (warning) heartbeat_in_pthread will be deprecated in 2024.2 + heartbeat_in_pthread: True + # Setting lower kombu_reconnect_delay should resolve issue with HA failing when one node is down # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 kombu_reconnect_delay: 0.5 - oslo_policy: - policy_file: /etc/ceilometer/policy.yaml - cache: - enabled: true - backend: dogpile.cache.memcached - expiration_time: 86400 event_definitions: - - event_type: 'compute.instance.*' + - event_type: "compute.instance.*" traits: &instance_traits tenant_id: fields: payload.tenant_id @@ -200,7 +151,23 @@ conf: audit_period_ending: type: datetime fields: payload.audit_period_ending - - event_type: ['volume.exists', 'volume.retype', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*', 'volume.transfer.accept.end', 'snapshot.transfer.accept.end'] + - event_type: + [ + "volume.exists", + "volume.retype", + "volume.create.*", + "volume.delete.*", + "volume.resize.*", + "volume.attach.*", + "volume.detach.*", + "volume.update.*", + "snapshot.exists", + "snapshot.create.*", + "snapshot.delete.*", + "snapshot.update.*", + "volume.transfer.accept.end", + "snapshot.transfer.accept.end", + ] traits: &cinder_traits user_id: fields: payload.user_id @@ -221,7 +188,19 @@ conf: fields: payload.glance_metadata[?key=image_id].value instance_id: fields: payload.volume_attachment[0].instance_uuid - - event_type: ['volume.transfer.*', 'volume.exists', 'volume.retype', 'volume.create.*', 'volume.delete.*', 'volume.resize.*', 'volume.attach.*', 'volume.detach.*', 'volume.update.*', 'snapshot.transfer.accept.end'] + - event_type: + [ + "volume.transfer.*", + "volume.exists", + "volume.retype", + "volume.create.*", + "volume.delete.*", + "volume.resize.*", + "volume.attach.*", + "volume.detach.*", + "volume.update.*", + "snapshot.transfer.accept.end", + ] traits: <<: *cinder_traits resource_id: @@ -235,14 +214,15 @@ conf: fields: payload.volume_type replication_status: fields: payload.replication_status - - event_type: ['snapshot.transfer.accept.end'] + - event_type: ["snapshot.transfer.accept.end"] traits: <<: *cinder_traits resource_id: fields: payload.snapshot_id project_id: fields: payload.tenant_id - - event_type: ['share.create.*', 'share.delete.*', 'share.extend.*', 'share.shrink.*'] + - event_type: + ["share.create.*", "share.delete.*", "share.extend.*", "share.shrink.*"] traits: &share_traits share_id: fields:
payload.share_id @@ -274,20 +254,27 @@ conf: fields: payload.description host: fields: payload.host - - event_type: ['snapshot.exists', 'snapshot.create.*', 'snapshot.delete.*', 'snapshot.update.*'] + - event_type: + [ + "snapshot.exists", + "snapshot.create.*", + "snapshot.delete.*", + "snapshot.update.*", + ] traits: <<: *cinder_traits resource_id: fields: payload.snapshot_id volume_id: fields: payload.volume_id - - event_type: ['image_volume_cache.*'] + - event_type: ["image_volume_cache.*"] traits: image_id: fields: payload.image_id host: fields: payload.host - - event_type: ['image.create', 'image.update', 'image.upload', 'image.delete'] + - event_type: + ["image.create", "image.update", "image.upload", "image.delete"] traits: &glance_crud project_id: fields: payload.owner @@ -328,7 +315,7 @@ conf: project_id: fields: payload.tenant_id user_id: - fields: ['ctxt.trustor_user_id', 'ctxt.user_id'] + fields: ["ctxt.trustor_user_id", "ctxt.user_id"] resource_id: fields: payload.stack_identity name: @@ -360,8 +347,18 @@ conf: updated_at: type: datetime fields: payload.updated_at - - event_type: ['identity.user.*', 'identity.project.*', 'identity.group.*', 'identity.role.*', 'identity.OS-TRUST:trust.*', - 'identity.region.*', 'identity.service.*', 'identity.endpoint.*', 'identity.policy.*'] + - event_type: + [ + "identity.user.*", + "identity.project.*", + "identity.group.*", + "identity.role.*", + "identity.OS-TRUST:trust.*", + "identity.region.*", + "identity.service.*", + "identity.endpoint.*", + "identity.policy.*", + ] traits: &identity_crud resource_id: fields: payload.resource_info @@ -453,7 +450,21 @@ conf: fields: payload.target.metadata.object observer_id: fields: payload.observer.id - - event_type: ['network.*', 'subnet.*', 'port.*', 'router.*', 'floatingip.*', 'firewall.*', 'firewall_policy.*', 'firewall_rule.*', 'vpnservice.*', 'ipsecpolicy.*', 'ikepolicy.*', 'ipsec_site_connection.*'] + - event_type: + [ + "network.*", + "subnet.*", + "port.*", + "router.*", + "floatingip.*", + "firewall.*", + "firewall_policy.*", + "firewall_rule.*", + "vpnservice.*", + "ipsecpolicy.*", + "ikepolicy.*", + "ipsec_site_connection.*", + ] traits: &network_traits user_id: fields: ctxt.user_id @@ -465,81 +476,81 @@ conf: name: fields: payload.network.name resource_id: - fields: ['payload.network.id', 'payload.id'] + fields: ["payload.network.id", "payload.id"] - event_type: subnet.* traits: <<: *network_traits name: fields: payload.subnet.name resource_id: - fields: ['payload.subnet.id', 'payload.id'] + fields: ["payload.subnet.id", "payload.id"] - event_type: port.* traits: <<: *network_traits name: fields: payload.port.name resource_id: - fields: ['payload.port.id', 'payload.id'] + fields: ["payload.port.id", "payload.id"] - event_type: router.* traits: <<: *network_traits name: fields: payload.router.name resource_id: - fields: ['payload.router.id', 'payload.id'] + fields: ["payload.router.id", "payload.id"] - event_type: floatingip.* traits: <<: *network_traits resource_id: - fields: ['payload.floatingip.id', 'payload.id'] + fields: ["payload.floatingip.id", "payload.id"] - event_type: firewall.* traits: <<: *network_traits name: fields: payload.firewall.name resource_id: - fields: ['payload.firewall.id', 'payload.id'] + fields: ["payload.firewall.id", "payload.id"] - event_type: firewall_policy.* traits: <<: *network_traits name: fields: payload.firewall_policy.name resource_id: - fields: ['payload.firewall_policy.id', 'payload.id'] + fields: ["payload.firewall_policy.id", "payload.id"] - 
event_type: firewall_rule.* traits: <<: *network_traits name: fields: payload.firewall_rule.name resource_id: - fields: ['payload.firewall_rule.id', 'payload.id'] + fields: ["payload.firewall_rule.id", "payload.id"] - event_type: vpnservice.* traits: <<: *network_traits name: fields: payload.vpnservice.name resource_id: - fields: ['payload.vpnservice.id', 'payload.id'] + fields: ["payload.vpnservice.id", "payload.id"] - event_type: ipsecpolicy.* traits: <<: *network_traits name: fields: payload.ipsecpolicy.name resource_id: - fields: ['payload.ipsecpolicy.id', 'payload.id'] + fields: ["payload.ipsecpolicy.id", "payload.id"] - event_type: ikepolicy.* traits: <<: *network_traits name: fields: payload.ikepolicy.name resource_id: - fields: ['payload.ikepolicy.id', 'payload.id'] + fields: ["payload.ikepolicy.id", "payload.id"] - event_type: ipsec_site_connection.* traits: <<: *network_traits resource_id: - fields: ['payload.ipsec_site_connection.id', 'payload.id'] - - event_type: '*http.*' + fields: ["payload.ipsec_site_connection.id", "payload.id"] + - event_type: "*http.*" traits: &http_audit project_id: fields: payload.initiator.project_id @@ -576,12 +587,13 @@ conf: fields: payload.initiator.name initiator_host_address: fields: payload.initiator.host.address - - event_type: '*http.response' + - event_type: "*http.response" traits: <<: *http_audit reason_code: fields: payload.reason.reasonCode - - event_type: ['dns.domain.create', 'dns.domain.update', 'dns.domain.delete'] + - event_type: + ["dns.domain.create", "dns.domain.update", "dns.domain.delete"] traits: &dns_domain_traits status: fields: payload.status @@ -648,7 +660,13 @@ conf: fields: payload.created_at region: fields: payload.region - - event_type: ['trove.instance.create', 'trove.instance.modify_volume', 'trove.instance.modify_flavor', 'trove.instance.delete'] + - event_type: + [ + "trove.instance.create", + "trove.instance.modify_volume", + "trove.instance.modify_flavor", + "trove.instance.delete", + ] traits: &trove_common_traits name: fields: payload.name @@ -731,7 +749,7 @@ conf: fields: payload.info.db.statement db.params: fields: payload.info.db.params - - event_type: 'magnum.cluster.*' + - event_type: "magnum.cluster.*" traits: &magnum_cluster_crud id: fields: payload.id @@ -764,7 +782,7 @@ conf: fields: payload.observer.id observer_typeURI: fields: payload.observer.typeURI - - event_type: 'alarm.*' + - event_type: "alarm.*" traits: id: fields: payload.alarm_id @@ -1008,11 +1026,11 @@ conf: - resource_type: ipmi_sensor metrics: - - 'hardware.ipmi.power' - - 'hardware.ipmi.temperature' - - 'hardware.ipmi.current' - - 'hardware.ipmi.voltage' - - 'hardware.ipmi.fan' + - "hardware.ipmi.power" + - "hardware.ipmi.temperature" + - "hardware.ipmi.current" + - "hardware.ipmi.voltage" + - "hardware.ipmi.fan" attributes: node: resource_metadata.node @@ -1035,8 +1053,11 @@ conf: - resource_type: swift_account metrics: storage.objects.incoming.bytes: + archive_policy_name: low storage.objects.outgoing.bytes: + archive_policy_name: low storage.objects.size: + archive_policy_name: low storage.objects: storage.objects.containers: storage.containers.objects: @@ -1058,6 +1079,7 @@ conf: display_name: resource_metadata.(display_name|name) volume_type: resource_metadata.volume_type volume_type_id: resource_metadata.volume_type_id + volume_id: resource_metadata.volume_id image_id: resource_metadata.image_id instance_id: resource_metadata.instance_id event_create: @@ -1074,6 +1096,7 @@ conf: project_id: project_id image_id: image_id instance_id: 
instance_id + volume_id: volume_id - resource_type: volume_provider metrics: @@ -1197,99 +1220,99 @@ conf: resource_id: $.payload.image_id project_id: $.payload.owner_id - - name: 'volume.provider.capacity.total' - event_type: 'capacity.backend.*' - type: 'gauge' - unit: 'GB' + - name: "volume.provider.capacity.total" + event_type: "capacity.backend.*" + type: "gauge" + unit: "GB" volume: $.payload.total resource_id: $.payload.name_to_id - - name: 'volume.provider.capacity.free' - event_type: 'capacity.backend.*' - type: 'gauge' - unit: 'GB' + - name: "volume.provider.capacity.free" + event_type: "capacity.backend.*" + type: "gauge" + unit: "GB" volume: $.payload.free resource_id: $.payload.name_to_id - - name: 'volume.provider.capacity.allocated' - event_type: 'capacity.backend.*' - type: 'gauge' - unit: 'GB' + - name: "volume.provider.capacity.allocated" + event_type: "capacity.backend.*" + type: "gauge" + unit: "GB" volume: $.payload.allocated resource_id: $.payload.name_to_id - - name: 'volume.provider.capacity.provisioned' - event_type: 'capacity.backend.*' - type: 'gauge' - unit: 'GB' + - name: "volume.provider.capacity.provisioned" + event_type: "capacity.backend.*" + type: "gauge" + unit: "GB" volume: $.payload.provisioned resource_id: $.payload.name_to_id - - name: 'volume.provider.capacity.virtual_free' - event_type: 'capacity.backend.*' - type: 'gauge' - unit: 'GB' + - name: "volume.provider.capacity.virtual_free" + event_type: "capacity.backend.*" + type: "gauge" + unit: "GB" volume: $.payload.virtual_free resource_id: $.payload.name_to_id - - name: 'volume.provider.pool.capacity.total' - event_type: 'capacity.pool.*' - type: 'gauge' - unit: 'GB' + - name: "volume.provider.pool.capacity.total" + event_type: "capacity.pool.*" + type: "gauge" + unit: "GB" volume: $.payload.total resource_id: $.payload.name_to_id metadata: &provider_pool_meta provider: $.payload.name_to_id.`split(#, 0, 1)` - - name: 'volume.provider.pool.capacity.free' - event_type: 'capacity.pool.*' - type: 'gauge' - unit: 'GB' + - name: "volume.provider.pool.capacity.free" + event_type: "capacity.pool.*" + type: "gauge" + unit: "GB" volume: $.payload.free resource_id: $.payload.name_to_id metadata: - <<: *provider_pool_meta + <<: *provider_pool_meta - - name: 'volume.provider.pool.capacity.allocated' - event_type: 'capacity.pool.*' - type: 'gauge' - unit: 'GB' + - name: "volume.provider.pool.capacity.allocated" + event_type: "capacity.pool.*" + type: "gauge" + unit: "GB" volume: $.payload.allocated resource_id: $.payload.name_to_id metadata: - <<: *provider_pool_meta + <<: *provider_pool_meta - - name: 'volume.provider.pool.capacity.provisioned' - event_type: 'capacity.pool.*' - type: 'gauge' - unit: 'GB' + - name: "volume.provider.pool.capacity.provisioned" + event_type: "capacity.pool.*" + type: "gauge" + unit: "GB" volume: $.payload.provisioned resource_id: $.payload.name_to_id metadata: - <<: *provider_pool_meta + <<: *provider_pool_meta - - name: 'volume.provider.pool.capacity.virtual_free' - event_type: 'capacity.pool.*' - type: 'gauge' - unit: 'GB' + - name: "volume.provider.pool.capacity.virtual_free" + event_type: "capacity.pool.*" + type: "gauge" + unit: "GB" volume: $.payload.virtual_free resource_id: $.payload.name_to_id metadata: - <<: *provider_pool_meta + <<: *provider_pool_meta - - name: 'volume.size' + - name: "volume.size" event_type: - - 'volume.exists' - - 'volume.retype' - - 'volume.create.*' - - 'volume.delete.*' - - 'volume.resize.*' - - 'volume.attach.*' - - 'volume.detach.*' - - 
'volume.update.*' - - 'volume.manage.*' - type: 'gauge' - unit: 'GB' + - "volume.exists" + - "volume.retype" + - "volume.create.*" + - "volume.delete.*" + - "volume.resize.*" + - "volume.attach.*" + - "volume.detach.*" + - "volume.update.*" + - "volume.manage.*" + type: "gauge" + unit: "GB" volume: $.payload.size user_id: $.payload.user_id project_id: $.payload.tenant_id @@ -1300,29 +1323,30 @@ conf: image_id: $.payload.glance_metadata[?key=image_id].value instance_id: $.payload.volume_attachment[0].instance_uuid - - name: 'snapshot.size' + - name: "snapshot.size" event_type: - - 'snapshot.exists' - - 'snapshot.create.*' - - 'snapshot.delete.*' - - 'snapshot.manage.*' - type: 'gauge' - unit: 'GB' + - "snapshot.exists" + - "snapshot.create.*" + - "snapshot.delete.*" + - "snapshot.manage.*" + type: "gauge" + unit: "GB" volume: $.payload.volume_size user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.snapshot_id metadata: display_name: $.payload.display_name + volume_id: $.payload.volume_id - - name: 'backup.size' + - name: "backup.size" event_type: - - 'backup.exists' - - 'backup.create.*' - - 'backup.delete.*' - - 'backup.restore.*' - type: 'gauge' - unit: 'GB' + - "backup.exists" + - "backup.create.*" + - "backup.delete.*" + - "backup.restore.*" + type: "gauge" + unit: "GB" volume: $.payload.size user_id: $.payload.user_id project_id: $.payload.tenant_id @@ -1332,30 +1356,30 @@ conf: # Magnum - name: $.payload.metrics.[*].name - event_type: 'magnum.bay.metrics.*' - type: 'gauge' + event_type: "magnum.bay.metrics.*" + type: "gauge" unit: $.payload.metrics.[*].unit volume: $.payload.metrics.[*].value user_id: $.payload.user_id project_id: $.payload.project_id resource_id: $.payload.resource_id - lookup: ['name', 'unit', 'volume'] + lookup: ["name", "unit", "volume"] # Swift - name: $.payload.measurements.[*].metric.[*].name - event_type: 'objectstore.http.request' - type: 'delta' + event_type: "objectstore.http.request" + type: "delta" unit: $.payload.measurements.[*].metric.[*].unit volume: $.payload.measurements.[*].result resource_id: $.payload.target.id user_id: $.payload.initiator.id project_id: $.payload.initiator.project_id - lookup: ['name', 'unit', 'volume'] + lookup: ["name", "unit", "volume"] - - name: 'memory' + - name: "memory" event_type: &instance_events compute.instance.(?!create.start|update).* - type: 'gauge' - unit: 'MB' + type: "gauge" + unit: "MB" volume: $.payload.memory_mb user_id: $.payload.user_id project_id: $.payload.tenant_id @@ -1371,67 +1395,67 @@ conf: created_at: $.payload.created_at deleted_at: $.payload.deleted_at - - name: 'vcpus' + - name: "vcpus" event_type: *instance_events - type: 'gauge' - unit: 'vcpu' + type: "gauge" + unit: "vcpu" volume: $.payload.vcpus user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_metadata: $.payload.metadata metadata: - <<: *instance_meta + <<: *instance_meta - - name: 'compute.instance.booting.time' - event_type: 'compute.instance.create.end' - type: 'gauge' - unit: 'sec' + - name: "compute.instance.booting.time" + event_type: "compute.instance.create.end" + type: "gauge" + unit: "sec" volume: fields: [$.payload.created_at, $.payload.launched_at] - plugin: 'timedelta' + plugin: "timedelta" project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_metadata: $.payload.metadata metadata: <<: *instance_meta - - name: 'disk.root.size' + - name: "disk.root.size" event_type: *instance_events - type: 'gauge' - unit: 'GB' + type: "gauge" + 
unit: "GB" volume: $.payload.root_gb user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_metadata: $.payload.metadata metadata: - <<: *instance_meta + <<: *instance_meta - - name: 'disk.ephemeral.size' + - name: "disk.ephemeral.size" event_type: *instance_events - type: 'gauge' - unit: 'GB' + type: "gauge" + unit: "GB" volume: $.payload.ephemeral_gb user_id: $.payload.user_id project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_metadata: $.payload.metadata metadata: - <<: *instance_meta + <<: *instance_meta - - name: 'bandwidth' - event_type: 'l3.meter' - type: 'delta' - unit: 'B' + - name: "bandwidth" + event_type: "l3.meter" + type: "delta" + unit: "B" volume: $.payload.bytes project_id: $.payload.tenant_id resource_id: $.payload.label_id - - name: 'compute.node.cpu.frequency' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'MHz' + - name: "compute.node.cpu.frequency" + event_type: "compute.metrics.update" + type: "gauge" + unit: "MHz" volume: $.payload.metrics[?(@.name='cpu.frequency')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.frequency')].timestamp @@ -1440,10 +1464,10 @@ conf: host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.frequency')].source - - name: 'compute.node.cpu.user.time' - event_type: 'compute.metrics.update' - type: 'cumulative' - unit: 'ns' + - name: "compute.node.cpu.user.time" + event_type: "compute.metrics.update" + type: "cumulative" + unit: "ns" volume: $.payload.metrics[?(@.name='cpu.user.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.user.time')].timestamp @@ -1452,10 +1476,10 @@ conf: host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.user.time')].source - - name: 'compute.node.cpu.kernel.time' - event_type: 'compute.metrics.update' - type: 'cumulative' - unit: 'ns' + - name: "compute.node.cpu.kernel.time" + event_type: "compute.metrics.update" + type: "cumulative" + unit: "ns" volume: $.payload.metrics[?(@.name='cpu.kernel.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.kernel.time')].timestamp @@ -1464,10 +1488,10 @@ conf: host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.kernel.time')].source - - name: 'compute.node.cpu.idle.time' - event_type: 'compute.metrics.update' - type: 'cumulative' - unit: 'ns' + - name: "compute.node.cpu.idle.time" + event_type: "compute.metrics.update" + type: "cumulative" + unit: "ns" volume: $.payload.metrics[?(@.name='cpu.idle.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.idle.time')].timestamp @@ -1476,10 +1500,10 @@ conf: host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.idle.time')].source - - name: 'compute.node.cpu.iowait.time' - event_type: 'compute.metrics.update' - type: 'cumulative' - unit: 'ns' + - name: "compute.node.cpu.iowait.time" + event_type: "compute.metrics.update" + type: "cumulative" + unit: "ns" volume: $.payload.metrics[?(@.name='cpu.iowait.time')].value resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.iowait.time')].timestamp @@ -1488,10 +1512,10 @@ conf: host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.iowait.time')].source - - name: 'compute.node.cpu.kernel.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' + - name: 
"compute.node.cpu.kernel.percent" + event_type: "compute.metrics.update" + type: "gauge" + unit: "percent" volume: $.payload.metrics[?(@.name='cpu.kernel.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.kernel.percent')].timestamp @@ -1500,10 +1524,10 @@ conf: host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.kernel.percent')].source - - name: 'compute.node.cpu.idle.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' + - name: "compute.node.cpu.idle.percent" + event_type: "compute.metrics.update" + type: "gauge" + unit: "percent" volume: $.payload.metrics[?(@.name='cpu.idle.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.idle.percent')].timestamp @@ -1512,10 +1536,10 @@ conf: host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.idle.percent')].source - - name: 'compute.node.cpu.user.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' + - name: "compute.node.cpu.user.percent" + event_type: "compute.metrics.update" + type: "gauge" + unit: "percent" volume: $.payload.metrics[?(@.name='cpu.user.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.user.percent')].timestamp @@ -1524,10 +1548,10 @@ conf: host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.user.percent')].source - - name: 'compute.node.cpu.iowait.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' + - name: "compute.node.cpu.iowait.percent" + event_type: "compute.metrics.update" + type: "gauge" + unit: "percent" volume: $.payload.metrics[?(@.name='cpu.iowait.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.iowait.percent')].timestamp @@ -1536,10 +1560,10 @@ conf: host: $.publisher_id source: $.payload.metrics[?(@.name='cpu.iowait.percent')].source - - name: 'compute.node.cpu.percent' - event_type: 'compute.metrics.update' - type: 'gauge' - unit: 'percent' + - name: "compute.node.cpu.percent" + event_type: "compute.metrics.update" + type: "gauge" + unit: "percent" volume: $.payload.metrics[?(@.name='cpu.percent')].value * 100 resource_id: $.payload.host + "_" + $.payload.nodename timestamp: $.payload.metrics[?(@.name='cpu.percent')].timestamp @@ -1551,22 +1575,23 @@ conf: # Identity # NOTE(gordc): hack because jsonpath-rw-ext can't concat starting with string. - name: $.payload.outcome.`sub(/.*/, )` + 'identity.authenticate.' 
+ $.payload.outcome - type: 'delta' - unit: 'user' + type: "delta" + unit: "user" volume: 1 event_type: - - 'identity.authenticate' + - "identity.authenticate" resource_id: $.payload.initiator.id user_id: $.payload.initiator.id # DNS - - name: 'dns.domain.exists' - event_type: 'dns.domain.exists' - type: 'cumulative' - unit: 's' + - name: "dns.domain.exists" + event_type: "dns.domain.exists" + type: "cumulative" + unit: "s" volume: - fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] - plugin: 'timedelta' + fields: + [$.payload.audit_period_beginning, $.payload.audit_period_ending] + plugin: "timedelta" project_id: $.payload.tenant_id resource_id: $.payload.id user_id: $.ctxt.user @@ -1576,13 +1601,14 @@ conf: host: $.publisher_id # Trove - - name: 'trove.instance.exists' - event_type: 'trove.instance.exists' - type: 'cumulative' - unit: 's' + - name: "trove.instance.exists" + event_type: "trove.instance.exists" + type: "cumulative" + unit: "s" volume: - fields: [$.payload.audit_period_beginning, $.payload.audit_period_ending] - plugin: 'timedelta' + fields: + [$.payload.audit_period_beginning, $.payload.audit_period_ending] + plugin: "timedelta" project_id: $.payload.tenant_id resource_id: $.payload.instance_id user_id: $.payload.user_id @@ -1594,14 +1620,14 @@ conf: instance_type_id: $.payload.instance_type_id # Manila - - name: 'manila.share.size' + - name: "manila.share.size" event_type: - - 'share.create.*' - - 'share.delete.*' - - 'share.extend.*' - - 'share.shrink.*' - type: 'gauge' - unit: 'GB' + - "share.create.*" + - "share.delete.*" + - "share.extend.*" + - "share.shrink.*" + type: "gauge" + unit: "GB" volume: $.payload.size user_id: $.payload.user_id project_id: $.payload.project_id @@ -1637,93 +1663,11 @@ conf: - gnocchi:// sources: - events: - - '*' + - "*" name: event_source sinks: - event_sink - policy: {} - audit_api_map: - DEFAULT: - target_endpoint_type: None - path_keywords: - meters: meter_name - resources: resource_id - statistics: None - samples: sample_id - service_endpoints: - metering: service/metering - rally_tests: - CeilometerStats.create_meter_and_get_stats: - - args: - user_id: user-id - resource_id: resource-id - counter_volume: 1 - counter_unit: '' - counter_type: cumulative - runner: - type: constant - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 0 - CeilometerMeters.list_meters: - - runner: - type: constant - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 0 - context: - ceilometer: - counter_name: benchmark_meter - counter_type: gauge - counter_unit: "%" - counter_volume: 1 - resources_per_tenant: 1 - samples_per_resource: 1 - timestamp_interval: 10 - metadata_list: - - status: active - name: rally benchmark on - deleted: 'false' - - status: terminated - name: rally benchmark off - deleted: 'true' - args: - limit: 5 - metadata_query: - status: terminated - CeilometerQueries.create_and_query_samples: - - args: - filter: - "=": - counter_unit: instance - orderby: - limit: 10 - counter_name: cpu_util - counter_type: gauge - counter_unit: instance - counter_volume: 1 - resource_id: resource_id - runner: - type: constant - times: 1 - concurrency: 1 - sla: - failure_rate: - max: 0 - dependencies: - dynamic: - common: - local_image_registry: - jobs: - - ceilometer-image-repo-sync - services: - - endpoint: node - service: local_image_registry static: central: jobs: @@ -1758,18 +1702,6 @@ dependencies: db_sync: jobs: [] services: [] - ks_service: - services: - - endpoint: internal - service: identity - ks_user: - 
services: - - endpoint: internal - service: identity - rabbit_init: - services: - - service: oslo_messaging - endpoint: internal notification: jobs: - ceilometer-db-sync @@ -1780,104 +1712,12 @@ dependencies: service: identity - endpoint: internal service: metric - tests: - services: - - endpoint: internal - service: identity - - endpoint: internal - service: metering - - endpoint: internal - service: metric - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry - -# Names of secrets used by bootstrap and environmental checks -secrets: - identity: - admin: ceilometer-keystone-admin - ceilometer: ceilometer-keystone-user - test: ceilometer-keystone-test - oslo_messaging: - admin: ceilometer-rabbitmq-admin - ceilometer: ceilometer-rabbitmq-user - oci_image_registry: - ceilometer: ceilometer-oci-image-registry - -bootstrap: - enabled: false - ks_user: ceilometer - script: | - openstack token issue # typically overridden by environmental # values, but should include all endpoints # required by this chart endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - ceilometer: - username: ceilometer - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null - identity: - name: keystone - auth: - admin: - region_name: RegionOne - username: admin - password: password - project_name: admin - user_domain_name: default - project_domain_name: default - ceilometer: - role: admin - region_name: RegionOne - username: ceilometer - password: password - project_name: service - user_domain_name: service - project_domain_name: service - test: - role: admin - region_name: RegionOne - username: ceilometer-test - password: password - project_name: test - user_domain_name: service - project_domain_name: service - hosts: - default: keystone - internal: keystone-api - host_fqdn_override: - default: null - path: - default: /v3 - scheme: - default: 'http' + metering: port: api: default: 5000 @@ -1885,74 +1725,16 @@ endpoints: internal: 5000 service: 5000 metric: - name: gnocchi - hosts: - default: gnocchi-api - public: gnocchi - host_fqdn_override: - default: null - path: - default: null - scheme: - default: 'http' port: api: default: 8041 public: 80 internal: 8041 service: 8041 - alarming: - name: aodh - hosts: - default: aodh-api - public: aodh - host_fqdn_override: - default: null - path: - default: null - scheme: - default: 'http' - port: - api: - default: 8042 - public: 80 - oslo_cache: - auth: - # NOTE(portdirect): this is used to define the value for keystone - # authtoken cache encryption key, if not set it will be populated - # automatically with a random value, but to take advantage of - # this feature all services should be set to use the same key, - # and memcache service. 
- memcache_secret_key: null - hosts: - default: memcached - host_fqdn_override: - default: null - port: - memcache: - default: 11211 oslo_messaging: - auth: - admin: - username: rabbitmq - password: password - ceilometer: - username: ceilometer - password: password - statefulset: - replicas: 2 - name: rabbitmq-rabbitmq - hosts: - default: rabbitmq host_fqdn_override: default: rabbitmq.openstack.svc.cluster.local - path: /ceilometer - scheme: rabbit - port: - amqp: - default: 5672 - http: - default: 15672 + fluentd: namespace: fluentbit name: fluentd @@ -1962,61 +1744,13 @@ endpoints: default: null path: default: null - scheme: 'http' + scheme: "http" port: service: default: 24224 metrics: default: 24220 pod: - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - tolerations: - ceilometer: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - mounts: - ceilometer_tests: - init_container: null - ceilometer_tests: - volumeMounts: - volumes: - ceilometer_compute: - init_container: null - ceilometer_compute: - volumeMounts: - volumes: - ceilometer_central: - init_container: null - ceilometer_central: - volumeMounts: - volumes: - ceilometer_ipmi: - init_container: null - ceilometer_ipmi: - volumeMounts: - volumes: - ceilometer_notification: - init_container: null - ceilometer_notification: - volumeMounts: - volumes: - ceilometer_db_sync: - ceilometer_db_sync: - volumeMounts: - volumes: replicas: central: 1 notification: 1 @@ -2064,87 +1798,15 @@ pod: limits: memory: "6144Mi" cpu: "2000m" - jobs: - db_sync: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - rabbit_init: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - ks_service: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - ks_user: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - tests: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - image_repo_sync: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - -network_policy: - ceilometer: - ingress: - - {} - egress: - - {} manifests: - configmap_bin: true - configmap_etc: true deployment_api: false - deployment_central: true deployment_collector: false - daemonset_compute: true - daemonset_ipmi: false - deployment_notification: true ingress_api: false - job_bootstrap: true - job_db_drop: false # using gnocchi so no db init job_db_init: false job_db_init_mongodb: false - # runs ceilometer-upgrade which inits resource types in gnocchi! - job_db_sync: true - job_image_repo_sync: true job_ks_endpoints: false job_ks_service: false - job_ks_user: true - job_rabbit_init: true - pdb_api: true - pod_rally_test: true - network_policy: false - secret_db: true - secret_keystone: true secret_mongodb: false - secret_rabbitmq: true - secret_registry: true - service_api: true service_ingress_api: false -... 
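Reviewer note: with heartbeat_timeout_threshold raised from 30 to 60 while heartbeat_rate stays at 3, the formula quoted in the file (heartbeat_timeout / heartbeat_rate / 2.0) now gives an effective heartbeat send interval of 60 / 3 / 2.0 = 10 seconds rather than 5; the inline comment still shows the old arithmetic. A minimal sketch for double-checking the rabbit tuning that actually lands in the rendered ceilometer configuration is below; the chart reference and namespace are assumptions for illustration only.

  # Render the ceilometer chart with this override file and pull out the
  # oslo_messaging_rabbit tuning for review (chart ref and namespace assumed).
  helm template ceilometer openstack-helm/ceilometer \
    --namespace openstack \
    -f base-helm-configs/ceilometer/ceilometer-helm-overrides.yaml \
    | grep -E 'heartbeat_timeout_threshold|heartbeat_rate|heartbeat_in_pthread|kombu_reconnect_delay'
  # Expected effective interval: 60 / 3 / 2.0 = 10 seconds between heartbeats.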
diff --git a/base-helm-configs/cinder/cinder-helm-overrides.yaml b/base-helm-configs/cinder/cinder-helm-overrides.yaml index dd7d994ad..dc09f14a1 100644 --- a/base-helm-configs/cinder/cinder-helm-overrides.yaml +++ b/base-helm-configs/cinder/cinder-helm-overrides.yaml @@ -1,1484 +1,229 @@ +--- storage: lvm labels: - api: - node_selector_key: openstack-control-plane - node_selector_value: enabled - backup: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - scheduler: - node_selector_key: openstack-control-plane - node_selector_value: enabled - test: null - # test: - # node_selector_key: openstack-control-plane - # node_selector_value: enabled volume: node_selector_key: openstack-storage-node - node_selector_value: enabled - -release_group: null images: tags: - db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - cinder_db_sync: "quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy" - db_drop: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_endpoints: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - cinder_api: "quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy" bootstrap: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + cinder_api: "quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy" + cinder_backup: "quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy" + cinder_backup_storage_init: "quay.io/rackspace/rackerlabs-ceph-config-helper:latest-ubuntu_jammy" + cinder_db_sync: "quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy" cinder_scheduler: "quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy" + cinder_storage_init: "quay.io/rackspace/rackerlabs-ceph-config-helper:latest-ubuntu_jammy" cinder_volume: "quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy" cinder_volume_usage_audit: "quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy" - cinder_storage_init: "quay.io/rackspace/rackerlabs-ceph-config-helper:latest-ubuntu_jammy" - cinder_backup: "quay.io/rackspace/rackerlabs-cinder:2024.1-ubuntu_jammy" - cinder_backup_storage_init: "quay.io/rackspace/rackerlabs-ceph-config-helper:latest-ubuntu_jammy" - test: "quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0" - rabbit_init: "quay.io/rackspace/rackerlabs-rabbitmq:3.13-management" + db_drop: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync - -jobs: - volume_usage_audit: - cron: "5 * * * *" - starting_deadline: 600 - history: - success: 3 - failed: 1 + ks_endpoints: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + rabbit_init: "quay.io/rackspace/rackerlabs-rabbitmq:3.13-management" + test: "quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0" pod: security_context: - volume_usage_audit: - pod: - runAsUser: 42424 - container: - cinder_volume_usage_audit: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false cinder_api: - pod: - runAsUser: 42424 container: - 
ceph_coordination_volume_perms: - runAsUser: 0 - readOnlyRootFilesystem: true cinder_api: - readOnlyRootFilesystem: true allowPrivilegeEscalation: true privileged: true cinder_backup: - pod: - runAsUser: 42424 container: - ceph_backup_keyring_placement: - runAsUser: 0 - readOnlyRootFilesystem: true - ceph_keyring_placement: - runAsUser: 0 - readOnlyRootFilesystem: true - ceph_backup_volume_perms: - runAsUser: 0 - readOnlyRootFilesystem: true - ceph_coordination_volume_perms: - runAsUser: 0 - readOnlyRootFilesystem: true cinder_backup: - # capabilities: - # add: - # - SYS_ADMIN privileged: true - readOnlyRootFilesystem: true - runAsUser: 0 - cinder_scheduler: - pod: - runAsUser: 42424 - container: - ceph_coordination_volume_perms: - runAsUser: 0 - readOnlyRootFilesystem: true - cinder_scheduler: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false cinder_volume: - pod: - runAsUser: 42424 container: - ceph_keyring_placement: - runAsUser: 0 - readOnlyRootFilesystem: true - ceph_coordination_volume_perms: - runAsUser: 0 - readOnlyRootFilesystem: true - init_cinder_conf: - runAsUser: 0 - readOnlyRootFilesystem: true cinder_volume: - # capabilities: - # add: - # - SYS_ADMIN - readOnlyRootFilesystem: true privileged: true - storage_init: - pod: - runAsUser: 42424 - container: - ceph_keyring_placement: - runAsUser: 0 - readOnlyRootFilesystem: true - cinder_backup_storage_init: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - clean: - pod: - runAsUser: 42424 - # container: - # cinder_volume_rbd_secret_clean: - # readOnlyRootFilesystem: true - # allowPrivilegeEscalation: false - create_internal_tenant: - pod: - runAsUser: 42424 - container: - create_internal_tenant: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - tolerations: - cinder: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule useHostNetwork: - volume: true backup: true - mounts: - cinder_api: - init_container: null - cinder_api: - volumeMounts: - volumes: - cinder_scheduler: - init_container: null - cinder_scheduler: - volumeMounts: - volumes: - cinder_volume: - init_container: null - cinder_volume: - volumeMounts: - volumes: - cinder_volume_usage_audit: - init_container: null - cinder_volume_usage_audit: - volumeMounts: - volumes: - cinder_backup: - init_container: null - cinder_backup: - volumeMounts: - volumes: - cinder_tests: - init_container: null - cinder_tests: - volumeMounts: - volumes: - cinder_db_sync: - cinder_db_sync: - volumeMounts: - volumes: - replicas: - api: 1 - volume: 1 - scheduler: 1 - backup: 1 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - disruption_budget: - api: - min_available: 0 - termination_grace_period: - api: - timeout: 30 - resources: - enabled: true - api: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - scheduler: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - volume: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - jobs: - volume_usage_audit: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - 
bootstrap: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - rabbit_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_drop: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - clean: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - backup_storage_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - storage_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_endpoints: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_service: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_user: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - tests: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - image_repo_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - -bootstrap: - enabled: true - ks_user: admin - bootstrap_conf_backends: true - volume_types: - name: - group: - volume_backend_name: - # access_type: "private" - # If you set up access_type to private, only the creator - # will get an access to the volume type. You can extend - # the access to your volume type by providing a list of - # domain names and projects as shown below - # grant_access: - # : - # - - # - - # <...> - # : - # - - # <...> - # Volume QoS if any. By default, None QoS is created. - # Below values with a number at the end need to be replaced - # with real names. - # volume_qos: - # qos_name_1: - # consumer: front-end - # properties: - # key_1: value_1 - # key_2: value_2 - # associates: - # - volume_type_1 - # - volume_type_2 - -network: - api: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - external_policy_local: false - node_port: - enabled: false - port: 30877 + volume: true + oslo_db: + host_fqdn_override: + default: mariadb-cluster-primary.openstack.svc.cluster.local + hosts: + default: mariadb-cluster-primary + oslo_cache: + host_fqdn_override: + default: memcached.openstack.svc.cluster.local + hosts: + default: memcached + oslo_messaging: + host_fqdn_override: + default: rabbitmq.openstack.svc.cluster.local + hosts: + default: rabbitmq-nodes -ceph_client: - # enable this when there is a need to create second ceph backed pointing - # to external ceph cluster - enable_external_ceph_backend: false -# # change this in case of first ceph backend name pointing to internal ceph cluster -# # is diffrent -# internal_ceph_backend: rbd1 -# configmap: ceph-etc -# user_secret_name: pvc-ceph-client-key -# external_ceph: -# # Only when enable_external_ceph_backend is true and rbd_user is NOT null -# # secret for external ceph keyring will be created. 
-# rbd_user: null -# rbd_user_keyring: null -# configmap: null -# conf: -# global: null -# osd: null conf: - paste: - composite:osapi_volume: - use: call:cinder.api:root_app_factory - /: apiversions - /v1: openstack_volume_api_v1 - /v2: openstack_volume_api_v2 - /v3: openstack_volume_api_v3 - composite:openstack_volume_api_v1: - use: call:cinder.api.middleware.auth:pipeline_factory - noauth: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv1 - keystone: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv1 - keystone_nolimit: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv1 - composite:openstack_volume_api_v2: - use: call:cinder.api.middleware.auth:pipeline_factory - noauth: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv2 - keystone: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv2 - keystone_nolimit: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv2 - composite:openstack_volume_api_v3: - use: call:cinder.api.middleware.auth:pipeline_factory - noauth: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler noauth apiv3 - keystone: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv3 - keystone_nolimit: cors http_proxy_to_wsgi request_id faultwrap sizelimit osprofiler authtoken audit keystonecontext apiv3 - filter:request_id: - paste.filter_factory: oslo_middleware.request_id:RequestId.factory - filter:http_proxy_to_wsgi: - paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory - filter:cors: - paste.filter_factory: oslo_middleware.cors:filter_factory - oslo_config_project: cinder - filter:faultwrap: - paste.filter_factory: cinder.api.middleware.fault:FaultWrapper.factory - filter:osprofiler: - paste.filter_factory: osprofiler.web:WsgiMiddleware.factory - filter:noauth: - paste.filter_factory: cinder.api.middleware.auth:NoAuthMiddleware.factory - filter:sizelimit: - paste.filter_factory: oslo_middleware.sizelimit:RequestBodySizeLimiter.factory - app:apiv1: - paste.app_factory: cinder.api.v1.router:APIRouter.factory - app:apiv2: - paste.app_factory: cinder.api.v2.router:APIRouter.factory - app:apiv3: - paste.app_factory: cinder.api.v3.router:APIRouter.factory - pipeline:apiversions: - pipeline: cors http_proxy_to_wsgi faultwrap osvolumeversionapp - app:osvolumeversionapp: - paste.app_factory: cinder.api.versions:Versions.factory - filter:keystonecontext: - paste.filter_factory: cinder.api.middleware.auth:CinderKeystoneContext.factory - filter:authtoken: - paste.filter_factory: keystonemiddleware.auth_token:filter_factory - filter:audit: - paste.filter_factory: keystonemiddleware.audit:filter_factory - audit_map_file: /etc/cinder/api_audit_map.conf - policy: {} - api_audit_map: - DEFAULT: - target_endpoint_type: None - custom_actions: - associate: update/associate - disassociate: update/disassociate_all - disassociate_all: update/disassociate_all - associations: read/list/associations - path_keywords: - defaults: None - detail: None - limits: None - os-quota-specs: project - qos-specs: qos-spec - snapshots: snapshot - types: type - volumes: volume - service_endpoints: - volume: service/storage/block - volumev2: service/storage/block - volumev3: service/storage/block - cinder_sudoers: | - # This sudoers file supports rootwrap for both Kolla 
and LOCI Images. - Defaults !requiretty - Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin" - cinder ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/cinder-rootwrap /etc/cinder/rootwrap.conf *, /var/lib/openstack/bin/cinder-rootwrap /etc/cinder/rootwrap.conf * - rootwrap: | - # Configuration for cinder-rootwrap - # This file should be owned by (and only-writeable by) the root user - - [DEFAULT] - # List of directories to load filter definitions from (separated by ','). - # These directories MUST all be only writeable by root ! - filters_path=/etc/cinder/rootwrap.d - - # List of directories to search executables in, in case filters do not - # explicitely specify a full path (separated by ',') - # If not specified, defaults to system PATH environment variable. - # These directories MUST all be only writeable by root ! - exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin - - # Enable logging to syslog - # Default value is False - use_syslog=False - - # Which syslog facility to use. - # Valid values include auth, authpriv, syslog, local0, local1... - # Default value is 'syslog' - syslog_log_facility=syslog - - # Which messages to log. - # INFO means log all usage - # ERROR means only log unsuccessful attempts - syslog_log_level=ERROR - rootwrap_filters: - volume: - pods: - - volume - content: | - # cinder-rootwrap command filters for volume nodes - # This file should be owned by (and only-writeable by) the root user - - [Filters] - # cinder/volume/iscsi.py: iscsi_helper '--op' ... - ietadm: CommandFilter, ietadm, root - tgtadm: CommandFilter, tgtadm, root - iscsictl: CommandFilter, iscsictl, root - tgt-admin: CommandFilter, tgt-admin, root - cinder-rtstool: CommandFilter, cinder-rtstool, root - scstadmin: CommandFilter, scstadmin, root - - # LVM related show commands - pvs: EnvFilter, env, root, LC_ALL=C, pvs - vgs: EnvFilter, env, root, LC_ALL=C, vgs - lvs: EnvFilter, env, root, LC_ALL=C, lvs - lvdisplay: EnvFilter, env, root, LC_ALL=C, lvdisplay - - # -LVM related show commands with suppress fd warnings - pvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs - vgs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs - lvs_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs - lvdisplay_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay - - - # -LVM related show commands conf var - pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, pvs - vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, vgs - lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvs - lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvdisplay - - # -LVM conf var with suppress fd_warnings - pvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, pvs - vgs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, vgs - lvs_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvs - lvdisplay_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvdisplay - - # os-brick library commands - # os_brick.privileged.run_as_root oslo.privsep context - # This line ties the superuser privs with the config files, context name, - # and (implicitly) the actual python code invoked. 
- privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.* - # The following and any cinder/brick/* entries should all be obsoleted - # by privsep, and may be removed once the os-brick version requirement - # is updated appropriately. - scsi_id: CommandFilter, /lib/udev/scsi_id, root - drbdadm: CommandFilter, drbdadm, root - - # cinder/brick/local_dev/lvm.py: 'vgcreate', vg_name, pv_list - vgcreate: CommandFilter, vgcreate, root - - # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', sizestr, '-n', volume_name,.. - # cinder/brick/local_dev/lvm.py: 'lvcreate', '-L', ... - lvcreate: EnvFilter, env, root, LC_ALL=C, lvcreate - lvcreate_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvcreate - lvcreate_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvcreate - lvcreate_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LVM_SUPPRESS_FD_WARNINGS=, LC_ALL=C, lvcreate - - # cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,... - dd: CommandFilter, dd, root - - # cinder/volume/driver.py: 'lvremove', '-f', %s/%s % ... - lvremove: CommandFilter, lvremove, root - - # cinder/volume/driver.py: 'lvrename', '%(vg)s', '%(orig)s' '(new)s'... - lvrename: CommandFilter, lvrename, root - - # cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(lv_name)s' ... - # cinder/brick/local_dev/lvm.py: 'lvextend', '-L' '%(new_size)s', '%(thin_pool)s' ... - lvextend: EnvFilter, env, root, LC_ALL=C, lvextend - lvextend_lvmconf: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, lvextend - lvextend_fdwarn: EnvFilter, env, root, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend - lvextend_lvmconf_fdwarn: EnvFilter, env, root, LVM_SYSTEM_DIR=, LC_ALL=C, LVM_SUPPRESS_FD_WARNINGS=, lvextend - - # cinder/brick/local_dev/lvm.py: 'lvchange -a y -K ' - lvchange: CommandFilter, lvchange, root - - # cinder/brick/local_dev/lvm.py: 'lvconvert', '--merge', snapshot_name - lvconvert: CommandFilter, lvconvert, root - - # cinder/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',... - # cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ... 
- iscsiadm: CommandFilter, iscsiadm, root - - # cinder/volume/utils.py: utils.temporary_chown(path, 0) - chown: CommandFilter, chown, root - - # cinder/volume/utils.py: copy_volume(..., ionice='...') - ionice_1: ChainingRegExpFilter, ionice, root, ionice, -c[0-3], -n[0-7] - ionice_2: ChainingRegExpFilter, ionice, root, ionice, -c[0-3] - - # cinder/volume/utils.py: setup_blkio_cgroup() - cgcreate: CommandFilter, cgcreate, root - cgset: CommandFilter, cgset, root - cgexec: ChainingRegExpFilter, cgexec, root, cgexec, -g, blkio:\S+ - - # cinder/volume/driver.py - dmsetup: CommandFilter, dmsetup, root - ln: CommandFilter, ln, root - - # cinder/image/image_utils.py - qemu-img: EnvFilter, env, root, LC_ALL=C, qemu-img - qemu-img_convert: CommandFilter, qemu-img, root - - udevadm: CommandFilter, udevadm, root - - # cinder/volume/driver.py: utils.read_file_as_root() - cat: CommandFilter, cat, root - - # cinder/volume/nfs.py - stat: CommandFilter, stat, root - mount: CommandFilter, mount, root - df: CommandFilter, df, root - du: CommandFilter, du, root - truncate: CommandFilter, truncate, root - chmod: CommandFilter, chmod, root - rm: CommandFilter, rm, root - - # cinder/volume/drivers/remotefs.py - mkdir: CommandFilter, mkdir, root - - # cinder/volume/drivers/netapp/nfs.py: - netapp_nfs_find: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -name, img-cache.*, -amin, \+\d+ - - # cinder/volume/drivers/glusterfs.py - chgrp: CommandFilter, chgrp, root - umount: CommandFilter, umount, root - fallocate: CommandFilter, fallocate, root - - # cinder/volumes/drivers/hds/hds.py: - hus-cmd: CommandFilter, hus-cmd, root - hus-cmd_local: CommandFilter, /usr/local/bin/hus-cmd, root - - # cinder/volumes/drivers/hds/hnas_backend.py - ssc: CommandFilter, ssc, root - - # cinder/brick/initiator/connector.py: - ls: CommandFilter, ls, root - tee: CommandFilter, tee, root - multipath: CommandFilter, multipath, root - multipathd: CommandFilter, multipathd, root - systool: CommandFilter, systool, root - - # cinder/volume/drivers/block_device.py - blockdev: CommandFilter, blockdev, root - - # cinder/volume/drivers/ibm/gpfs.py - # cinder/volume/drivers/tintri.py - mv: CommandFilter, mv, root - - # cinder/volume/drivers/ibm/gpfs.py - cp: CommandFilter, cp, root - mmgetstate: CommandFilter, /usr/lpp/mmfs/bin/mmgetstate, root - mmclone: CommandFilter, /usr/lpp/mmfs/bin/mmclone, root - mmlsattr: CommandFilter, /usr/lpp/mmfs/bin/mmlsattr, root - mmchattr: CommandFilter, /usr/lpp/mmfs/bin/mmchattr, root - mmlsconfig: CommandFilter, /usr/lpp/mmfs/bin/mmlsconfig, root - mmlsfs: CommandFilter, /usr/lpp/mmfs/bin/mmlsfs, root - mmlspool: CommandFilter, /usr/lpp/mmfs/bin/mmlspool, root - mkfs: CommandFilter, mkfs, root - mmcrfileset: CommandFilter, /usr/lpp/mmfs/bin/mmcrfileset, root - mmlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmlinkfileset, root - mmunlinkfileset: CommandFilter, /usr/lpp/mmfs/bin/mmunlinkfileset, root - mmdelfileset: CommandFilter, /usr/lpp/mmfs/bin/mmdelfileset, root - mmcrsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmcrsnapshot, root - mmdelsnapshot: CommandFilter, /usr/lpp/mmfs/bin/mmdelsnapshot, root - - # cinder/volume/drivers/ibm/gpfs.py - # cinder/volume/drivers/ibm/ibmnas.py - find_maxdepth_inum: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -ignore_readdir_race, -inum, \d+, -print0, -quit - - # cinder/brick/initiator/connector.py: - aoe-revalidate: CommandFilter, aoe-revalidate, root - aoe-discover: CommandFilter, aoe-discover, root - aoe-flush: 
CommandFilter, aoe-flush, root - - # cinder/brick/initiator/linuxscsi.py: - sg_scan: CommandFilter, sg_scan, root - - #cinder/backup/services/tsm.py - dsmc:CommandFilter,/usr/bin/dsmc,root - - # cinder/volume/drivers/hitachi/hbsd_horcm.py - raidqry: CommandFilter, raidqry, root - raidcom: CommandFilter, raidcom, root - pairsplit: CommandFilter, pairsplit, root - paircreate: CommandFilter, paircreate, root - pairdisplay: CommandFilter, pairdisplay, root - pairevtwait: CommandFilter, pairevtwait, root - horcmstart.sh: CommandFilter, horcmstart.sh, root - horcmshutdown.sh: CommandFilter, horcmshutdown.sh, root - horcmgr: EnvFilter, env, root, HORCMINST=, /etc/horcmgr - - # cinder/volume/drivers/hitachi/hbsd_snm2.py - auman: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auman - auluref: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluref - auhgdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgdef - aufibre1: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aufibre1 - auhgwwn: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgwwn - auhgmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgmap - autargetmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetmap - aureplicationvvol: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationvvol - auluadd: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluadd - auludel: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auludel - auluchgsize: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluchgsize - auchapuser: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auchapuser - autargetdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetdef - autargetopt: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetopt - autargetini: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetini - auiscsi: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auiscsi - audppool: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/audppool - aureplicationlocal: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationlocal - aureplicationmon: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationmon - - # cinder/volume/drivers/hgst.py - vgc-cluster: CommandFilter, vgc-cluster, root - - # cinder/volume/drivers/vzstorage.py - pstorage-mount: CommandFilter, pstorage-mount, root - pstorage: CommandFilter, pstorage, root - ploop: CommandFilter, ploop, root - - # initiator/connector.py: - drv_cfg: CommandFilter, 
/opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid - ceph: - override: - append: - monitors: [] - admin_keyring: null - pools: - backup: - replication: 3 - crush_rule: replicated_rule - chunk_size: 8 - app_name: cinder-backup - cinder.volumes: - replication: 3 - crush_rule: replicated_rule - chunk_size: 8 - app_name: cinder-volume + backends: + lvmdriver-1: + image_volume_cache_enabled: true + iscsi_iotype: fileio + iscsi_num_targets: 100 + lvm_type: default + target_helper: tgtadm + target_port: 3260 + target_protocol: iscsi + volume_backend_name: LVM_iSCSI + volume_clear: zero + volume_driver: cinder_rxt.rackspace.RXTLVM + volume_group: cinder-volumes-1 cinder: DEFAULT: - storage_availability_zone: az1 - default_availability_zone: az1 allow_availability_zone_fallback: true - scheduler_default_filters: AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter - volume_usage_audit_period: hour - resource_query_filters_file: /etc/cinder/resource_filters.json - log_config_append: /etc/cinder/logging.conf - use_syslog: false - use_stderr: true - enable_v1_api: false - enable_v2_api: false - volume_name_template: "%s" - osapi_volume_workers: 8 - glance_api_version: 2 - os_region_name: RegionOne - host: cinder-volume-worker - # NOTE(portdirect): the bind port should not be defined, and is manipulated - # via the endpoints section. - osapi_volume_listen_port: null - enabled_backends: "lvmdriver-1" - default_volume_type: "lvmdriver-1" - # NOTE(portdirect): "cinder.backup.drivers.ceph" and - # "cinder.backup.drivers.posix" also supported - # NOTE(rchurch): As of Stein, drivers by class name are required - # - cinder.backup.drivers.swift.SwiftBackupDriver - # - cinder.backup.drivers.ceph.CephBackupDriver - # - cinder.backup.drivers.posix.PosixBackupDriver - backup_driver: "cinder.backup.drivers.swift.SwiftBackupDriver" + backup_compression_algorithm: zstd backup_swift_auth: per_user backup_swift_auth_version: 3 - backup_compression_algorithm: zstd - # # Backup: Ceph RBD options - # backup_ceph_conf: "/etc/ceph/ceph.conf" - # backup_ceph_user: cinderbackup - # backup_ceph_pool: cinder.backups - # Backup: Posix options - backup_posix_path: /var/lib/cinder/backup - auth_strategy: keystone - # Internal tenant id - internal_project_name: internal_cinder - internal_user_name: internal_cinder + default_availability_zone: az1 + default_volume_type: lvmdriver-1 + enabled_backends: lvmdriver-1 + osapi_volume_workers: 2 rootwrap_config: /etc/cinder/rootwrap.conf - use_multipath_for_image_xfer: False #Add Cinder Multipath support for image xfer - database: - max_retries: -1 + scheduler_default_filters: "AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter" + storage_availability_zone: az1 + use_multipath_for_image_xfer: false barbican: barbican_endpoint_type: internal key_manager: backend: barbican keystone_authtoken: - service_token_roles: service - service_token_roles_required: true - auth_version: v3 auth_type: password + auth_version: v3 memcache_security_strategy: ENCRYPT + service_token_roles: service + service_token_roles_required: true service_type: volumev3 - nova: - auth_type: password - auth_version: v3 - interface: internal - oslo_policy: - policy_file: /etc/cinder/policy.yaml - oslo_concurrency: - lock_path: /tmp/cinder - oslo_messaging_notifications: - driver: messagingv2 - oslo_middleware: - enable_proxy_headers_parsing: true oslo_messaging_rabbit: amqp_durable_queues: false - # We define use of quorum queues via kustomize but this was enabling HA queues 
instead - # ha_queues are deprecated, explicitly set to false and set quorum_queue true rabbit_ha_queues: false rabbit_quorum_queue: true - # TODO: Not available until 2024.1, but once it is, we want to enable these! - # new feature ref; https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html rabbit_transient_quorum_queue: false use_queue_manager: false - # Reconnect after a node outage more quickly rabbit_interval_max: 10 # Send more frequent heartbeats and fail unhealthy nodes faster # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 heartbeat_rate: 3 - heartbeat_timeout_threshold: 30 - # Setting lower kombu_reconnect_delay should resolve isssue with HA failing when one node is down + heartbeat_timeout_threshold: 60 + # DEPRECATION: (warning) heartbeat_in_pthread will be deprecated in 2024.2 + heartbeat_in_pthread: True + # Setting lower kombu_reconnect_delay should resolve issue with HA failing when one node is down # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 kombu_reconnect_delay: 0.5 - coordination: - backend_url: file:///var/lib/cinder/coordination - service_user: - auth_type: password - send_service_user_token: true + cinder_api_uwsgi: + uwsgi: + processes: 4 + threads: 2 + enable_iscsi: true logging: - loggers: - keys: - - root - - cinder - handlers: - keys: - - stdout - - stderr - - "null" - formatters: - keys: - - context - - default logger_root: - level: INFO handlers: - stdout - logger_cinder: level: INFO - handlers: - - stdout - qualname: cinder - logger_amqp: - level: WARNING - handlers: stderr - qualname: amqp - logger_amqplib: - level: WARNING - handlers: stderr - qualname: amqplib - logger_eventletwsgi: - level: WARNING - handlers: stderr - qualname: eventlet.wsgi.server - logger_sqlalchemy: - level: WARNING - handlers: stderr - qualname: sqlalchemy - logger_boto: - level: WARNING - handlers: stderr - qualname: boto - handler_null: - class: logging.NullHandler - formatter: default - args: () - handler_stdout: - class: StreamHandler - args: (sys.stdout,) - formatter: context - handler_stderr: - class: StreamHandler - args: (sys.stderr,) - formatter: context - formatter_context: - class: oslo_log.formatters.ContextFormatter - datefmt: "%Y-%m-%d %H:%M:%S" - formatter_default: - format: "%(message)s" - datefmt: "%Y-%m-%d %H:%M:%S" rabbitmq: policies: [] - backends: - # Those options will be written to backends.conf as-is. 
- lvmdriver-1: - volume_group: cinder-volumes-1 - volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver - volume_backend_name: LVM_iSCSI - volume_clear: zero - volume_driver: cinder_rxt.rackspace.RXTLVM - lvm_type: default - image_volume_cache_enabled: True - iscsi_iotype: fileio - iscsi_num_targets: 100 - target_protocol: iscsi - target_helper: tgtadm - target_port: 3260 - rally_tests: - run_tempest: false - clean_up: | - VOLUMES=$(openstack volume list -f value | grep -e "^s_rally_" | awk '{ print $1 }') - if [ -n "$VOLUMES" ]; then - echo $VOLUMES | xargs openstack volume delete - fi - tests: - CinderVolumes.create_and_delete_volume: - - args: - size: 1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - - args: - size: - max: 5 - min: 1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - resource_filters: - volume: - - name - - status - - metadata - - bootable - - migration_status - - availability_zone - - group_id - backup: - - name - - status - - volume_id - snapshot: - - name - - status - - volume_id - - metadata - - availability_zone - group: [] - group_snapshot: - - status - - group_id - attachment: - - volume_id - - status - - instance_id - - attach_status - message: - - resource_uuid - - resource_type - - event_id - - request_id - - message_level - pool: - - name - - volume_type - volume_type: [] - enable_iscsi: true -backup: - # external_ceph_rbd: - # enabled: false - # admin_keyring: null - # configmap: null - # conf: - # global: null - # osd: null - posix: - volume: - class_name: general - size: 10Gi dependencies: - dynamic: - common: - local_image_registry: - jobs: - - cinder-image-repo-sync - services: - - endpoint: node - service: local_image_registry static: api: jobs: - cinder-db-sync - cinder-ks-user - cinder-ks-endpoints - # - cinder-rabbit-init - # - cinder-storage-init - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity backup: jobs: - cinder-db-sync - cinder-ks-user - cinder-ks-endpoints - # - cinder-rabbit-init - # - cinder-storage-init - # - cinder-backup-storage-init - services: - - endpoint: internal - service: identity - - endpoint: internal - service: volume - backup_storage_init: - jobs: null - bootstrap: - services: - - endpoint: internal - service: identity - - endpoint: internal - service: volume - pod: - - requireSameNode: false - labels: - application: cinder - component: volume - clean: - jobs: null - db_drop: - services: - - endpoint: internal - service: oslo_db - db_init: - services: - - endpoint: internal - service: oslo_db db_sync: jobs: null - services: - - endpoint: internal - service: oslo_db - ks_endpoints: - jobs: - - cinder-ks-service - services: - - endpoint: internal - service: identity - ks_service: - services: - - endpoint: internal - service: identity - ks_user: - services: - - endpoint: internal - service: identity - rabbit_init: - services: - - service: oslo_messaging - endpoint: internal scheduler: jobs: - cinder-db-sync - cinder-ks-user - cinder-ks-endpoints - # - cinder-rabbit-init - # - cinder-storage-init - services: - - endpoint: internal - service: identity - - endpoint: internal - service: volume - storage_init: - jobs: null - tests: - services: - - endpoint: internal - service: identity - - endpoint: internal - service: volume volume: jobs: - cinder-db-sync - cinder-ks-user - cinder-ks-endpoints - # - cinder-rabbit-init - # - cinder-storage-init - services: - - endpoint: internal - service: identity - - 
endpoint: internal - service: volume volume_usage_audit: jobs: - cinder-db-sync - cinder-ks-user - cinder-ks-endpoints - # - cinder-rabbit-init - # - cinder-storage-init - services: - - endpoint: internal - service: identity - - endpoint: internal - service: volume - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry - create_internal_tenant: - services: - - endpoint: internal - service: identity -# Names of secrets used by bootstrap and environmental checks -secrets: - identity: - admin: cinder-keystone-admin - cinder: cinder-keystone-user - # test: cinder-keystone-test - oslo_db: - admin: mariadb - cinder: cinder-db-password - rbd: - backup: cinder-backup-rbd-keyring - volume: cinder-volume-rbd-keyring - volume_external: cinder-volume-external-rbd-keyring - oslo_messaging: - admin: rabbitmq-default-user - cinder: cinder-rabbitmq-user - tls: - volume: - api: - public: cinder-tls-public - internal: cinder-tls-api - oci_image_registry: - cinder: cinder-oci-image-registry - -# We use a different layout of the endpoints here to account for versioning -# this swaps the service name and type, and should be rolled out to other -# services. endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - cinder: - username: cinder - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null + fluentd: + namespace: fluentbit identity: - name: keystone - auth: - admin: - region_name: RegionOne - username: admin - password: password - project_name: admin - user_domain_name: default - project_domain_name: default - cinder: - role: admin,service - region_name: RegionOne - username: cinder - password: password - project_name: service - user_domain_name: service - project_domain_name: service - # test: - # role: admin - # region_name: RegionOne - # username: cinder-test - # password: password - # project_name: test - # user_domain_name: service - # project_domain_name: service - hosts: - default: keystone - internal: keystone-api - host_fqdn_override: - default: null - path: - default: /v3 - scheme: - default: http port: api: default: 5000 - public: 80 internal: 5000 + public: 80 service: 5000 image: - name: glance - hosts: - default: glance-api - public: glance - host_fqdn_override: - default: null - path: - default: null - scheme: - default: http port: api: default: 9292 - public: 80 internal: 9292 + public: 80 service: 9292 - volume: - name: cinder + oslo_db: + host_fqdn_override: + default: mariadb-cluster-primary.openstack.svc.cluster.local hosts: - default: cinder-api - public: cinder + default: mariadb-cluster-primary + oslo_cache: host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: '/v1/%(tenant_id)s' - scheme: - default: 'http' + default: memcached.openstack.svc.cluster.local + hosts: + default: memcached + oslo_messaging: + host_fqdn_override: + default: rabbitmq.openstack.svc.cluster.local + hosts: + default: rabbitmq-nodes + volume: port: api: default: 8776 - public: 80 internal: 8776 + public: 80 service: 
8776 volumev2: - name: cinderv2 - hosts: - default: cinder-api - public: cinder - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: '/v2/%(tenant_id)s' - scheme: - default: 'http' port: api: default: 8776 - public: 80 internal: 8776 + public: 80 service: 8776 volumev3: - name: cinderv3 - hosts: - default: cinder-api - public: cinder - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: '/v3/%(tenant_id)s' - scheme: - default: 'http' port: api: default: 8776 - public: 80 internal: 8776 + public: 80 service: 8776 - oslo_db: - auth: - admin: - username: root - password: password - secret: - tls: - internal: mariadb-tls-direct - cinder: - username: cinder - password: password - hosts: - default: mariadb-cluster-primary - host_fqdn_override: - default: null - path: /cinder - scheme: mysql+pymysql - port: - mysql: - default: 3306 - oslo_messaging: - auth: - admin: - username: rabbitmq - password: password - secret: - tls: - internal: rabbitmq-tls-direct - cinder: - username: cinder - password: password - statefulset: - replicas: 3 - name: rabbitmq-server - hosts: - default: rabbitmq-nodes - host_fqdn_override: - default: rabbitmq.openstack.svc.cluster.local - path: /cinder - scheme: rabbit - port: - amqp: - default: 5672 - http: - default: 15672 - oslo_cache: - auth: - # NOTE(portdirect): this is used to define the value for keystone - # authtoken cache encryption key, if not set it will be populated - # automatically with a random value, but to take advantage of - # this feature all services should be set to use the same key, - # and memcache service. - memcache_secret_key: null - hosts: - default: memcached - host_fqdn_override: - default: null - port: - memcache: - default: 11211 - fluentd: - namespace: fluentbit - name: fluentd - hosts: - default: fluentd-logging - host_fqdn_override: - default: null - path: - default: null - scheme: 'http' - port: - service: - default: 24224 - metrics: - default: 24220 - kube_dns: - namespace: kube-system - name: kubernetes-dns - hosts: - default: kube-dns - host_fqdn_override: - default: null - path: - default: null - scheme: http - port: - dns: - default: 53 - protocol: UDP - ingress: - namespace: null - name: ingress - hosts: - default: ingress - port: - ingress: - default: 80 - -network_policy: - cinder: - ingress: - - {} - egress: - - {} - -# NOTE(helm_hook): helm_hook might break for helm2 binary. -# set helm3_hook: false when using the helm2 binary. 
-helm3_hook: true - -tls: - identity: false - oslo_messaging: false - oslo_db: false manifests: - certificates: false - configmap_bin: true - configmap_etc: true - cron_volume_usage_audit: true - deployment_api: true deployment_backup: false - deployment_scheduler: true deployment_volume: false ingress_api: false - job_backup_storage_init: true job_bootstrap: false - job_clean: true - job_create_internal_tenant: true job_db_init: false - job_image_repo_sync: true job_rabbit_init: false - job_db_sync: true - job_db_drop: false - job_ks_endpoints: true - job_ks_service: true - job_ks_user: true job_storage_init: false - pdb_api: true pod_rally_test: false - pvc_backup: true - network_policy: false secret_db: false secret_ingress_tls: false - secret_keystone: true secret_rabbitmq: false - secret_registry: true - service_api: true service_ingress_api: false diff --git a/base-helm-configs/designate/designate-helm-overrides.yaml b/base-helm-configs/designate/designate-helm-overrides.yaml index 184909d3b..8ad0a9dda 100644 --- a/base-helm-configs/designate/designate-helm-overrides.yaml +++ b/base-helm-configs/designate/designate-helm-overrides.yaml @@ -477,22 +477,19 @@ conf: driver: messagingv2 oslo_messaging_rabbit: amqp_durable_queues: false - # We define use of quorum queues via kustomize but this was enabling HA queues instead - # ha_queues are deprecated, explicitly set to false and set quorum_queue true rabbit_ha_queues: false rabbit_quorum_queue: true - # TODO: Not available until 2024.1, but once it is, we want to enable these! - # new feature ref; https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html rabbit_transient_quorum_queue: false use_queue_manager: false - # Reconnect after a node outage more quickly rabbit_interval_max: 10 # Send more frequent heartbeats and fail unhealthy nodes faster # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 heartbeat_rate: 3 - heartbeat_timeout_threshold: 30 - # Setting lower kombu_reconnect_delay should resolve isssue with HA failing when one node is down + heartbeat_timeout_threshold: 60 + # DEPRECATION: (warning) heartbeat_in_pthread will be deprecated in 2024.2 + heartbeat_in_pthread: True + # Setting lower kombu_reconnect_delay should resolve issue with HA failing when one node is down # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 kombu_reconnect_delay: 0.5 @@ -656,7 +653,7 @@ endpoints: path: default: / scheme: - default: 'http' + default: "http" port: api: default: 9001 @@ -671,7 +668,7 @@ endpoints: path: default: null scheme: - default: 'tcp' + default: "tcp" port: ipc: default: 5354 @@ -754,7 +751,7 @@ endpoints: default: null path: default: null - scheme: 'http' + scheme: "http" port: service: default: 24224 @@ -791,4 +788,3 @@ manifests: service_api: true service_mdns: true service_ingress_api: false -... 
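A worked example of the oslo_messaging_rabbit heartbeat tuning that this diff applies to the cinder, designate, and glance overrides, assuming the relationship already quoted in the config comments (interval between heartbeats = heartbeat_timeout_threshold / heartbeat_rate / 2.0); the values come from this diff, but the interpretation of the results is an assumption, not part of the change:
# old values: heartbeat_timeout_threshold=30, heartbeat_rate=3 -> 30 / 3 / 2.0 = 5  (a heartbeat roughly every 5s; a peer is considered down after 30s of silence)
# new values: heartbeat_timeout_threshold=60, heartbeat_rate=3 -> 60 / 3 / 2.0 = 10 (a heartbeat roughly every 10s; a peer is considered down after 60s of silence)
# kombu_reconnect_delay=0.5 keeps the pause before reconnecting to a surviving RabbitMQ node short, per the linked mailing-list thread and review.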
diff --git a/base-helm-configs/envoyproxy-gateway/envoy-gateway-helm-overrides.yaml b/base-helm-configs/envoyproxy-gateway/envoy-gateway-helm-overrides.yaml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/base-helm-configs/envoyproxy-gateway/envoy-gateway-helm-overrides.yaml @@ -0,0 +1 @@ +--- diff --git a/base-helm-configs/glance/glance-helm-overrides.yaml b/base-helm-configs/glance/glance-helm-overrides.yaml index cb67cdcfa..53a0a9476 100644 --- a/base-helm-configs/glance/glance-helm-overrides.yaml +++ b/base-helm-configs/glance/glance-helm-overrides.yaml @@ -1,19 +1,7 @@ +--- # radosgw, rbd, swift or pvc storage: pvc -labels: - api: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - test: - node_selector_key: openstack-control-plane - node_selector_value: enabled - -release_group: null - images: tags: test: "quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0" @@ -31,12 +19,6 @@ images: bootstrap: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync bootstrap: enabled: true @@ -71,167 +53,9 @@ network_policy: - {} conf: - software: - rbd: - rbd_store_pool_app_name: glance-image - rally_tests: - run_tempest: false - tests: - GlanceImages.create_and_delete_image: - - args: - container_format: bare - disk_format: qcow2 - image_location: http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - GlanceImages.create_and_list_image: - - args: - container_format: bare - disk_format: qcow2 - image_location: http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - ceph: - monitors: [] - admin_keyring: null - override: - append: - ceph_client: - override: - append: - paste: - pipeline:glance-api: - pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler unauthenticated-context rootapp - pipeline:glance-api-caching: - pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler unauthenticated-context cache rootapp - pipeline:glance-api-cachemanagement: - pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler unauthenticated-context cache cachemanage rootapp - pipeline:glance-api-keystone: - pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler authtoken audit context rootapp - pipeline:glance-api-keystone+caching: - pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler authtoken audit context cache rootapp - pipeline:glance-api-keystone+cachemanagement: - pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler authtoken audit context cache cachemanage rootapp - pipeline:glance-api-trusted-auth: - pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler context rootapp - pipeline:glance-api-trusted-auth+cachemanagement: - pipeline: cors healthcheck http_proxy_to_wsgi versionnegotiation osprofiler context cache cachemanage rootapp - composite:rootapp: - paste.composite_factory: glance.api:root_app_factory - /: apiversions - /v1: apiv1app - /v2: apiv2app - app:apiversions: - paste.app_factory: 
glance.api.versions:create_resource - app:apiv1app: - paste.app_factory: glance.api.v1.router:API.factory - app:apiv2app: - paste.app_factory: glance.api.v2.router:API.factory - filter:healthcheck: - paste.filter_factory: oslo_middleware:Healthcheck.factory - backends: disable_by_file - disable_by_file_path: /etc/glance/healthcheck_disable - filter:versionnegotiation: - paste.filter_factory: glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory - filter:cache: - paste.filter_factory: glance.api.middleware.cache:CacheFilter.factory - filter:cachemanage: - paste.filter_factory: glance.api.middleware.cache_manage:CacheManageFilter.factory - filter:context: - paste.filter_factory: glance.api.middleware.context:ContextMiddleware.factory - filter:unauthenticated-context: - paste.filter_factory: glance.api.middleware.context:UnauthenticatedContextMiddleware.factory - filter:authtoken: - paste.filter_factory: keystonemiddleware.auth_token:filter_factory - delay_auth_decision: true - filter:audit: - paste.filter_factory: keystonemiddleware.audit:filter_factory - audit_map_file: /etc/glance/api_audit_map.conf - filter:gzip: - paste.filter_factory: glance.api.middleware.gzip:GzipMiddleware.factory - filter:osprofiler: - paste.filter_factory: osprofiler.web:WsgiMiddleware.factory - hmac_keys: SECRET_KEY # DEPRECATED - enabled: yes # DEPRECATED - filter:cors: - paste.filter_factory: oslo_middleware.cors:filter_factory - oslo_config_project: glance - oslo_config_program: glance-api - filter:http_proxy_to_wsgi: - paste.filter_factory: oslo_middleware:HTTPProxyToWSGI.factory - policy: {} - glance_sudoers: | - # This sudoers file supports rootwrap for both Kolla and LOCI Images. - Defaults !requiretty - Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin" - glance ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/glance-rootwrap /etc/glance/rootwrap.conf *, /var/lib/openstack/bin/glance-rootwrap /etc/glance/rootwrap.conf * - rootwrap: | - # Configuration for glance-rootwrap - # This file should be owned by (and only-writable by) the root user - - [DEFAULT] - # List of directories to load filter definitions from (separated by ','). - # These directories MUST all be only writeable by root ! - filters_path=/etc/glance/rootwrap.d,/usr/share/glance/rootwrap - - # List of directories to search executables in, in case filters do not - # explicitely specify a full path (separated by ',') - # If not specified, defaults to system PATH environment variable. - # These directories MUST all be only writeable by root ! - exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin - - # Enable logging to syslog - # Default value is False - use_syslog=False - - # Which syslog facility to use. - # Valid values include auth, authpriv, syslog, local0, local1... - # Default value is 'syslog' - syslog_log_facility=syslog - - # Which messages to log. 
- # INFO means log all usage - # ERROR means only log unsuccessful attempts - syslog_log_level=ERROR - rootwrap_filters: - glance_cinder_store: - pods: - - api - content: | - # glance-rootwrap command filters for glance cinder store - # This file should be owned by (and only-writable by) the root user - - [Filters] - # cinder store driver - disk_chown: RegExpFilter, chown, root, chown, \d+, /dev/(?!.*/\.\.).* - - # os-brick library commands - # os_brick.privileged.run_as_root oslo.privsep context - # This line ties the superuser privs with the config files, context name, - # and (implicitly) the actual python code invoked. - privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.* - - chown: CommandFilter, chown, root - mount: CommandFilter, mount, root - umount: CommandFilter, umount, root glance: DEFAULT: - log_config_append: /etc/glance/logging.conf - # NOTE(portdirect): the bind port should not be defined, and is manipulated - # via the endpoints section. - bind_port: null - workers: 8 - enable_v1_api: False + workers: 2 # NOTE(cloudnull): This option is required when using the new glance multi-backend feature. # The example below is for the rxt_swift backend, but could easily be used # for other backends. @@ -241,139 +65,58 @@ conf: oslo_middleware: enable_proxy_headers_parsing: true keystone_authtoken: - service_token_roles: service - service_token_roles_required: true auth_type: password auth_version: v3 memcache_security_strategy: ENCRYPT + service_token_roles: service + service_token_roles_required: true service_type: image glance_store: # NOTE(cloudnull): When using the glance multi-backend feature, the default_backend # option should be set to the name of the default backend section. # default_backend: rxt_swift - cinder_catalog_info: volumev3::internalURL - rbd_store_chunk_size: 8 - rbd_store_replication: 3 - rbd_store_crush_rule: replicated_rule - rbd_store_pool: glance.images - rbd_store_user: glance - rbd_store_ceph_conf: /etc/ceph/ceph.conf filesystem_store_datadir: /var/lib/glance/images - default_swift_reference: ref1 - swift_store_container: glance swift_auth_address: https://swift.cluster.local swift_auth_version: 3 swift_user: glance:glance-store swift_password: override_from_your_secrets_files - swift_store_create_container_on_put: true - swift_store_config_file: /etc/glance/swift-store.conf - swift_store_endpoint_type: internalURL rxt_swift: swift_store_auth_address: http://keystone-api.openstack.svc.cluster.local:5000/v3 swift_store_create_container_on_put: true swift_store_multi_tenant: true swift_store_container: glance swift_store_admin_tenants: admin,image-services - os_glance_tasks_store: - filesystem_store_datadir: /var/lib/glance/tmp - os_glance_staging_store: - filesystem_store_datadir: /var/lib/glance/tmp - paste_deploy: - flavor: keystone database: idle_timeout: 3600 connection_recycle_time: 3600 pool_timeout: 60 max_retries: -1 - oslo_concurrency: - lock_path: /tmp/glance - oslo_messaging_notifications: - driver: messagingv2 oslo_messaging_rabbit: amqp_durable_queues: false - # We define use of quorum queues via kustomize but this was enabling HA queues instead - # ha_queues are deprecated, explicitly set to false and set quorum_queue true rabbit_ha_queues: false rabbit_quorum_queue: true - # TODO: Not available until 2024.1, but once it is, we want to enable these! 
- # new feature ref; https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html rabbit_transient_quorum_queue: false use_queue_manager: false - # Reconnect after a node outage more quickly rabbit_interval_max: 10 # Send more frequent heartbeats and fail unhealthy nodes faster # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 heartbeat_rate: 3 - heartbeat_timeout_threshold: 30 - # Setting lower kombu_reconnect_delay should resolve isssue with HA failing when one node is down + heartbeat_timeout_threshold: 60 + # DEPRECATION: (warning) heartbeat_in_pthread will be deprecated in 2024.2 + heartbeat_in_pthread: True + # Setting lower kombu_reconnect_delay should resolve issue with HA failing when one node is down # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 kombu_reconnect_delay: 0.5 - oslo_policy: - policy_file: /etc/glance/policy.yaml - cors: {} + glance_api_uwsgi: + uwsgi: + processes: 4 logging: - loggers: - keys: - - root - - glance - handlers: - keys: - - stdout - - stderr - - "null" - formatters: - keys: - - context - - default logger_root: level: INFO handlers: - stdout - logger_glance: - level: INFO - handlers: - - stdout - qualname: glance - logger_amqp: - level: WARNING - handlers: stderr - qualname: amqp - logger_amqplib: - level: WARNING - handlers: stderr - qualname: amqplib - logger_eventletwsgi: - level: WARNING - handlers: stderr - qualname: eventlet.wsgi.server - logger_sqlalchemy: - level: WARNING - handlers: stderr - qualname: sqlalchemy - logger_boto: - level: WARNING - handlers: stderr - qualname: boto - handler_null: - class: logging.NullHandler - formatter: default - args: () - handler_stdout: - class: StreamHandler - args: (sys.stdout,) - formatter: context - handler_stderr: - class: StreamHandler - args: (sys.stderr,) - formatter: context - formatter_context: - class: oslo_log.formatters.ContextFormatter - datefmt: "%Y-%m-%d %H:%M:%S" - formatter_default: - format: "%(message)s" - datefmt: "%Y-%m-%d %H:%M:%S" api_audit_map: DEFAULT: target_endpoint_type: None @@ -384,7 +127,7 @@ conf: members: member tags: tag service_endpoints: - image: 'service/storage/image' + image: "service/storage/image" swift_store: | [{{ .Values.conf.glance.glance_store.default_swift_reference }}] {{- if eq .Values.storage "radosgw" }} @@ -411,180 +154,26 @@ conf: rabbitmq: policies: [] -network: - api: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - nginx.ingress.kubernetes.io/proxy-body-size: "0" - external_policy_local: false - node_port: - enabled: false - port: 30092 - volume: - class_name: general-multi-attach # This can be changed as needed - size: 10Gi # This should be set to 100Gi in production + class_name: general-multi-attach # This can be changed as needed + size: 10Gi # This should be set to 100Gi in production dependencies: - dynamic: - common: - local_image_registry: - jobs: - - glance-image-repo-sync - services: - - endpoint: node - service: local_image_registry static: api: jobs: - glance-db-sync - glance-ks-user - glance-ks-endpoints - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: oslo_messaging bootstrap: jobs: null - services: - - endpoint: internal - service: identity - - 
endpoint: internal - service: image clean: jobs: null - db_drop: - services: - - endpoint: internal - service: oslo_db - db_init: - services: - - endpoint: internal - service: oslo_db db_sync: jobs: null - services: - - endpoint: internal - service: oslo_db - ks_endpoints: - jobs: - - glance-ks-service - services: - - endpoint: internal - service: identity - ks_service: - services: - - endpoint: internal - service: identity - ks_user: - services: - - endpoint: internal - service: identity - rabbit_init: - services: - - endpoint: internal - service: oslo_messaging - storage_init: - jobs: - - glance-ks-user - services: null - metadefs_load: - jobs: - - glance-db-sync - services: null - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry - -# Names of secrets used by bootstrap and environmental checks -secrets: - identity: - admin: glance-keystone-admin - glance: glance-keystone-user - test: glance-keystone-test - oslo_db: - admin: glance-db-admin - glance: glance-db-user - rbd: images-rbd-keyring - oslo_messaging: - admin: glance-rabbitmq-admin - glance: glance-rabbitmq-user - tls: - image: - api: - public: glance-tls-public - internal: glance-tls-api - oci_image_registry: - glance: glance-oci-image-registry -# typically overridden by environmental -# values, but should include all endpoints -# required by this chart endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - glance: - username: glance - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null identity: - name: keystone - auth: - admin: - region_name: RegionOne - username: admin - password: password - project_name: admin - user_domain_name: default - project_domain_name: default - glance: - role: admin - region_name: RegionOne - username: glance - password: password - project_name: service - user_domain_name: service - project_domain_name: service - hosts: - default: keystone - internal: keystone-api - host_fqdn_override: - default: null - path: - default: /v3 - scheme: - default: http port: api: default: 5000 @@ -592,24 +181,6 @@ endpoints: internal: 5000 service: 5000 image: - name: glance - hosts: - default: glance-api - public: glance - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: null - scheme: - default: http - service: http port: api: default: 9292 @@ -617,80 +188,21 @@ endpoints: internal: 9292 service: 9292 oslo_db: - auth: - admin: - username: root - password: password - secret: - tls: - internal: mariadb-tls-direct - glance: - username: glance - password: password + host_fqdn_override: + default: mariadb-cluster-primary.openstack.svc.cluster.local hosts: default: mariadb-cluster-primary - host_fqdn_override: - default: null - path: /glance - scheme: mysql+pymysql - port: - mysql: - default: 3306 oslo_cache: - auth: - # NOTE(portdirect): this is used to define the value for keystone - # authtoken cache encryption key, if not set it will be populated - # automatically with a random value, but to take advantage of - # 
this feature all services should be set to use the same key, - # and memcache service. - memcache_secret_key: null + host_fqdn_override: + default: memcached.openstack.svc.cluster.local hosts: default: memcached - host_fqdn_override: - default: null - port: - memcache: - default: 11211 oslo_messaging: - auth: - admin: - username: rabbitmq - password: password - secret: - tls: - internal: rabbitmq-tls-direct - glance: - username: glance - password: password - statefulset: - replicas: 3 - name: rabbitmq-server - hosts: - default: rabbitmq-nodes host_fqdn_override: default: rabbitmq.openstack.svc.cluster.local - path: /glance - scheme: rabbit - port: - amqp: - default: 5672 - http: - default: 15672 - object_store: - name: swift - namespace: ceph - auth: - glance: - tmpurlkey: supersecret hosts: - default: ceph-rgw - public: radosgw - host_fqdn_override: - default: null - path: - default: /swift/v1/KEY_$(tenant_id)s - scheme: - default: http + default: rabbitmq-nodes + object_store: port: api: default: 8088 @@ -698,22 +210,6 @@ endpoints: internal: 8088 service: 8088 ceph_object_store: - name: radosgw - namespace: ceph - auth: - glance: - username: glance - password: password - tmpurlkey: supersecret - hosts: - default: ceph-rgw - public: radosgw - host_fqdn_override: - default: null - path: - default: /auth/v1.0 - scheme: - default: http port: api: default: 8088 @@ -722,159 +218,17 @@ endpoints: service: 8088 fluentd: namespace: fluentbit - name: fluentd - hosts: - default: fluentd-logging - host_fqdn_override: - default: null - path: - default: null - scheme: 'http' - port: - service: - default: 24224 - metrics: - default: 24220 dashboard: - name: horizon - hosts: - default: horizon-int - public: horizon - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: null - scheme: - default: http - public: https port: web: default: 80 public: 443 internal: 80 service: 80 - # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress - # They are using to enable the Egress K8s network policy. 
- kube_dns: - namespace: kube-system - name: kubernetes-dns - hosts: - default: kube-dns - host_fqdn_override: - default: null - path: - default: null - scheme: http - port: - dns: - default: 53 - protocol: UDP - ingress: - namespace: null - name: ingress - hosts: - default: ingress - port: - ingress: - default: 80 pod: - security_context: - glance: - pod: - runAsUser: 42424 - container: - glance_perms: - readOnlyRootFilesystem: true - runAsUser: 0 - ceph_keyring_placement: - readOnlyRootFilesystem: true - runAsUser: 0 - glance_api: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nginx: - readOnlyRootFilesystem: false - runAsUser: 0 - clean: - pod: - runAsUser: 42424 - container: - glance_secret_clean: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - metadefs_load: - pod: - runAsUser: 42424 - container: - glance_metadefs_load: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - storage_init: - pod: - runAsUser: 42424 - container: - ceph_keyring_placement: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - glance_storage_init: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - test: - pod: - runAsUser: 42424 - container: - glance_test_ks_user: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - glance_test: - runAsUser: 65500 - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - tolerations: - glance: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - useHostNetwork: - api: false - mounts: - glance_api: - init_container: null - glance_api: - volumeMounts: - volumes: - glance_tests: - init_container: null - glance_tests: - volumeMounts: - volumes: - glance_db_sync: - glance_db_sync: - volumeMounts: - volumes: replicas: - api: 1 # Set to 3 in production when attached to shared storage. + api: 1 lifecycle: upgrades: deployments: @@ -904,130 +258,13 @@ pod: periodSeconds: 15 timeoutSeconds: 10 resources: - enabled: true - api: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - jobs: - storage_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - metadefs_load: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_drop: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_user: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_service: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_endpoints: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - rabbit_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - bootstrap: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - tests: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - image_repo_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - -# NOTE(helm_hook): helm_hook might break for helm2 binary. -# set helm3_hook: false when using the helm2 binary. 
-helm3_hook: true - -tls: - identity: false - oslo_messaging: false - oslo_db: false + enabled: false manifests: - certificates: false - configmap_bin: true - configmap_etc: true - deployment_api: true ingress_api: false - job_bootstrap: true - job_clean: true job_db_init: false - job_db_sync: true - job_db_drop: false - job_image_repo_sync: true - job_ks_endpoints: true - job_ks_service: true - job_ks_user: true - job_storage_init: false # This is set to false because we're using PVC storage. - job_metadefs_load: true + job_storage_init: false job_rabbit_init: false - pdb_api: true pod_rally_test: false - pvc_images: true - network_policy: false - secret_db: true secret_ingress_tls: false - secret_keystone: true - secret_rabbitmq: true - secret_registry: true service_ingress_api: false - service_api: true - -# NOTE: This is for enable helm resource-policy to keep glance-images PVC. -# set keep_pvc: true when allow helm resource-policy to keep for PVC. -# This will requires mannual delete for PVC. -# set keep_pvc: false when disallow helm resource-policy to keep for PVC. -# This will allow helm to delete the PVC. -keep_pvc: true diff --git a/base-helm-configs/gnocchi/gnocchi-helm-overrides.yaml b/base-helm-configs/gnocchi/gnocchi-helm-overrides.yaml index 6b660ffe4..0163cf413 100644 --- a/base-helm-configs/gnocchi/gnocchi-helm-overrides.yaml +++ b/base-helm-configs/gnocchi/gnocchi-helm-overrides.yaml @@ -1,691 +1,144 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default values for gnocchi. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. 
- --- -labels: - api: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - metricd: - node_selector_key: openstack-control-plane - node_selector_value: enabled - statsd: - node_selector_key: openstack-control-plane - node_selector_value: enabled - test: - node_selector_key: openstack-control-plane - node_selector_value: enabled - -release_group: null - images: tags: - dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" - gnocchi_storage_init: "quay.io/rackspace/rackerlabs-ceph-config-helper:latest-ubuntu_jammy" - db_init_indexer: "quay.io/rackspace/rackerlabs-postgres:14.5" db_init: "quay.io/rackspace/rackerlabs-gnocchi:2024.1-ubuntu_jammy" + db_init_indexer: "quay.io/rackspace/rackerlabs-postgres:14.5" db_sync: "quay.io/rackspace/rackerlabs-gnocchi:2024.1-ubuntu_jammy" - ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_endpoints: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" gnocchi_api: "quay.io/rackspace/rackerlabs-gnocchi:2024.1-ubuntu_jammy" - gnocchi_statsd: "quay.io/rackspace/rackerlabs-gnocchi:2024.1-ubuntu_jammy" gnocchi_metricd: "quay.io/rackspace/rackerlabs-gnocchi:2024.1-ubuntu_jammy" gnocchi_resources_cleaner: "quay.io/rackspace/rackerlabs-gnocchi:2024.1-ubuntu_jammy" + gnocchi_statsd: "quay.io/rackspace/rackerlabs-gnocchi:2024.1-ubuntu_jammy" + gnocchi_storage_init: "quay.io/rackspace/rackerlabs-ceph-config-helper:latest-ubuntu_jammy" image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" - pull_policy: "Always" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync - -jobs: - resources_cleaner: - # daily - cron: "0 */24 * * *" - deleted_resources_ttl: '1day' - history: - success: 3 - failed: 1 - -network: - api: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - external_policy_local: false - node_port: - enabled: false - port: 8041 - statsd: - node_port: - enabled: false - port: 8125 - -dependencies: - dynamic: - common: - local_image_registry: - jobs: - - gnocchi-image-repo-sync - services: - - endpoint: node - service: local_image_registry - static: - api: - jobs: - - gnocchi-storage-init - - gnocchi-db-sync - - gnocchi-ks-endpoints - - gnocchi-ks-service - - gnocchi-ks-user - services: - - endpoint: internal - service: identity - - endpoint: internal - service: oslo_db - clean: - services: null - db_init: - services: - - endpoint: internal - service: oslo_db - db_init_postgresql: - jobs: null - services: - - endpoint: internal - service: oslo_db_postgresql - db_sync: - jobs: - - gnocchi-storage-init - - gnocchi-db-init - - gnocchi-db-init-indexer - services: - - endpoint: internal - service: oslo_db_postgresql - ks_endpoints: - jobs: - - gnocchi-ks-service - services: - - endpoint: internal - service: identity - ks_service: - services: - - endpoint: internal - service: identity - ks_user: - services: - - endpoint: internal - service: identity - metricd: - jobs: - - gnocchi-storage-init - - gnocchi-db-sync - - gnocchi-ks-user - - gnocchi-ks-service - - gnocchi-ks-endpoints - services: - - endpoint: internal - service: oslo_db_postgresql - - endpoint: internal - service: metric - statsd: - jobs: - - gnocchi-storage-init - - gnocchi-db-sync - - 
gnocchi-ks-user - - gnocchi-ks-service - - gnocchi-ks-endpoints - services: - - endpoint: internal - service: oslo_db_postgresql - - endpoint: internal - service: metric - resources_cleaner: - jobs: - - gnocchi-storage-init - - gnocchi-db-sync - - gnocchi-ks-user - - gnocchi-ks-endpoints - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: metric - storage_init: - services: null - tests: - jobs: - - gnocchi-storage-init - - gnocchi-db-sync - services: - - endpoint: internal - service: identity - - endpoint: internal - service: oslo_db_postgresql - - endpoint: internal - service: metric - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry + ks_endpoints: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" -pod: - user: - gnocchi: - uid: 1000 - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - tolerations: - gnocchi: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - mounts: - gnocchi_api: - init_container: null - gnocchi_api: - gnocchi_statsd: - init_container: null - gnocchi_statsd: - gnocchi_metricd: - init_container: null - gnocchi_metricd: - gnocchi_resources_cleaner: - init_container: null - gnocchi_resources_cleaner: - gnocchi_tests: - init_container: null - gnocchi_tests: - replicas: - api: 1 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - daemonsets: - pod_replacement_strategy: RollingUpdate - metricd: - enabled: true - min_ready_seconds: 0 - max_unavailable: 20% - statsd: - enabled: true - min_ready_seconds: 0 - max_unavailable: 20% - disruption_budget: - api: - min_available: 0 - termination_grace_period: - api: - timeout: 30 - resources: - enabled: true - api: - requests: - memory: "124Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - statsd: - requests: - memory: "124Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - metricd: - requests: - memory: "124Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - jobs: - clean: - requests: - memory: "124Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - db_init: - requests: - memory: "124Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - db_sync: - requests: - memory: "124Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - ks_endpoints: - requests: - memory: "124Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - ks_service: - requests: - memory: "124Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - ks_user: - requests: - memory: "124Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - resources_cleaner: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - tests: - requests: - memory: "124Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - image_repo_sync: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" +ceph_client: + user_secret_name: gnocchi-temp-keyring conf: apache: | - Listen 0.0.0.0:{{ tuple "metric" "internal" "api" . 
| include "helm-toolkit.endpoints.endpoint_port_lookup" }} - + Listen 0.0.0.0:{{ tuple "metric" "internal" "api" . | include + "helm-toolkit.endpoints.endpoint_port_lookup" }} SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded CustomLog /dev/stdout combined env=!forwarded CustomLog /dev/stdout proxy env=forwarded - - - WSGIDaemonProcess gnocchi processes=1 threads=2 user=gnocchi group=gnocchi display-name=%{GROUP} + + WSGIDaemonProcess gnocchi processes={{ .Values.conf.gnocchi_api_wsgi.wsgi.processes }} threads={{ .Values.conf.gnocchi_api_wsgi.wsgi.threads }} user=gnocchi group=gnocchi display-name=%{GROUP} WSGIProcessGroup gnocchi WSGIScriptAlias / "/usr/local/lib/python3.10/dist-packages/gnocchi/rest/wsgi.py" WSGIApplicationGroup %{GLOBAL} - ErrorLog /dev/stderr SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded CustomLog /dev/stdout combined env=!forwarded CustomLog /dev/stdout proxy env=forwarded - Require all granted - ceph: - monitors: [] - admin_keyring: null - override: - append: + gnocchi: + metricd: + workers: 2 + gnocchi_api_wsgi: + wsgi: + processes: 2 + threads: 4 paste: - app:gnocchiv1: - paste.app_factory: gnocchi.rest.app:app_factory + "app:gnocchiv1": + paste.app_factory: "gnocchi.rest.app:app_factory" root: gnocchi.rest.api.V1Controller - app:gnocchiversions: - paste.app_factory: gnocchi.rest.app:app_factory + "app:gnocchiversions": + paste.app_factory: "gnocchi.rest.app:app_factory" root: gnocchi.rest.api.VersionsController - app:healthcheck: + "app:healthcheck": oslo_config_project: gnocchi - use: egg:oslo.middleware#healthcheck - composite:gnocchi+basic: + use: "egg:oslo.middleware#healthcheck" + "composite:gnocchi+basic": /: gnocchiversions_pipeline /healthcheck: healthcheck /v1: gnocchiv1+noauth - use: egg:Paste#urlmap - composite:gnocchi+keystone: + use: "egg:Paste#urlmap" + "composite:gnocchi+keystone": /: gnocchiversions_pipeline /healthcheck: healthcheck /v1: gnocchiv1+keystone - use: egg:Paste#urlmap - composite:gnocchi+remoteuser: + use: "egg:Paste#urlmap" + "composite:gnocchi+remoteuser": /: gnocchiversions_pipeline /healthcheck: healthcheck /v1: gnocchiv1+noauth - use: egg:Paste#urlmap - filter:keystone_authtoken: + use: "egg:Paste#urlmap" + "filter:keystone_authtoken": oslo_config_project: gnocchi - use: egg:keystonemiddleware#auth_token - pipeline:gnocchiv1+keystone: + use: "egg:keystonemiddleware#auth_token" + "pipeline:gnocchiv1+keystone": pipeline: keystone_authtoken gnocchiv1 - pipeline:gnocchiv1+noauth: + "pipeline:gnocchiv1+noauth": pipeline: gnocchiv1 - pipeline:gnocchiversions_pipeline: + "pipeline:gnocchiversions_pipeline": pipeline: gnocchiversions - pipeline:main: + "pipeline:main": pipeline: gnocchi+keystone policy: - admin_or_creator: 'role:admin or project_id:%(created_by_project_id)s' - create archive policy rule: 'role:admin' - create archive policy: 'role:admin' - create metric: '' - create resource: '' - create resource type: 'role:admin' - delete archive policy: 'role:admin' - delete archive policy rule: 'role:admin' - delete metric: 'rule:admin_or_creator' - delete resource: 'rule:admin_or_creator' - delete resource type: 'role:admin' - delete resources: 'rule:admin_or_creator' - get archive policy: '' - get archive policy rule: '' - get measures: 'rule:admin_or_creator or rule:metric_owner' - get metric: 'rule:admin_or_creator or rule:metric_owner' - get resource type: '' - get resource: 'rule:admin_or_creator or rule:resource_owner' - get status: 'role:admin' - list all metric: 'role:admin' - list archive policy: '' - list archive 
policy rule: '' - list metric: '' - list resource: 'rule:admin_or_creator or rule:resource_owner' - list resource type: '' - metric_owner: 'project_id:%(resource.project_id)s' - post measures: 'rule:admin_or_creator' - resource_owner: 'project_id:%(project_id)s' - search metric: 'rule:admin_or_creator or rule:metric_owner' - search resource: 'rule:admin_or_creator or rule:resource_owner' - update archive policy: 'role:admin' - update resource: 'rule:admin_or_creator' - update resource type: 'role:admin' - context_is_admin: 'role:admin' - update archive policy rule: 'role:admin' - gnocchi: - DEFAULT: - debug: false - token: - provider: uuid - api: - auth_mode: keystone - # NOTE(portdirect): the bind port should not be defined, and is manipulated - # via the endpoints section. - port: null - statsd: - # NOTE(portdirect): the bind port should not be defined, and is manipulated - # via the endpoints section. - port: null - # Increase worker count for production - metricd: - workers: 8 - database: - max_retries: -1 - storage: - driver: ceph - ceph_pool: gnocchi.metrics - ceph_username: gnocchi - ceph_keyring: /etc/ceph/ceph.client.gnocchi.keyring - ceph_conffile: /etc/ceph/ceph.conf - file_basepath: /var/lib/gnocchi - provided_keyring: null - indexer: - driver: postgresql - keystone_authtoken: - auth_type: password - auth_version: v3 - memcache_security_strategy: ENCRYPT - -ceph_client: - configmap: ceph-etc - user_secret_name: gnocchi-temp-keyring - -secrets: - identity: - admin: gnocchi-keystone-admin - gnocchi: gnocchi-keystone-user - oslo_db: - admin: gnocchi-db-admin - gnocchi: gnocchi-db-user - oslo_db_indexer: - admin: gnocchi-db-indexer-admin - gnocchi: gnocchi-db-indexer-user - rbd: gnocchi-rbd-keyring - tls: - metric: - api: - public: gnocchi-tls-public + admin_or_creator: "role:admin or project_id:%(created_by_project_id)s" + context_is_admin: "role:admin" + update archive policy rule: "role:admin" -bootstrap: - enabled: false - ks_user: gnocchi - script: | - openstack token issue +pod: + lifecycle: + upgrades: + daemonsets: + metricd: + enabled: true + max_unavailable: 20% + pod_replacement_strategy: RollingUpdate + statsd: + enabled: true + max_unavailable: 20% -# typically overridden by environmental -# values, but should include all endpoints -# required by this chart endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null + fluentd: + namespace: fluentbit port: - registry: - node: 5000 + metrics: + default: 24220 + service: + default: 24224 identity: - name: keystone - auth: - admin: - username: "admin" - user_domain_name: "default" - password: "password" - project_name: "admin" - project_domain_name: "default" - region_name: "RegionOne" - os_auth_type: "password" - os_tenant_name: "admin" - gnocchi: - username: "gnocchi" - role: "admin" - password: "password" - project_name: "service" - region_name: "RegionOne" - os_auth_type: "password" - os_tenant_name: "service" - user_domain_name: service - project_domain_name: service - hosts: - default: keystone - internal: keystone-api - host_fqdn_override: - default: null - path: - default: /v3 - scheme: - default: 'http' port: api: default: 5000 - public: 80 internal: 5000 + public: 80 service: 5000 metric: - name: gnocchi - hosts: - default: gnocchi-api - public: gnocchi - host_fqdn_override: - default: null - # NOTE: this chart supports TLS for fqdn 
over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: null - scheme: - default: 'http' port: api: default: 8041 - public: 80 internal: 8041 + public: 80 service: 8041 - metric_statsd: - name: gnocchi-statsd - hosts: - default: gnocchi-statsd + # TODO: (cbreu) do we still need to define mariadb when we are + # using postgres? What breaks if we remove it? + oslo_db: host_fqdn_override: - default: null - path: - default: null - scheme: - default: null - port: - statsd: - default: 8125 + default: mariadb-cluster-primary.openstack.svc.cluster.local + hosts: + default: mariadb-cluster-primary oslo_db_postgresql: - auth: - admin: - username: postgres - password: password - gnocchi: - username: gnocchi - password: password hosts: default: postgres-cluster - host_fqdn_override: - default: null - path: /gnocchi - scheme: postgresql - port: - postgresql: - default: 5432 - oslo_db: - auth: - admin: - username: root - password: password - gnocchi: - username: gnocchi - password: password - hosts: - default: mariadb-cluster-primary - host_fqdn_override: - default: null - path: /gnocchi - scheme: mysql+pymysql - port: - mysql: - default: 3306 oslo_cache: - auth: - # NOTE(portdirect): this is used to define the value for keystone - # authtoken cache encryption key, if not set it will be populated - # automatically with a random value, but to take advantage of - # this feature all services should be set to use the same key, - # and memcache service. - memcache_secret_key: null - hosts: - default: memcached host_fqdn_override: - default: null - port: - memcache: - default: 11211 - fluentd: - namespace: fluentbit - name: fluentd + default: memcached.openstack.svc.cluster.local hosts: - default: fluentd-logging - host_fqdn_override: - default: null - path: - default: null - scheme: 'http' - port: - service: - default: 24224 - metrics: - default: 24220 + default: memcached + manifests: - configmap_bin: true - configmap_etc: true - cron_job_resources_cleaner: true - daemonset_metricd: true - daemonset_statsd: true - deployment_api: true ingress_api: false - job_bootstrap: true - job_clean: true - job_db_drop: false - job_db_init_indexer: true - job_db_init: true - job_image_repo_sync: true - secret_db_indexer: true - job_db_sync: true - job_ks_endpoints: true - job_ks_service: true - job_ks_user: true - job_storage_init: true - pdb_api: true pod_gnocchi_test: false - secret_db: true - secret_keystone: true secret_ingress_tls: false - service_api: true service_ingress_api: false - service_statsd: true -...
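Note: the gnocchi Apache vhost above now takes its WSGI sizing from values (conf.gnocchi_api_wsgi.wsgi.processes / threads) instead of hard-coded numbers, and the metricd worker count sits under conf.gnocchi.metricd.workers. A minimal sketch of a site-specific override that scales those knobs; the numbers here are illustrative assumptions, not values from this change:

    conf:
      gnocchi:
        metricd:
          workers: 4            # illustrative: scale metricd workers per deployment
      gnocchi_api_wsgi:
        wsgi:
          processes: 4          # rendered into the WSGIDaemonProcess processes=... setting
          threads: 4            # rendered into the WSGIDaemonProcess threads=... setting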
diff --git a/base-helm-configs/heat/heat-helm-overrides.yaml b/base-helm-configs/heat/heat-helm-overrides.yaml index 7fd6f85c2..53e2990bd 100644 --- a/base-helm-configs/heat/heat-helm-overrides.yaml +++ b/base-helm-configs/heat/heat-helm-overrides.yaml @@ -1,551 +1,86 @@ --- -release_group: null - -labels: - api: - node_selector_key: openstack-control-plane - node_selector_value: enabled - cfn: - node_selector_key: openstack-control-plane - node_selector_value: enabled - cloudwatch: - node_selector_key: openstack-control-plane - node_selector_value: enabled - engine: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - test: - node_selector_key: openstack-control-plane - node_selector_value: enabled - images: tags: bootstrap: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" db_drop: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_endpoints: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - heat_db_sync: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" heat_api: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" heat_cfn: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" heat_cloudwatch: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + heat_db_sync: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" heat_engine: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" heat_engine_cleaner: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" heat_purge_deleted: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - test: "quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0" - rabbit_init: "quay.io/rackspace/rackerlabs-rabbitmq:3.13-management" - dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync - -jobs: - engine_cleaner: - cron: "*/5 * * * *" - starting_deadline: 600 - history: - success: 3 - failed: 1 - - purge_deleted: - cron: "20 */24 * * *" - purge_age: 60 - history: - success: 3 - failed: 1 + ks_endpoints: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + rabbit_init: "quay.io/rackspace/rackerlabs-rabbitmq:3.13-management" + test: "quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0" conf: - rally_tests: - run_tempest: false - tests: - HeatStacks.create_update_delete_stack: - - args: - template_path: /tmp/rally-jobs/random_strings.yaml - updated_template_path: /tmp/rally-jobs/updated_random_strings_replace.yaml - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - HeatStacks.create_check_delete_stack: - - args: - template_path: /tmp/rally-jobs/random_strings.yaml - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - HeatStacks.create_and_delete_stack: - - args: - template_path: /tmp/rally-jobs/resource_group_with_constraint.yaml - runner: - concurrency: 1 - times: 1 - type: 
constant - sla: - failure_rate: - max: 0 - HeatStacks.create_and_list_stack: - - args: - template_path: /tmp/rally-jobs/default.yaml - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - HeatStacks.create_snapshot_restore_delete_stack: - - args: - template_path: /tmp/rally-jobs/random_strings.yaml - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - HeatStacks.create_stack_and_list_output: - - args: - template_path: /tmp/rally-jobs/resource_group_with_outputs.yaml - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - HeatStacks.create_stack_and_list_output_via_API: - - args: - template_path: /tmp/rally-jobs/resource_group_with_outputs.yaml - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - templates: - - name: /tmp/rally-jobs/default.yaml - template: | - heat_template_version: 2014-10-16 - - name: /tmp/rally-jobs/random_strings.yaml - template: | - heat_template_version: 2014-10-16 - description: Test template for rally create-update-delete scenario - resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 20 - - name: /tmp/rally-jobs/resource_group_with_constraint.yaml - template: | - heat_template_version: 2013-05-23 - description: Template for testing caching. - parameters: - count: - type: number - default: 40 - delay: - type: number - default: 0.1 - resources: - rg: - type: OS::Heat::ResourceGroup - properties: - count: - get_param: count - resource_def: - type: OS::Heat::TestResource - properties: - constraint_prop_secs: - get_param: delay - - name: /tmp/rally-jobs/resource_group_with_outputs.yaml - template: | - heat_template_version: 2013-05-23 - parameters: - attr_wait_secs: - type: number - default: 0.5 - resources: - rg: - type: OS::Heat::ResourceGroup - properties: - count: 10 - resource_def: - type: OS::Heat::TestResource - properties: - attr_wait_secs: - get_param: attr_wait_secs - outputs: - val1: - value: - get_attr: - - rg - - resource.0.output - val2: - value: - get_attr: - - rg - - resource.1.output - val3: - value: - get_attr: - - rg - - resource.2.output - val4: - value: - get_attr: - - rg - - resource.3.output - val5: - value: - get_attr: - - rg - - resource.4.output - val6: - value: - get_attr: - - rg - - resource.5.output - val7: - value: - get_attr: - - rg - - resource.6.output - val8: - value: - get_attr: - - rg - - resource.7.output - val9: - value: - get_attr: - - rg - - resource.8.output - val10: - value: - get_attr: - - rg - - resource.9.output - - name: /tmp/rally-jobs/updated_random_strings_replace.yaml - template: | - heat_template_version: 2014-10-16 - description: | - Test template for create-update-delete-stack scenario in rally. - The template deletes one resource from the stack defined by - random-strings.yaml.template and re-creates it with the updated parameters - (so-called update-replace). That happens because some parameters cannot be - changed without resource re-creation. The template allows to measure performance - of update-replace operation. 
- resources: - test_string_one: - type: OS::Heat::RandomString - properties: - length: 20 - test_string_two: - type: OS::Heat::RandomString - properties: - length: 40 - paste: - pipeline:heat-api: - pipeline: cors request_id faultwrap http_proxy_to_wsgi versionnegotiation osprofiler authurl authtoken audit context apiv1app - pipeline:heat-api-standalone: - pipeline: cors request_id faultwrap http_proxy_to_wsgi versionnegotiation authurl authpassword context apiv1app - pipeline:heat-api-custombackend: - pipeline: cors request_id faultwrap versionnegotiation context custombackendauth apiv1app - pipeline:heat-api-cfn: - pipeline: cors http_proxy_to_wsgi cfnversionnegotiation osprofiler ec2authtoken authtoken audit context apicfnv1app - pipeline:heat-api-cfn-standalone: - pipeline: cors http_proxy_to_wsgi cfnversionnegotiation ec2authtoken context apicfnv1app - pipeline:heat-api-cloudwatch: - pipeline: cors versionnegotiation osprofiler ec2authtoken authtoken audit context apicwapp - pipeline:heat-api-cloudwatch-standalone: - pipeline: cors versionnegotiation ec2authtoken context apicwapp - app:apiv1app: - paste.app_factory: heat.common.wsgi:app_factory - heat.app_factory: heat.api.openstack.v1:API - app:apicfnv1app: - paste.app_factory: heat.common.wsgi:app_factory - heat.app_factory: heat.api.cfn.v1:API - app:apicwapp: - paste.app_factory: heat.common.wsgi:app_factory - heat.app_factory: heat.api.cloudwatch:API - filter:versionnegotiation: - paste.filter_factory: heat.common.wsgi:filter_factory - heat.filter_factory: heat.api.openstack:version_negotiation_filter - filter:cors: - paste.filter_factory: oslo_middleware.cors:filter_factory - oslo_config_project: heat - filter:faultwrap: - paste.filter_factory: heat.common.wsgi:filter_factory - heat.filter_factory: heat.api.openstack:faultwrap_filter - filter:cfnversionnegotiation: - paste.filter_factory: heat.common.wsgi:filter_factory - heat.filter_factory: heat.api.cfn:version_negotiation_filter - filter:cwversionnegotiation: - paste.filter_factory: heat.common.wsgi:filter_factory - heat.filter_factory: heat.api.cloudwatch:version_negotiation_filter - filter:context: - paste.filter_factory: heat.common.context:ContextMiddleware_filter_factory - filter:ec2authtoken: - paste.filter_factory: heat.api.aws.ec2token:EC2Token_filter_factory - filter:http_proxy_to_wsgi: - paste.filter_factory: oslo_middleware:HTTPProxyToWSGI.factory - filter:authurl: - paste.filter_factory: heat.common.auth_url:filter_factory - filter:authtoken: - paste.filter_factory: keystonemiddleware.auth_token:filter_factory - filter:authpassword: - paste.filter_factory: heat.common.auth_password:filter_factory - filter:custombackendauth: - paste.filter_factory: heat.common.custom_backend_auth:filter_factory - filter:audit: - paste.filter_factory: keystonemiddleware.audit:filter_factory - audit_map_file: /etc/heat/api_audit_map.conf - filter:request_id: - paste.filter_factory: oslo_middleware.request_id:RequestId.factory - filter:osprofiler: - paste.filter_factory: osprofiler.web:WsgiMiddleware.factory - policy: {} heat: DEFAULT: + num_engine_workers: 4 server_keystone_endpoint_type: public - log_config_append: /etc/heat/logging.conf - num_engine_workers: 8 - trusts_delegated_roles: "" - host: heat-engine - keystone_authtoken: - service_token_roles: service - service_token_roles_required: true - auth_type: password - auth_version: v3 - memcache_security_strategy: ENCRYPT - service_type: orchestration - interface: public + clients_keystone: + endpoint_type: publicURL database: - 
idle_timeout: 3600 connection_recycle_time: 3600 + idle_timeout: 3600 pool_timeout: 60 - max_retries: -1 - trustee: - auth_type: password - auth_version: v3 heat_api: - # NOTE(portdirect): the bind port should not be defined, and is manipulated - # via the endpoints section. - bind_port: null - workers: 8 - heat_api_cloudwatch: - # NOTE(portdirect): the bind port should not be defined, and is manipulated - # via the endpoints section. - bind_port: null - workers: 8 + workers: 1 heat_api_cfn: - # NOTE(portdirect): the bind port should not be defined, and is manipulated - # via the endpoints section. - bind_port: null - workers: 8 - paste_deploy: - api_paste_config: /etc/heat/api-paste.ini - clients: - endpoint_type: internalURL - clients_heat: - endpoint_type: publicURL - clients_keystone: - endpoint_type: publicURL - oslo_messaging_notifications: - driver: messagingv2 - oslo_middleware: - enable_proxy_headers_parsing: true + workers: 1 + heat_api_cloudwatch: + workers: 1 + keystone_authtoken: + auth_type: password + auth_version: v3 + interface: public + memcache_security_strategy: ENCRYPT + service_token_roles: service + service_token_roles_required: true + service_type: orchestration oslo_concurrency: lock_path: /tmp/heat oslo_messaging_rabbit: amqp_durable_queues: false - # We define use of quorum queues via kustomize but this was enabling HA queues instead - # ha_queues are deprecated, explicitly set to false and set quorum_queue true rabbit_ha_queues: false rabbit_quorum_queue: true - # TODO: Not available until 2024.1, but once it is, we want to enable these! - # new feature ref; https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html rabbit_transient_quorum_queue: false use_queue_manager: false - # Reconnect after a node outage more quickly rabbit_interval_max: 10 # Send more frequent heartbeats and fail unhealthy nodes faster # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 heartbeat_rate: 3 - heartbeat_timeout_threshold: 30 - # Setting lower kombu_reconnect_delay should resolve isssue with HA failing when one node is down + heartbeat_timeout_threshold: 60 + # DEPRECIATION: (warning) heartbeat_in_pthread will be deprecated in 2024.2 + heartbeat_in_pthread: True + # Setting lower kombu_reconnect_delay should resolve issue with HA failing when one node is down # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 kombu_reconnect_delay: 0.5 - oslo_policy: - policy_file: /etc/heat/policy.yaml - api_audit_map: - DEFAULT: - target_endpoint_type: None - path_keywords: - stacks: stack - resources: resource - preview: None - detail: None - abandon: None - snapshots: snapshot - restore: None - outputs: output - metadata: server - signal: None - events: event - template: None - template_versions: template_version - functions: None - validate: None - resource_types: resource_type - build_info: None - actions: None - software_configs: software_config - software_deployments: software_deployment - services: None - service_endpoints: - orchestration:service/orchestration + heat_api_cfn_uwsgi: + uwsgi: + processes: 4 + threads: 2 + heat_api_uwsgi: + uwsgi: + processes: 4 + threads: 2 logging: - loggers: - keys: - - root - - heat - handlers: - keys: - - stdout - - stderr - - "null" - formatters: - keys: - - context - - default logger_root: - level: INFO handlers: - stdout - logger_heat: 
level: INFO - handlers: - - stdout - qualname: heat - logger_amqp: - level: WARNING - handlers: stderr - qualname: amqp - logger_amqplib: - level: WARNING - handlers: stderr - qualname: amqplib - logger_eventletwsgi: - level: WARNING - handlers: stderr - qualname: eventlet.wsgi.server - logger_sqlalchemy: - level: WARNING - handlers: stderr - qualname: sqlalchemy - logger_boto: - level: WARNING - handlers: stderr - qualname: boto - handler_null: - class: logging.NullHandler - formatter: default - args: () - handler_stdout: - class: StreamHandler - args: (sys.stdout,) - formatter: context - handler_stderr: - class: StreamHandler - args: (sys.stderr,) - formatter: context - formatter_context: - class: oslo_log.formatters.ContextFormatter - datefmt: "%Y-%m-%d %H:%M:%S" - formatter_default: - format: "%(message)s" - datefmt: "%Y-%m-%d %H:%M:%S" - rabbitmq: policies: [] -network: - api: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - external_policy_local: false - node_port: - enabled: false - port: 30004 - cfn: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - node_port: - enabled: false - port: 30800 - cloudwatch: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - node_port: - enabled: false - port: 30003 - -bootstrap: - enabled: true - ks_user: admin - script: | - #NOTE(portdirect): The Orchestration service automatically assigns the - # 'heat_stack_user' role to users that it creates during stack deployment. - # By default, this role restricts API operations. To avoid conflicts, do - # not add this role to actual users. 
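Note: the retained comment in the heat oslo_messaging_rabbit block above still shows the arithmetic for the previous 30-second threshold. Applying the same formula to the values now set (heartbeat_timeout_threshold: 60, heartbeat_rate: 3) gives a 10-second send interval; a worked check, assuming the formula quoted in that comment:

    oslo_messaging_rabbit:
      heartbeat_rate: 3
      heartbeat_timeout_threshold: 60
      # interval = heartbeat_timeout_threshold / heartbeat_rate / 2.0
      #          = 60 / 3 / 2.0 = 10 seconds between heartbeats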
- openstack role create --or-show heat_stack_user - dependencies: - dynamic: - common: - local_image_registry: - jobs: - - heat-image-repo-sync - services: - - endpoint: node - service: local_image_registry static: api: jobs: @@ -555,13 +90,6 @@ dependencies: - heat-domain-ks-user - heat-ks-endpoints - heat-bootstrap - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: identity cfn: jobs: - heat-db-sync @@ -570,13 +98,6 @@ dependencies: - heat-domain-ks-user - heat-ks-endpoints - heat-bootstrap - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: identity cloudwatch: jobs: - heat-db-sync @@ -585,30 +106,8 @@ dependencies: - heat-domain-ks-user - heat-ks-endpoints - heat-bootstrap - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: identity - db_drop: - services: - - endpoint: internal - service: oslo_db - db_init: - services: - - endpoint: internal - service: oslo_db db_sync: jobs: [] - services: - - endpoint: internal - service: oslo_db - bootstrap: - services: - - endpoint: internal - service: identity engine: jobs: - heat-db-sync @@ -617,653 +116,61 @@ dependencies: - heat-domain-ks-user - heat-ks-endpoints - heat-bootstrap - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: identity - engine_cleaner: - jobs: - - heat-db-sync - - heat-ks-user - - heat-trustee-ks-user - - heat-domain-ks-user - - heat-ks-endpoints - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: identity - purge_deleted: - jobs: - - heat-db-sync - - heat-ks-user - - heat-trustee-ks-user - - heat-domain-ks-user - - heat-ks-endpoints - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: identity - ks_endpoints: - jobs: - - heat-ks-service - services: - - endpoint: internal - service: identity - ks_service: - services: - - endpoint: internal - service: identity - ks_user: - services: - - endpoint: internal - service: identity - rabbit_init: - services: - - endpoint: internal - service: oslo_messaging - trusts: - jobs: - - heat-ks-user - - heat-trustee-ks-user - - heat-domain-ks-user - services: - - endpoint: internal - service: identity - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry - tests: - services: - - endpoint: internal - service: identity - - endpoint: internal - service: orchestration - -# Names of secrets used by bootstrap and environmental checks -secrets: - identity: - admin: heat-keystone-admin - heat: heat-keystone-user - heat_trustee: heat-keystone-trustee - heat_stack_user: heat-keystone-stack-user - test: heat-keystone-test - oslo_db: - admin: heat-db-admin - heat: heat-db-user - oslo_messaging: - admin: heat-rabbitmq-admin - heat: heat-rabbitmq-user - tls: - orchestration: - api: - public: heat-tls-public - internal: heat-tls-api - cloudformation: - cfn: - public: cloudformation-tls-public - internal: heat-tls-cfn - oci_image_registry: - heat: heat-oci-image-registry -# typically overridden by environmental -# values, but should include all endpoints -# required by this chart endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: 
docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null + cloudformation: port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - heat: - username: heat - password: password - hosts: - default: localhost - host_fqdn_override: - default: null + api: + default: 8000 + internal: 8000 + public: 80 + service: 8000 + cloudwatch: port: - registry: - default: null + api: + default: 8003 + internal: 8003 + public: 80 + service: 8003 + fluentd: + namespace: fluentbit identity: - name: keystone - auth: - admin: - region_name: RegionOne - username: admin - password: password - project_name: admin - user_domain_name: default - project_domain_name: default - heat: - role: admin - region_name: RegionOne - username: heat - password: password - project_name: service - user_domain_name: service - project_domain_name: service - heat_trustee: - role: admin - region_name: RegionOne - username: heat-trust - password: password - project_name: service - user_domain_name: service - project_domain_name: service - heat_stack_user: - role: admin - region_name: RegionOne - username: heat-domain - password: password - domain_name: heat - hosts: - default: keystone - internal: keystone-api - host_fqdn_override: - default: null - path: - default: /v3 - scheme: - default: 'http' port: api: default: 5000 - public: 80 internal: 5000 + public: 80 service: 5000 orchestration: - name: heat - hosts: - default: heat-api - public: heat - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: '/v1/%(project_id)s' - scheme: - default: 'http' - service: 'http' port: api: default: 8004 - public: 80 internal: 8004 - service: 8004 - cloudformation: - name: heat-cfn - hosts: - default: heat-cfn - public: cloudformation - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: /v1 - scheme: - default: 'http' - service: 'http' - port: - api: - default: 8000 public: 80 - internal: 8000 - service: 8000 - # Cloudwatch does not get an entry in the keystone service catalog - cloudwatch: - name: heat-cloudwatch - hosts: - default: heat-cloudwatch - public: cloudwatch - host_fqdn_override: - default: null - path: - default: null - type: null - scheme: - default: 'http' - service: 'http' - port: - api: - default: 8003 - public: 80 - internal: 8003 - service: 8003 + service: 8004 oslo_db: - auth: - admin: - username: root - password: password - secret: - tls: - internal: mariadb-tls-direct - heat: - username: heat - password: password + host_fqdn_override: + default: mariadb-cluster-primary.openstack.svc.cluster.local hosts: default: mariadb-cluster-primary - host_fqdn_override: - default: null - path: /heat - scheme: mysql+pymysql - port: - mysql: - default: 3306 oslo_cache: - auth: - # NOTE(portdirect): this is used to define the value for keystone - # authtoken cache encryption key, if not set it will be populated - # automatically with a random value, but to take advantage of - # this feature all services should be set to use the same key, - # and memcache service. 
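Note: the trimmed heat endpoints keep only the cluster-specific hosts and FQDN overrides and let everything else fall back to chart defaults; the oslo_cache and oslo_messaging entries in this file follow the same shape. A condensed sketch of the pattern, using the oslo_db hostnames from the override above:

    endpoints:
      oslo_db:
        hosts:
          default: mariadb-cluster-primary
        host_fqdn_override:
          default: mariadb-cluster-primary.openstack.svc.cluster.local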
- memcache_secret_key: null + host_fqdn_override: + default: memcached.openstack.svc.cluster.local hosts: default: memcached - host_fqdn_override: - default: null - port: - memcache: - default: 11211 oslo_messaging: - auth: - admin: - username: rabbitmq - password: password - secret: - tls: - internal: rabbitmq-tls-direct - heat: - username: heat - password: password - statefulset: - replicas: 3 - name: rabbitmq-server - hosts: - default: rabbitmq-nodes host_fqdn_override: default: rabbitmq.openstack.svc.cluster.local - path: /heat - scheme: rabbit - port: - amqp: - default: 5672 - http: - default: 15672 - fluentd: - namespace: fluentbit - name: fluentd - hosts: - default: fluentd-logging - host_fqdn_override: - default: null - path: - default: null - scheme: 'http' - port: - service: - default: 24224 - metrics: - default: 24220 - # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress - # They are using to enable the Egress K8s network policy. - kube_dns: - namespace: kube-system - name: kubernetes-dns - hosts: - default: kube-dns - host_fqdn_override: - default: null - path: - default: null - scheme: http - port: - dns: - default: 53 - protocol: UDP - ingress: - namespace: null - name: ingress hosts: - default: ingress - port: - ingress: - default: 80 - -pod: - security_context: - heat: - pod: - runAsUser: 42424 - container: - heat_api: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - heat_cfn: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - heat_cloudwatch: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - heat_engine: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - trusts: - pod: - runAsUser: 42424 - container: - heat_trusts: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - ks_user: - pod: - runAsUser: 42424 - container: - heat_ks_domain_user: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - engine_cleaner: - pod: - runAsUser: 42424 - container: - heat_engine_cleaner: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - tolerations: - heat: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - mounts: - heat_api: - init_container: null - heat_api: - volumeMounts: - volumes: - heat_cfn: - init_container: null - heat_cfn: - volumeMounts: - volumes: - heat_cloudwatch: - init_container: null - heat_cloudwatch: - volumeMounts: - volumes: - heat_engine: - init_container: null - heat_engine: - volumeMounts: - volumes: - heat_bootstrap: - init_container: null - heat_bootstrap: - volumeMounts: - volumes: - heat_trusts: - init_container: null - heat_trusts: - volumeMounts: - volumes: - heat_engine_cleaner: - init_container: null - heat_engine_cleaner: - volumeMounts: - volumes: - heat_purge_deleted: - init_container: null - heat_purge_deleted: - volumeMounts: - volumes: - heat_tests: - init_container: null - heat_tests: - volumeMounts: - volumes: - heat_db_sync: - heat_db_sync: - volumeMounts: - volumes: - replicas: - api: 1 - cfn: 1 - cloudwatch: 1 - engine: 1 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - disruption_budget: - api: - 
min_available: 0 - cfn: - min_available: 0 - cloudwatch: - min_available: 0 - termination_grace_period: - api: - timeout: 30 - cfn: - timeout: 30 - cloudwatch: - timeout: 30 - engine: - timeout: 30 - resources: - enabled: true - api: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - cfn: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - cloudwatch: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - engine: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - jobs: - bootstrap: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_drop: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_endpoints: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_service: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_user: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - rabbit_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - tests: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - trusts: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - engine_cleaner: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - purge_deleted: - requests: - memory: "124Mi" - cpu: "100m" - limits: - memory: "4096Mi" - image_repo_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - -network_policy: - heat: - ingress: - - {} - egress: - - {} - -# NOTE(helm_hook): helm_hook might break for helm2 binary. -# set helm3_hook: false when using the helm2 binary. -helm3_hook: true - -tls: - identity: false - oslo_messaging: false - oslo_db: false + default: rabbitmq-nodes manifests: - certificates: false - configmap_bin: true - configmap_etc: true - cron_job_engine_cleaner: true - cron_job_purge_deleted: true - deployment_api: true - deployment_cfn: true - deployment_cloudwatch: false - deployment_engine: true ingress_api: false ingress_cfn: false - ingress_cloudwatch: false - job_bootstrap: true job_db_init: false - job_db_sync: true - job_db_drop: false - job_image_repo_sync: true - job_ks_endpoints: true - job_ks_service: true - job_ks_user_domain: true - job_ks_user_trustee: true - job_ks_user: true job_rabbit_init: false - pdb_api: true - pdb_cfn: true - pdb_cloudwatch: false pod_rally_test: false - network_policy: false - secret_db: true secret_ingress_tls: false - secret_keystone: true - secret_rabbitmq: true - secret_registry: true - service_api: true - service_cfn: true - service_cloudwatch: false service_ingress_api: false service_ingress_cfn: false service_ingress_cloudwatch: false - statefulset_engine: false diff --git a/base-helm-configs/horizon/horizon-helm-overrides.yaml b/base-helm-configs/horizon/horizon-helm-overrides.yaml index 5f3a89d64..73437f5a0 100644 --- a/base-helm-configs/horizon/horizon-helm-overrides.yaml +++ b/base-helm-configs/horizon/horizon-helm-overrides.yaml @@ -1,20 +1,3 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default values for horizon. -# This is a YAML-formatted file. -# Declare name/value pairs to be passed into your templates. -# name: value - --- images: tags: @@ -25,63 +8,8 @@ images: test: "quay.io/rackspace/rackerlabs-osh-selenium:latest-ubuntu_jammy" dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync - -# Use selenium v4 syntax -selenium_v4: true - -release_group: null - -labels: - dashboard: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - test: - node_selector_key: openstack-control-plane - node_selector_value: enabled - -network: - dashboard: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - nginx.ingress.kubernetes.io/affinity: cookie - nginx.ingress.kubernetes.io/affinity-mode: persistent - nginx.ingress.kubernetes.io/session-cookie-name: GENESTACKCOOKIE - nginx.ingress.kubernetes.io/session-cookie-secure: "true" - nginx.ingress.kubernetes.io/session-cookie-expires: "172800" - nginx.ingress.kubernetes.io/session-cookie-max-age: "172800" - nginx.ingress.kubernetes.io/session-cookie-hash: sha1 - external_policy_local: false - node_port: - enabled: false - port: 31000 conf: - software: - apache2: - binary: apache2 - start_parameters: -DFOREGROUND - site_dir: /etc/apache2/sites-available - conf_dir: /etc/apache2/conf-available - mods_dir: /etc/apache2/mods-available - a2enmod: - - headers - - rewrite - a2dismod: - - status horizon: branding: logo_splash: |- @@ -6087,62 +6015,13 @@ conf: local_settings: config: - # Use "True" and "False" as Titlecase strings with quotes, boolean - # values will not work - horizon_secret_key: 9aee62c0-5253-4a86-b189-e0fb71fa503c - debug: "False" - use_ssl: "True" - endpoint_type: "internalURL" - keystone_multidomain_support: "True" - keystone_default_domain: Default - disable_password_reveal: "True" - show_openrc_file: "True" - csrf_cookie_secure: "True" - csrf_cookie_httponly: "True" - enforce_password_check: "True" - # Set enable_pwd_validator to true to enforce password validator settings. 
- enable_pwd_validator: false - pwd_validator_regex: '(?=.*[a-zA-Z])(?=.*\d).{8,}|(?=.*\d)(?=.*\W).{8,}|(?=.*\W)(?=.*[a-zA-Z]).{8,}' - pwd_validator_help_text: '_("Your password must be at least eight (8) characters in length and must include characters from at least two (2) of these groupings: alpha, numeric, and special characters.")' - session_cookie_secure: "True" - session_cookie_httponly: "True" + csrf_cookie_httponly: 'True' + csrf_cookie_secure: 'True' + disallow_iframe_embed: 'True' secure_proxy_ssl_header: true - password_autocomplete: "False" - disallow_iframe_embed: "True" - allowed_hosts: - - '*' - horizon_images_upload_mode: 'legacy' - openstack_cinder_features: - enable_backup: "True" - openstack_neutron_network: - enable_router: "True" - enable_quotas: "True" - enable_ipv6: "True" - enable_distributed_router: "False" - enable_ha_router: "False" - enable_lb: "True" - enable_firewall: "True" - enable_vpn: "True" - enable_fip_topology_check: "True" - openstack_enable_password_retrieve: "False" - auth: - sso: - enabled: False - initial_choice: "credentials" - idp_mapping: - - name: "acme_oidc" - label: "Acme Corporation - OpenID Connect" - idp: "myidp1" - protocol: "oidc" - - name: "acme_saml2" - label: "Acme Corporation - SAML2" - idp: "myidp2" - protocol: "saml2" - log_level: "DEBUG" - # Pass any settings to the end of local_settings.py - raw: {} - openstack_api_versions: - container_infra: "1.10" + session_cookie_httponly: 'True' + session_cookie_secure: 'True' + use_ssl: 'True' template: | import os @@ -6194,6 +6073,10 @@ conf: SESSION_COOKIE_HTTPONLY = {{ .Values.conf.horizon.local_settings.config.session_cookie_httponly }} + # https://docs.djangoproject.com/en/dev/ref/settings/#csrf-trusted-origins + + CSRF_TRUSTED_ORIGINS = [{{ include "helm-toolkit.utils.joinListWithCommaAndSingleQuotes" .Values.conf.horizon.local_settings.config.csrf_trusted_origins }}] + # Overrides for OpenStack API versions. Use this setting to force the # OpenStack dashboard to use a specific API version for a given service API. # Versions specified here should be integers or floats, not strings. @@ -6219,6 +6102,8 @@ conf: ('Default', _('Default')), ('rackspace_cloud_domain', _('Rackspace')), ) + OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = '{{ .Values.conf.horizon.local_settings.config.keystone_default_domain }}' + # Set Console type: # valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL" or None # Set to None explicitly if you want to deactivate the console. @@ -6339,6 +6224,12 @@ conf: # Determines which authentication choice to show as default. WEBSSO_INITIAL_CHOICE = "{{ .Values.conf.horizon.local_settings.config.auth.sso.initial_choice }}" + {{- if .Values.conf.horizon.local_settings.config.auth.sso.websso_keystone_url }} + # The full auth URL for the Keystone endpoint used for web + single-sign-on authentication. + WEBSSO_KEYSTONE_URL = "{{ .Values.conf.horizon.local_settings.config.auth.sso.websso_keystone_url }}" + {{- end }} + # The list of authentication mechanisms # which include keystone federation protocols. 
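Note: the template additions above read csrf_trusted_origins and auth.sso.websso_keystone_url from values; neither key is set in the config hunk shown here, so they would come from chart defaults or another override layer. A hypothetical example of supplying them; the URLs below are placeholders, not values from this repository:

    conf:
      horizon:
        local_settings:
          config:
            csrf_trusted_origins:
              - https://horizon.example.com                            # placeholder origin
            auth:
              sso:
                websso_keystone_url: https://keystone.example.com/v3   # placeholder URL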
# Current supported protocol IDs are 'saml2' and 'oidc' @@ -6855,470 +6746,49 @@ conf: {{- range $option, $value := .Values.conf.horizon.local_settings.config.raw }} {{ $option }} = {{ toJson $value }} {{- end }} - policy: - ceilometer: - context_is_admin: 'role:admin' - context_is_owner: 'user_id:%(target.user_id)s' - context_is_project: 'project_id:%(target.project_id)s' - segregation: 'rule:context_is_admin' - heat: - 'actions:action': 'rule:deny_stack_user' - 'build_info:build_info': 'rule:deny_stack_user' - 'cloudformation:CancelUpdateStack': 'rule:deny_stack_user' - 'cloudformation:CreateStack': 'rule:deny_stack_user' - 'cloudformation:DeleteStack': 'rule:deny_stack_user' - 'cloudformation:DescribeStackEvents': 'rule:deny_stack_user' - 'cloudformation:DescribeStackResource': '' - 'cloudformation:DescribeStackResources': 'rule:deny_stack_user' - 'cloudformation:DescribeStacks': 'rule:deny_stack_user' - 'cloudformation:EstimateTemplateCost': 'rule:deny_stack_user' - 'cloudformation:GetTemplate': 'rule:deny_stack_user' - 'cloudformation:ListStackResources': 'rule:deny_stack_user' - 'cloudformation:ListStacks': 'rule:deny_stack_user' - 'cloudformation:UpdateStack': 'rule:deny_stack_user' - 'cloudformation:ValidateTemplate': 'rule:deny_stack_user' - 'cloudwatch:DeleteAlarms': 'rule:deny_stack_user' - 'cloudwatch:DescribeAlarmHistory': 'rule:deny_stack_user' - 'cloudwatch:DescribeAlarms': 'rule:deny_stack_user' - 'cloudwatch:DescribeAlarmsForMetric': 'rule:deny_stack_user' - 'cloudwatch:DisableAlarmActions': 'rule:deny_stack_user' - 'cloudwatch:EnableAlarmActions': 'rule:deny_stack_user' - 'cloudwatch:GetMetricStatistics': 'rule:deny_stack_user' - 'cloudwatch:ListMetrics': 'rule:deny_stack_user' - 'cloudwatch:PutMetricAlarm': 'rule:deny_stack_user' - 'cloudwatch:PutMetricData': '' - 'cloudwatch:SetAlarmState': 'rule:deny_stack_user' - 'context_is_admin': 'role:admin' - 'deny_everybody': '!' 
- 'deny_stack_user': 'not role:heat_stack_user' - 'events:index': 'rule:deny_stack_user' - 'events:show': 'rule:deny_stack_user' - 'resource:index': 'rule:deny_stack_user' - 'resource:mark_unhealthy': 'rule:deny_stack_user' - 'resource:metadata': '' - 'resource:show': 'rule:deny_stack_user' - 'resource:signal': '' - 'resource_types:OS::Cinder::EncryptedVolumeType': 'rule:context_is_admin' - 'resource_types:OS::Cinder::VolumeType': 'rule:context_is_admin' - 'resource_types:OS::Manila::ShareType': 'rule:context_is_admin' - 'resource_types:OS::Neutron::QoSBandwidthLimitRule': 'rule:context_is_admin' - 'resource_types:OS::Neutron::QoSPolicy': 'rule:context_is_admin' - 'resource_types:OS::Nova::Flavor': 'rule:context_is_admin' - 'resource_types:OS::Nova::HostAggregate': 'rule:context_is_admin' - 'service:index': 'rule:context_is_admin' - 'software_configs:create': 'rule:deny_stack_user' - 'software_configs:delete': 'rule:deny_stack_user' - 'software_configs:global_index': 'rule:deny_everybody' - 'software_configs:index': 'rule:deny_stack_user' - 'software_configs:show': 'rule:deny_stack_user' - 'software_deployments:create': 'rule:deny_stack_user' - 'software_deployments:delete': 'rule:deny_stack_user' - 'software_deployments:index': 'rule:deny_stack_user' - 'software_deployments:metadata': '' - 'software_deployments:show': 'rule:deny_stack_user' - 'software_deployments:update': 'rule:deny_stack_user' - 'stacks:abandon': 'rule:deny_stack_user' - 'stacks:create': 'rule:deny_stack_user' - 'stacks:delete': 'rule:deny_stack_user' - 'stacks:delete_snapshot': 'rule:deny_stack_user' - 'stacks:detail': 'rule:deny_stack_user' - 'stacks:environment': 'rule:deny_stack_user' - 'stacks:export': 'rule:deny_stack_user' - 'stacks:generate_template': 'rule:deny_stack_user' - 'stacks:global_index': 'rule:deny_everybody' - 'stacks:index': 'rule:deny_stack_user' - 'stacks:list_outputs': 'rule:deny_stack_user' - 'stacks:list_resource_types': 'rule:deny_stack_user' - 'stacks:list_snapshots': 'rule:deny_stack_user' - 'stacks:list_template_functions': 'rule:deny_stack_user' - 'stacks:list_template_versions': 'rule:deny_stack_user' - 'stacks:lookup': '' - 'stacks:preview': 'rule:deny_stack_user' - 'stacks:preview_update': 'rule:deny_stack_user' - 'stacks:preview_update_patch': 'rule:deny_stack_user' - 'stacks:resource_schema': 'rule:deny_stack_user' - 'stacks:restore_snapshot': 'rule:deny_stack_user' - 'stacks:show': 'rule:deny_stack_user' - 'stacks:show_output': 'rule:deny_stack_user' - 'stacks:show_snapshot': 'rule:deny_stack_user' - 'stacks:snapshot': 'rule:deny_stack_user' - 'stacks:template': 'rule:deny_stack_user' - 'stacks:update': 'rule:deny_stack_user' - 'stacks:update_patch': 'rule:deny_stack_user' - 'stacks:validate_template': 'rule:deny_stack_user' - # list of panels to enable for horizon - # this requires that the panels are already installed in the horizon image, if they are not - # nothing will be added - # the name of the panel should be the name of the dir where the panel is installed - # for example heat_dashboard, cloudkittydashboard or neutron_taas_dashboard - extra_panels: - - heat_dashboard - - octavia_dashboard dependencies: - dynamic: - common: - local_image_registry: - jobs: - - horizon-image-repo-sync - services: - - endpoint: node - service: local_image_registry static: - dashboard: - jobs: - - horizon-db-sync - services: - - endpoint: internal - service: oslo_cache - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - db_drop: - services: - - endpoint: 
internal - service: oslo_db - db_init: - services: - - endpoint: internal - service: oslo_db db_sync: jobs: [] - services: - - endpoint: internal - service: oslo_db - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry - tests: - services: - - endpoint: internal - service: dashboard - -pod: - security_context: - horizon: - pod: - runAsUser: 42424 - container: - horizon: - readOnlyRootFilesystem: false - allowPrivilegeEscalation: false - runAsUser: 0 - db_sync: - pod: - runAsUser: 42424 - container: - horizon_db_sync: - readOnlyRootFilesystem: false - allowPrivilegeEscalation: false - runAsUser: 0 - test: - pod: - runAsUser: 42424 - container: - horizon_test: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - tolerations: - horizon: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - mounts: - horizon_db_init: - init_container: null - horizon_db_init: - volumeMounts: - volumes: - horizon_db_sync: - init_container: null - horizon_db_sync: - volumeMounts: - volumes: - horizon: - init_container: null - horizon: - volumeMounts: - volumes: - horizon_tests: - init_container: null - horizon_tests: - volumeMounts: - volumes: - replicas: - server: 1 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - disruption_budget: - horizon: - min_available: 0 - termination_grace_period: - horizon: - timeout: 30 - resources: - enabled: true - server: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - jobs: - db_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_drop: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - image_repo_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - tests: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" -# Names of secrets used by bootstrap and environmental checks -secrets: - identity: - admin: horizon-keystone-admin - oslo_db: - admin: horizon-db-admin - horizon: horizon-db-user - tls: - dashboard: - dashboard: - public: horizon-tls-public - internal: horizon-tls-web - oci_image_registry: - horizon: horizon-oci-image-registry - -tls: - identity: false -# typically overridden by environmental -# values, but should include all endpoints -# required by this chart endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - horizon: - username: horizon - password: password - hosts: - default: localhost - host_fqdn_override: - default: null + dashboard: port: - registry: - default: null + web: + default: 80 + internal: 80 + public: 443 + service: 80 + fluentd: + namespace: fluentbit identity: - name: keystone - auth: - admin: - region_name: RegionOne - username: admin - password: password - 
project_name: admin - user_domain_name: default - project_domain_name: default - hosts: - default: keystone - internal: keystone-api - host_fqdn_override: - default: null - path: - default: /v3 - scheme: - default: http port: api: default: 5000 - public: 80 internal: 5000 + public: 80 service: 5000 - oslo_cache: - auth: - # NOTE(portdirect): this is used to define the value for keystone - # authtoken cache encryption key, if not set it will be populated - # automatically with a random value, but to take advantage of - # this feature all services should be set to use the same key, - # and memcache service. - memcache_secret_key: null - hosts: - default: memcached - host_fqdn_override: - default: null - port: - memcache: - default: 11211 - dashboard: - name: horizon - hosts: - default: horizon-int - public: horizon - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: null - scheme: - default: http - port: - web: - default: 80 - public: 443 - internal: 80 - service: 80 oslo_db: - auth: - admin: - username: root - password: password - secret: - tls: - internal: mariadb-tls-direct - horizon: - username: horizon - password: password + host_fqdn_override: + default: mariadb-cluster-primary.openstack.svc.cluster.local hosts: default: mariadb-cluster-primary + oslo_cache: host_fqdn_override: - default: null - path: /horizon - scheme: mysql+pymysql - port: - mysql: - default: 3306 - # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress - # They are using to enable the Egress K8s network policy. - kube_dns: - namespace: kube-system - name: kubernetes-dns + default: memcached.openstack.svc.cluster.local hosts: - default: kube-dns + default: memcached + oslo_messaging: host_fqdn_override: - default: null - path: - default: null - scheme: http - port: - dns: - default: 53 - protocol: UDP - ingress: - namespace: null - name: ingress - hosts: - default: ingress - port: - ingress: - default: 80 - fluentd: - namespace: fluentbit - name: fluentd + default: rabbitmq.openstack.svc.cluster.local hosts: - default: fluentd-logging - host_fqdn_override: - default: null - path: - default: null - scheme: 'http' - port: - service: - default: 24224 - metrics: - default: 24220 - -network_policy: - horizon: - ingress: - - {} - egress: - - {} - -# NOTE(helm_hook): helm_hook might break for helm2 binary. -# set helm3_hook: false when using the helm2 binary. 
-helm3_hook: true + default: rabbitmq-nodes manifests: - certificates: false - configmap_bin: true - configmap_etc: true configmap_logo: true - deployment: true ingress_api: false job_db_init: false - job_db_sync: true - job_db_drop: false - job_image_repo_sync: true - pdb: true pod_helm_test: false - network_policy: false - secret_db: true secret_ingress_tls: false - secret_keystone: true - secret_registry: true service_ingress: false - service: true diff --git a/base-helm-configs/keystone/keystone-helm-overrides.yaml b/base-helm-configs/keystone/keystone-helm-overrides.yaml index b9c69b691..335e00f53 100644 --- a/base-helm-configs/keystone/keystone-helm-overrides.yaml +++ b/base-helm-configs/keystone/keystone-helm-overrides.yaml @@ -1,28 +1,13 @@ --- -labels: - api: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - test: - node_selector_key: openstack-control-plane - node_selector_value: enabled - -release_group: null - -# NOTE(gagehugo): the pre-install hook breaks upgrade for helm2 -# Set to false to upgrade using helm2 -helm3_hook: true - images: tags: bootstrap: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" db_drop: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" + image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" keystone_api: "quay.io/rackspace/rackerlabs-keystone-rxt:2024.1-ubuntu_jammy-1739377879" - keystone_bootstrap: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + keystone_credential_cleanup: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" keystone_credential_rotate: "quay.io/rackspace/rackerlabs-keystone-rxt:2024.1-ubuntu_jammy-1739377879" keystone_credential_setup: "quay.io/rackspace/rackerlabs-keystone-rxt:2024.1-ubuntu_jammy-1739377879" keystone_db_sync: "quay.io/rackspace/rackerlabs-keystone-rxt:2024.1-ubuntu_jammy-1739377879" @@ -30,733 +15,62 @@ images: keystone_fernet_rotate: "quay.io/rackspace/rackerlabs-keystone-rxt:2024.1-ubuntu_jammy-1739377879" keystone_fernet_setup: "quay.io/rackspace/rackerlabs-keystone-rxt:2024.1-ubuntu_jammy-1739377879" ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - test: "quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0" rabbit_init: "quay.io/rackspace/rackerlabs-rabbitmq:3.13-management" - keystone_credential_cleanup: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" - image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync - -bootstrap: - enabled: true - ks_user: admin - script: | - # admin needs the admin role for the default domain - openstack role add \ - --user="${OS_USERNAME}" \ - --domain="${OS_DEFAULT_DOMAIN}" \ - "admin" - -network: - api: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - external_policy_local: false - node_port: - enabled: false - port: 30500 - admin: - node_port: - enabled: false - port: 30357 + test: "quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0" dependencies: - dynamic: - common: - local_image_registry: - jobs: - - 
keystone-image-repo-sync - services: - - endpoint: node - service: local_image_registry - rabbit_init: - services: - - service: oslo_messaging - endpoint: internal static: - api: - jobs: - - keystone-db-sync - - keystone-credential-setup - - keystone-fernet-setup - services: - - endpoint: internal - service: oslo_cache - - endpoint: internal - service: oslo_db - bootstrap: - jobs: - - keystone-domain-manage - services: - - endpoint: internal - service: identity - credential_rotate: - jobs: - - keystone-credential-setup - credential_setup: null - credential_cleanup: - services: - - endpoint: internal - service: oslo_db - db_drop: - services: - - endpoint: internal - service: oslo_db - db_init: - services: - - endpoint: internal - service: oslo_db db_sync: jobs: - # - keystone-db-init - keystone-credential-setup - keystone-fernet-setup - services: - - endpoint: internal - service: oslo_db - domain_manage: - services: - - endpoint: internal - service: identity - fernet_rotate: - jobs: - - keystone-fernet-setup - fernet_setup: null - tests: - services: - - endpoint: internal - service: identity - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry - -pod: - security_context: - keystone: - pod: - runAsUser: 42424 - container: - keystone_api: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - credential_setup: - pod: - runAsUser: 42424 - container: - keystone_credential_setup: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - fernet_setup: - pod: - runAsUser: 42424 - container: - keystone_fernet_setup: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - fernet_rotate: - pod: - runAsUser: 42424 - container: - keystone_fernet_rotate: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - domain_manage: - pod: - runAsUser: 42424 - container: - keystone_domain_manage_init: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - keystone_domain_manage: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - test: - pod: - runAsUser: 42424 - container: - keystone_test_ks_user: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - keystone_test: - runAsUser: 65500 - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - tolerations: - keystone: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - mounts: - keystone_db_init: - init_container: null - keystone_db_init: - volumeMounts: - volumes: - keystone_db_sync: - init_container: null - keystone_db_sync: - volumeMounts: - volumes: - keystone_api: - init_container: null - keystone_api: - volumeMounts: - volumes: - keystone_tests: - init_container: null - keystone_tests: - volumeMounts: - volumes: - keystone_bootstrap: - init_container: null - keystone_bootstrap: - volumeMounts: - volumes: - keystone_fernet_setup: - init_container: null - keystone_fernet_setup: - volumeMounts: - volumes: - keystone_fernet_rotate: - init_container: null - keystone_fernet_rotate: - volumeMounts: - volumes: - keystone_credential_setup: - init_container: null - keystone_credential_setup: - volumeMounts: - volumes: - keystone_credential_rotate: - init_container: null - keystone_credential_rotate: - volumeMounts: - volumes: - 
keystone_credential_cleanup: - init_container: null - keystone_credential_cleanup: - volumeMounts: - volumes: - keystone_domain_manage: - init_container: null - keystone_domain_manage: - volumeMounts: - volumes: - replicas: - api: 1 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - disruption_budget: - api: - min_available: 0 - termination_grace_period: - api: - timeout: 30 - resources: - enabled: true - api: - requests: - memory: "256Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - jobs: - bootstrap: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - domain_manage: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_drop: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - rabbit_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - tests: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - fernet_setup: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - fernet_rotate: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - credential_setup: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - credential_rotate: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - credential_cleanup: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - image_repo_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - probes: - api: - api: - readiness: - enabled: true - params: - initialDelaySeconds: 15 - periodSeconds: 60 - timeoutSeconds: 15 - liveness: - enabled: true - params: - initialDelaySeconds: 50 - periodSeconds: 60 - timeoutSeconds: 15 -jobs: - fernet_setup: - user: keystone - group: keystone - fernet_rotate: - # NOTE(rk760n): key rotation frequency, token expiration, active keys should statisfy the formula - # max_active_keys = (token_expiration / rotation_frequency) + 2 - # as expiration is 12h, and max_active_keys set to 3 by default, rotation_frequency need to be adjusted - # 12 hours - cron: "0 */12 * * *" - user: keystone - group: keystone - history: - success: 3 - failed: 1 - credential_setup: - user: keystone - group: keystone - credential_rotate: - # monthly - cron: "0 0 1 * *" - migrate_wait: 120 - user: keystone - group: keystone - history: - success: 3 - failed: 1 - -network_policy: - keystone: - ingress: - - {} - egress: - - {} conf: - security: | - # - # Disable access to the entire file system except for the directories that - # are explicitly allowed later. - # - # This currently breaks the configurations that come with some web application - # Debian packages. - # - # - # AllowOverride None - # Require all denied - # - - # Changing the following options will not really affect the security of the - # server, but might make attacks slightly more difficult in some cases. - - # - # ServerTokens - # This directive configures what you return as the Server HTTP response - # Header. The default is 'Full' which sends information about the OS-Type - # and compiled in modules. - # Set to one of: Full | OS | Minimal | Minor | Major | Prod - # where Full conveys the most information, and Prod the least. 
- ServerTokens Prod - - # - # Optionally add a line containing the server version and virtual host - # name to server-generated pages (internal error documents, FTP directory - # listings, mod_status and mod_info output etc., but not CGI generated - # documents or custom error documents). - # Set to "EMail" to also include a mailto: link to the ServerAdmin. - # Set to one of: On | Off | EMail - ServerSignature Off - - # - # Allow TRACE method - # - # Set to "extended" to also reflect the request body (only for testing and - # diagnostic purposes). - # - # Set to one of: On | Off | extended - TraceEnable Off - - # - # Forbid access to version control directories - # - # If you use version control systems in your document root, you should - # probably deny access to their directories. For example, for subversion: - # - # - # Require all denied - # - - # - # Setting this header will prevent MSIE from interpreting files as something - # else than declared by the content type in the HTTP headers. - # Requires mod_headers to be enabled. - # - #Header set X-Content-Type-Options: "nosniff" - - # - # Setting this header will prevent other sites from embedding pages from this - # site as frames. This defends against clickjacking attacks. - # Requires mod_headers to be enabled. - # - #Header set X-Frame-Options: "sameorigin" - software: - apache2: - binary: apache2 - start_parameters: -DFOREGROUND - site_dir: /etc/apache2/sites-enable - conf_dir: /etc/apache2/conf-enabled - mods_dir: /etc/apache2/mods-available - a2enmod: null - a2dismod: null + keystone_api_wsgi: + wsgi: + processes: 2 + threads: 4 keystone: DEFAULT: - log_config_append: /etc/keystone/logging.conf max_token_size: 300 - # NOTE(rk760n): if you need auth notifications to be sent, uncomment it - # notification_opt_out: "" - token: - provider: fernet - # 12 hours - expiration: 43200 - identity: - domain_specific_drivers_enabled: True - domain_config_dir: /etc/keystone/domains - fernet_tokens: - key_repository: /etc/keystone/fernet-keys/ - max_active_keys: 7 - credential: - key_repository: /etc/keystone/credential-keys/ + auth: + methods: "password,token,application_credential,totp" + password: rxt + totp: rxt database: - idle_timeout: 3600 connection_recycle_time: 3600 + idle_timeout: 3600 pool_timeout: 60 - max_retries: -1 - cache: - enabled: true - backend: dogpile.cache.memcached + fernet_tokens: + max_active_keys: 7 oslo_concurrency: lock_path: /tmp/keystone - oslo_messaging_notifications: - driver: messagingv2 oslo_messaging_rabbit: amqp_durable_queues: false - # We define use of quorum queues via kustomize but this was enabling HA queues instead - # ha_queues are deprecated, explicitly set to false and set quorum_queue true rabbit_ha_queues: false rabbit_quorum_queue: true - # TODO: Not available until 2024.1, but once it is, we want to enable these! 
- # new feature ref; https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html rabbit_transient_quorum_queue: false use_queue_manager: false - # Reconnect after a node outage more quickly rabbit_interval_max: 10 # Send more frequent heartbeats and fail unhealthy nodes faster # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 heartbeat_rate: 3 - heartbeat_timeout_threshold: 30 - # Setting lower kombu_reconnect_delay should resolve isssue with HA failing when one node is down + heartbeat_timeout_threshold: 60 + # DEPRECIATION: (warning) heartbeat_in_pthread will be deprecated in 2024.2 + heartbeat_in_pthread: True + # Setting lower kombu_reconnect_delay should resolve issue with HA failing when one node is down # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 kombu_reconnect_delay: 0.5 - oslo_middleware: - enable_proxy_headers_parsing: true - oslo_policy: - policy_file: /etc/keystone/policy.yaml - security_compliance: - # NOTE(vdrok): The following two options have effect only for SQL backend - lockout_failure_attempts: 5 - lockout_duration: 1800 - auth: - methods: password,token,application_credential,totp - password: rxt - totp: rxt rackspace: role_attribute: os_flex - role_attribute_enforcement: False # This should be set to true in production environments. - - # NOTE(lamt) We can leverage multiple domains with different - # configurations as outlined in - # https://docs.openstack.org/keystone/pike/admin/identity-domain-specific-config.html. - # A sample of the value override can be found in sample file: - # tools/overrides/example/keystone_domain_config.yaml - # ks_domains: - policy: - "identity:list_system_grants_for_user": "rule:admin_required or (role:reader and system_scope:all) or rule:owner" - access_rules: {} - rabbitmq: - policies: [] - rally_tests: - run_tempest: false - tests: - KeystoneBasic.add_and_remove_user_role: - - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.authenticate_user_and_validate_token: - - args: {} - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.create_add_and_list_user_roles: - - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.create_and_delete_ec2credential: - - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.create_and_list_ec2credentials: - - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.create_and_delete_role: - - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.create_and_delete_service: - - args: - description: test_description - service_type: Rally_test_type - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.create_and_get_role: - - args: {} - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.create_and_list_services: - - args: - description: test_description - service_type: Rally_test_type - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.create_and_list_tenants: - - args: {} - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - 
KeystoneBasic.create_and_list_users: - - args: {} - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.create_delete_user: - - args: {} - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.create_tenant: - - args: {} - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.create_tenant_with_users: - - args: - users_per_tenant: 1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.create_update_and_delete_tenant: - - args: {} - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.create_user: - - args: {} - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.create_user_set_enabled_and_delete: - - args: - enabled: true - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - - args: - enabled: false - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.create_user_update_password: - - args: {} - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - KeystoneBasic.get_entities: - - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 + role_attribute_enforcement: false + logging: + logger_root: + handlers: + - stdout + level: INFO mpm_event: | ServerLimit 16 @@ -769,6 +83,10 @@ conf: MaxMemFree 256 MaxConnectionsPerChild 0 + policy: + "identity:list_system_grants_for_user": "rule:admin_required or (role:reader and system_scope:all) or rule:owner" + rabbitmq: + policies: [] wsgi_keystone: | {{- $portInt := tuple "identity" "service" "api" $ | include "helm-toolkit.endpoints.endpoint_port_lookup" }} @@ -782,7 +100,7 @@ conf: CustomLog /dev/stdout proxy env=forwarded - WSGIDaemonProcess keystone-public processes=2 threads=8 user=keystone group=keystone display-name=%{GROUP} + WSGIDaemonProcess keystone-public processes={{ .Values.conf.keystone_api_wsgi.wsgi.processes }} threads={{ .Values.conf.keystone_api_wsgi.wsgi.threads }} user=keystone group=keystone display-name=%{GROUP} WSGIProcessGroup keystone-public WSGIScriptAlias / /var/www/cgi-bin/keystone/keystone-wsgi-public WSGIApplicationGroup %{GLOBAL} @@ -797,339 +115,45 @@ conf: CustomLog /dev/stdout combined env=!forwarded CustomLog /dev/stdout proxy env=forwarded - sso_callback_template: | - - - - Keystone WebSSO redirect - - -
-   [HTML body of the removed "Keystone WebSSO redirect" sso_callback_template lost to markup stripping; only the "Please wait..." text remains]
- - - - logging: - loggers: - keys: - - root - - keystone - handlers: - keys: - - stdout - - stderr - - "null" - formatters: - keys: - - context - - default - logger_root: - level: INFO - handlers: - - stdout - logger_keystone: - level: INFO - handlers: - - stdout - qualname: keystone - logger_amqp: - level: WARNING - handlers: stderr - qualname: amqp - logger_amqplib: - level: WARNING - handlers: stderr - qualname: amqplib - logger_eventletwsgi: - level: WARNING - handlers: stderr - qualname: eventlet.wsgi.server - logger_sqlalchemy: - level: WARNING - handlers: stderr - qualname: sqlalchemy - logger_boto: - level: WARNING - handlers: stderr - qualname: boto - handler_null: - class: logging.NullHandler - formatter: default - args: () - handler_stdout: - class: StreamHandler - args: (sys.stdout,) - formatter: context - handler_stderr: - class: StreamHandler - args: (sys.stderr,) - formatter: context - formatter_context: - class: oslo_log.formatters.ContextFormatter - datefmt: "%Y-%m-%d %H:%M:%S" - formatter_default: - format: "%(message)s" - datefmt: "%Y-%m-%d %H:%M:%S" -# Names of secrets used by bootstrap and environmental checks secrets: - identity: - admin: keystone-keystone-admin - test: keystone-keystone-test - oslo_db: - admin: keystone-db-admin - keystone: keystone-db-user - oslo_messaging: - admin: keystone-rabbitmq-admin - keystone: keystone-rabbitmq-user - ldap: - tls: keystone-ldap-tls tls: identity: api: - public: keystone-tls-public internal: keystone-tls-public - oci_image_registry: - keystone: keystone-oci-image-registry -# typically overridden by environmental -# values, but should include all endpoints -# required by this chart endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - keystone: - username: keystone - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null + fluentd: + namespace: fluentbit identity: - namespace: null - name: keystone - auth: - admin: - region_name: RegionOne - username: admin - password: password - project_name: admin - user_domain_name: default - project_domain_name: default - default_domain_id: default - # test: - # role: admin - # region_name: RegionOne - # username: keystone-test - # password: password - # project_name: test - # user_domain_name: default - # project_domain_name: default - # default_domain_id: default - hosts: - default: keystone-api - admin: keystone-api - internal: keystone-api - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: /v3 - scheme: - default: http - service: http port: api: admin: 5000 default: 80 - # NOTE(portdirect): to retain portability across images, and allow - # running under a unprivileged user simply, we default to a port > 1000. 
internal: 5000 service: 5000 oslo_db: - namespace: null - auth: - admin: - username: root - password: password - secret: - tls: - internal: mariadb-tls-direct - keystone: - username: keystone - password: password - hosts: - default: mariadb-cluster-primary host_fqdn_override: - default: null - path: /keystone - scheme: mysql+pymysql - port: - mysql: - default: 3306 - oslo_messaging: - namespace: null - auth: - admin: - username: rabbitmq - password: password - secret: - tls: - internal: rabbitmq-tls-direct - keystone: - username: keystone - password: password - statefulset: - replicas: 3 - name: rabbitmq-server + default: mariadb-cluster-primary.openstack.svc.cluster.local hosts: - default: rabbitmq-nodes - host_fqdn_override: - default: rabbitmq.openstack.svc.cluster.local - path: /keystone - scheme: rabbit - port: - amqp: - default: 5672 - http: - default: 15672 + default: mariadb-cluster-primary oslo_cache: - auth: - # NOTE(portdirect): this is used to define the value for keystone - # authtoken cache encryption key, if not set it will be populated - # automatically with a random value, but to take advantage of - # this feature all services should be set to use the same key, - # and memcache service. - memcache_secret_key: null - namespace: null - hosts: - default: memcached host_fqdn_override: - default: null - port: - memcache: - default: 11211 - ldap: - auth: - client: - tls: - # NOTE(lamt): Specify a CA value here will place a LDAPS certificate at - # /etc/certs/tls.ca. To ensure keystone uses LDAPS, the - # following key will need to be overrided under section [ldap] or the - # correct domain-specific setting, else it will not be enabled: - # - # use_tls: true - # tls_req_cert: allow # Valid values: demand, never, allow - # tls_cacertfile: /etc/certs/tls.ca # abs path to the CA cert - ca: null - fluentd: - namespace: fluentbit - name: fluentd + default: memcached.openstack.svc.cluster.local hosts: - default: fluentd-logging - host_fqdn_override: - default: null - path: - default: null - scheme: 'http' - port: - service: - default: 24224 - metrics: - default: 24220 - # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress - # They are using to enable the Egress K8s network policy. 
- kube_dns: - namespace: kube-system - name: kubernetes-dns - hosts: - default: kube-dns + default: memcached + oslo_messaging: host_fqdn_override: - default: null - path: - default: null - scheme: http - port: - dns: - default: 53 - protocol: UDP - ingress: - namespace: null - name: ingress + default: rabbitmq.openstack.svc.cluster.local hosts: - default: ingress - port: - ingress: - default: 80 - -tls: - identity: false - oslo_messaging: false - oslo_db: false + default: rabbitmq-nodes manifests: - certificates: false - configmap_bin: true - configmap_etc: true - cron_credential_rotate: true - cron_fernet_rotate: true - deployment_api: true ingress_api: false - job_bootstrap: true job_credential_cleanup: false job_credential_setup: true job_db_init: false - job_db_sync: true - job_db_drop: false - job_domain_manage: true - job_fernet_setup: true - job_image_repo_sync: true job_rabbit_init: false - pdb_api: true pod_rally_test: false - network_policy: false - secret_credential_keys: true - secret_db: true - secret_fernet_keys: true secret_ingress_tls: false - secret_keystone: true - secret_rabbitmq: true - secret_registry: true service_ingress_api: false - service_api: true diff --git a/base-helm-configs/kube-ovn/kube-ovn-helm-overrides.yaml b/base-helm-configs/kube-ovn/kube-ovn-helm-overrides.yaml index 7a1df870d..cdbca9d79 100644 --- a/base-helm-configs/kube-ovn/kube-ovn-helm-overrides.yaml +++ b/base-helm-configs/kube-ovn/kube-ovn-helm-overrides.yaml @@ -10,8 +10,7 @@ global: repository: kube-ovn dpdkRepository: kube-ovn-dpdk vpcRepository: vpc-nat-gateway - # Change "tag" when PR https://github.com/kubeovn/kube-ovn/pull/5005 is merged - tag: v1.12.32-gc-disable + tag: v1.12.32 support_arm: true thirdparty: true diff --git a/base-helm-configs/libvirt/libvirt-helm-overrides.yaml b/base-helm-configs/libvirt/libvirt-helm-overrides.yaml index 821de2461..fb36e8157 100644 --- a/base-helm-configs/libvirt/libvirt-helm-overrides.yaml +++ b/base-helm-configs/libvirt/libvirt-helm-overrides.yaml @@ -1,296 +1,20 @@ -release_group: null -labels: - agent: - libvirt: - node_selector_key: openstack-compute-node - node_selector_value: enabled +--- images: tags: - libvirt: docker.io/openstackhelm/libvirt:2024.1-ubuntu_jammy # We want to use jammy - libvirt_exporter: vexxhost/libvirtd-exporter:latest - ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_focal_18.2.0-1-20231013' - dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0 - image_repo_sync: docker.io/library/docker:17.07.0 - kubectl: docker.io/bitnami/kubectl:latest - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync + libvirt: docker.io/openstackhelm/libvirt:2024.1-ubuntu_jammy + ceph_config_helper: docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_18.2.2-1-20240312 + dep_check: quay.io/airshipit/kubernetes-entrypoint:latest-ubuntu_jammy network: - # provide what type of network wiring will be used - # possible options: ovn, openvswitch, linuxbridge, sriov backend: - ovn -endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - libvirt: - username: libvirt - password: password - hosts: - default: localhost - host_fqdn_override: - default: 
null - port: - registry: - default: null - libvirt_exporter: - port: - metrics: - default: 9474 -network_policy: - libvirt: - ingress: - - {} - egress: - - {} -ceph_client: - configmap: ceph-etc - user_secret_name: pvc-ceph-client-key conf: ceph: - enabled: false # Set to true when we has ceph support for openstack. - admin_keyring: null - cinder: - user: "cinder" - keyring: null - secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337 - # Cinder Ceph backend that is not configured by the k8s cluter - external_ceph: - enabled: false - user: null - secret_uuid: null - user_secret_name: null - libvirt: - listen_tcp: "1" - listen_tls: "0" - auth_tcp: "none" - ca_file: "/etc/pki/CA/cacert.pem" - cert_file: "/etc/pki/libvirt/servercert.pem" - key_file: "/etc/pki/libvirt/private/serverkey.pem" - auth_unix_rw: "none" - listen_addr: 0.0.0.0 - log_level: "3" - log_outputs: "1:file:/var/log/libvirt/libvirtd.log" - qemu: - vnc_tls: "0" - vnc_tls_x509_verify: "0" - stdio_handler: "file" - user: "nova" - group: "kvm" - kubernetes: - cgroup: "kubepods.slice" - vencrypt: - # Issuer to use for the vencrypt certs. - issuer: - kind: ClusterIssuer - name: ca-clusterissuer - # Script is included here (vs in bin/) to allow overriding, in the case that - # communication happens over an IP other than the pod IP for some reason. - cert_init_sh: | - #!/bin/bash - set -x - - HOSTNAME_FQDN=$(hostname --fqdn) - - # Script to create certs for each libvirt pod based on pod IP (by default). - cat < /tmp/${TYPE}.crt - kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.tls\.key}' | base64 -d > /tmp/${TYPE}.key - kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.ca\.crt}' | base64 -d > /tmp/${TYPE}-ca.crt -pod: - probes: - libvirt: - libvirt: - liveness: - enabled: true - params: - initialDelaySeconds: 30 - periodSeconds: 60 - timeoutSeconds: 5 - readiness: - enabled: true - params: - initialDelaySeconds: 15 - periodSeconds: 60 - timeoutSeconds: 5 - security_context: - libvirt: - pod: - runAsUser: 0 - container: - ceph_admin_keyring_placement: - readOnlyRootFilesystem: false - ceph_keyring_placement: - readOnlyRootFilesystem: false - libvirt: - privileged: true - readOnlyRootFilesystem: false - libvirt_exporter: - privileged: true - sidecars: - libvirt_exporter: false - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - tolerations: - libvirt: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - dns_policy: "ClusterFirstWithHostNet" - mounts: - libvirt: - init_container: null - libvirt: - lifecycle: - upgrades: - daemonsets: - pod_replacement_strategy: RollingUpdate - libvirt: - enabled: true - min_ready_seconds: 0 - max_unavailable: 20% - resources: enabled: false - libvirt: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - jobs: - image_repo_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - libvirt_exporter: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "256Mi" + libvirt: + listen_addr: "0.0.0.0" dependencies: dynamic: - common: - local_image_registry: - jobs: - - libvirt-image-repo-sync - services: - - endpoint: node - service: local_image_registry targeted: ovn: libvirt: - pod: [] # In a hybrid deployment, we don't want to run ovn-controller on 
the same node as libvirt - # - requireSameNode: true - # labels: - # application: ovn - # component: ovn-controller - openvswitch: - libvirt: - pod: - - requireSameNode: true - labels: - application: neutron - component: neutron-ovs-agent - linuxbridge: - libvirt: - pod: - - requireSameNode: true - labels: - application: neutron - component: neutron-lb-agent - sriov: - libvirt: - pod: - - requireSameNode: true - labels: - application: neutron - component: neutron-sriov-agent - static: - libvirt: - services: null - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry -manifests: - configmap_bin: true - configmap_etc: true - daemonset_libvirt: true - job_image_repo_sync: true - network_policy: false - role_cert_manager: false - secret_registry: true -secrets: - oci_image_registry: - libvirt: libvirt-oci-image-registry-key - tls: - server: libvirt-tls-server - client: libvirt-tls-client + pod: [] # In a hybrid deployment, we don't want to run ovn-controller on the same node as libvirt diff --git a/base-helm-configs/magnum/magnum-helm-overrides.yaml b/base-helm-configs/magnum/magnum-helm-overrides.yaml index 1feba47cb..0dd6c9143 100644 --- a/base-helm-configs/magnum/magnum-helm-overrides.yaml +++ b/base-helm-configs/magnum/magnum-helm-overrides.yaml @@ -1,262 +1,97 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default values for magnum. -# This is a YAML-formatted file. -# Declare name/value pairs to be passed into your templates. 
-# name: value - --- -release_group: null - -labels: - api: - node_selector_key: openstack-control-plane - node_selector_value: enabled - conductor: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - images: tags: bootstrap: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - magnum_db_sync: "quay.io/rackspace/rackerlabs-magnum:2024.1-ubuntu_jammy" db_drop: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - rabbit_init: "quay.io/rackspace/rackerlabs-rabbitmq:3.13-management" - ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" + image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" ks_endpoints: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" magnum_api: "quay.io/rackspace/rackerlabs-magnum:2024.1-ubuntu_jammy" magnum_conductor: "quay.io/rackspace/rackerlabs-magnum:2024.1-ubuntu_jammy" - dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" - image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync + magnum_db_sync: "quay.io/rackspace/rackerlabs-magnum:2024.1-ubuntu_jammy" + rabbit_init: "quay.io/rackspace/rackerlabs-rabbitmq:3.13-management" conf: - paste: - pipeline:main: - pipeline: cors healthcheck request_id authtoken api_v1 - app:api_v1: - paste.app_factory: magnum.api.app:app_factory - filter:authtoken: - acl_public_routes: /, /v1 - paste.filter_factory: magnum.api.middleware.auth_token:AuthTokenMiddleware.factory - filter:request_id: - paste.filter_factory: oslo_middleware:RequestId.factory - filter:cors: - paste.filter_factory: oslo_middleware.cors:filter_factory - oslo_config_project: magnum - filter:healthcheck: - paste.filter_factory: oslo_middleware:Healthcheck.factory - backends: disable_by_file - disable_by_file_path: /etc/magnum/healthcheck_disable - policy: {} + logging: + logger_root: + handlers: + - stdout + level: INFO magnum: - DEFAULT: - log_config_append: /etc/magnum/logging.conf - transport_url: null - glance_client: - api_version: 2 + barbican_client: endpoint_type: publicURL region_name: RegionOne - nova_client: + cinder_client: endpoint_type: publicURL region_name: RegionOne - cinder_client: + database: + idle_timeout: 3600 + connection_recycle_time: 3600 + pool_timeout: 60 + glance_client: + api_version: 2 endpoint_type: publicURL region_name: RegionOne - neutron_client: + heat_client: endpoint_type: publicURL region_name: RegionOne - barbican_client: + keystone_auth: + auth_section: keystone_authtoken + keystone_authtoken: + auth_type: password + auth_version: v3 + interface: public + memcache_security_strategy: ENCRYPT + service_token_roles: service + service_token_roles_required: true + service_type: container-infra + magnum_client: endpoint_type: publicURL region_name: RegionOne - heat_client: + neutron_client: endpoint_type: publicURL region_name: RegionOne - magnum_client: + nova_client: endpoint_type: publicURL region_name: RegionOne octavia_client: endpoint_type: 
publicURL region_name: RegionOne - cluster: - temp_cache_dir: /var/lib/magnum/certificate-cache - oslo_messaging_notifications: - driver: messagingv2 oslo_concurrency: lock_path: /tmp/magnum + oslo_messaging_notifications: + driver: messagingv2 oslo_messaging_rabbit: amqp_durable_queues: false - # We define use of quorum queues via kustomize but this was enabling HA queues instead - # ha_queues are deprecated, explicitly set to false and set quorum_queue true rabbit_ha_queues: false rabbit_quorum_queue: true - # TODO: Not available until 2024.1, but once it is, we want to enable these! - # new feature ref; https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html rabbit_transient_quorum_queue: false use_queue_manager: false - # Reconnect after a node outage more quickly rabbit_interval_max: 10 # Send more frequent heartbeats and fail unhealthy nodes faster # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 heartbeat_rate: 3 - heartbeat_timeout_threshold: 30 - # Setting lower kombu_reconnect_delay should resolve isssue with HA failing when one node is down + heartbeat_timeout_threshold: 60 + # NOTE (deprecation warning) heartbeat_in_pthread will be deprecated in 2024.2 + heartbeat_in_pthread: True + # Setting lower kombu_reconnect_delay should resolve issue with HA failing when one node is down # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 kombu_reconnect_delay: 0.5 - oslo_policy: - policy_file: /etc/magnum/policy.yaml - certificates: - cert_manager_type: barbican - database: - max_retries: -1 trust: cluster_user_trust: true - trustee_keystone_interface: public trustee_domain_name: magnum - keystone_auth: - auth_section: keystone_authtoken - keystone_authtoken: - interface: public - service_token_roles: service - service_token_roles_required: true - auth_type: password - auth_version: v3 - memcache_security_strategy: ENCRYPT - service_type: container-infra - api: - # NOTE(portdirect): the bind port should not be defined, and is manipulated - # via the endpoints section. 
- port: null - host: 0.0.0.0 - logging: - loggers: - keys: - - root - - magnum - handlers: - keys: - - stdout - - stderr - - "null" - formatters: - keys: - - context - - default - logger_root: - level: INFO - handlers: - - stdout - logger_magnum: - level: INFO - handlers: - - stdout - qualname: magnum - logger_amqp: - level: WARNING - handlers: stderr - qualname: amqp - logger_amqplib: - level: WARNING - handlers: stderr - qualname: amqplib - logger_eventletwsgi: - level: WARNING - handlers: stderr - qualname: eventlet.wsgi.server - logger_sqlalchemy: - level: WARNING - handlers: stderr - qualname: sqlalchemy - logger_boto: - level: WARNING - handlers: stderr - qualname: boto - handler_null: - class: logging.NullHandler - formatter: default - args: () - handler_stdout: - class: StreamHandler - args: (sys.stdout,) - formatter: context - handler_stderr: - class: StreamHandler - args: (sys.stderr,) - formatter: context - formatter_context: - class: oslo_log.formatters.ContextFormatter - datefmt: "%Y-%m-%d %H:%M:%S" - formatter_default: - format: "%(message)s" - datefmt: "%Y-%m-%d %H:%M:%S" + trustee_keystone_interface: public magnum_api_uwsgi: uwsgi: - add-header: "Connection: close" - buffer-size: 65535 - die-on-term: true - enable-threads: true - exit-on-reload: false - hook-master-start: unix_signal:15 gracefully_kill_them_all - lazy-apps: true - log-x-forwarded-for: true - master: true - procname-prefix-spaced: "magnum-api:" - route-user-agent: '^kube-probe.* donotlog:' - thunder-lock: true - worker-reload-mercy: 80 - wsgi-file: /var/lib/openstack/bin/magnum-api-wsgi - -network: - api: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - external_policy_local: false - node_port: - enabled: false - port: 30511 - -bootstrap: - enabled: false - ks_user: magnum - script: | - openstack token issue + processes: 4 + threads: 2 dependencies: - dynamic: - common: - local_image_registry: - jobs: - - magnum-image-repo-sync - services: - - endpoint: node - service: local_image_registry static: api: jobs: @@ -264,440 +99,50 @@ dependencies: - magnum-ks-user - magnum-domain-ks-user - magnum-ks-endpoints - #- magnum-rabbit-init - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: key_manager - - endpoint: internal - service: orchestration conductor: jobs: - magnum-db-sync - magnum-ks-user - magnum-domain-ks-user - magnum-ks-endpoints - #- magnum-rabbit-init - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: key_manager - - endpoint: internal - service: orchestration - db_drop: - services: - - endpoint: internal - service: oslo_db - db_init: - services: - - endpoint: internal - service: oslo_db db_sync: jobs: null - services: - - endpoint: internal - service: oslo_db - ks_endpoints: - jobs: - - magnum-ks-service - services: - - endpoint: internal - service: identity - ks_service: - services: - - endpoint: internal - service: identity - ks_user: - services: - - endpoint: internal - service: identity - rabbit_init: - services: - - endpoint: internal - service: oslo_messaging - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry - -# Names of secrets used by bootstrap and environmental checks -secrets: - identity: - admin: 
magnum-keystone-admin - magnum: magnum-keystone-user - magnum_stack_user: magnum-keystone-stack-user - oslo_db: - admin: magnum-db-admin - magnum: magnum-db-user - oslo_messaging: - admin: magnum-rabbitmq-admin - magnum: magnum-rabbitmq-user - oci_image_registry: - magnum: magnum-oci-image-registry -# typically overridden by environmental -# values, but should include all endpoints -# required by this chart endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - magnum: - username: magnum - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null + fluentd: + namespace: fluentbit identity: - name: keystone - auth: - admin: - region_name: RegionOne - username: admin - password: password - project_name: admin - user_domain_name: default - project_domain_name: default - magnum: - role: admin - region_name: RegionOne - username: magnum - password: password - project_name: service - user_domain_name: service - project_domain_name: service - magnum_stack_user: - role: admin - region_name: RegionOne - username: magnum-domain - password: password - domain_name: magnum - hosts: - default: keystone - internal: keystone-api - host_fqdn_override: - default: null - path: - default: /v3 - scheme: - default: http port: api: default: 5000 - public: 80 internal: 5000 - service: 5000 - container_infra: - name: magnum - hosts: - default: magnum-api - public: magnum - host_fqdn_override: - default: null - path: - default: /v1 - scheme: - default: http - port: - api: - default: 9511 public: 80 + service: 5000 key_manager: - name: barbican hosts: - default: barbican-api public: barbican-api - host_fqdn_override: - default: null - path: - default: /v1 - scheme: - default: http port: api: default: 9311 public: 9311 - orchestration: - name: heat - hosts: - default: heat-api - public: heat - host_fqdn_override: - default: null - path: - default: '/v1/%(project_id)s' - scheme: - default: 'http' - port: - api: - default: 8004 - public: 80 oslo_db: - auth: - admin: - username: root - password: password - magnum: - username: magnum - password: password + host_fqdn_override: + default: mariadb-cluster-primary.openstack.svc.cluster.local hosts: default: mariadb-cluster-primary - host_fqdn_override: - default: null - path: /magnum - scheme: mysql+pymysql - port: - mysql: - default: 3306 oslo_cache: - auth: - # NOTE(portdirect): this is used to define the value for keystone - # authtoken cache encryption key, if not set it will be populated - # automatically with a random value, but to take advantage of - # this feature all services should be set to use the same key, - # and memcache service. 
- memcache_secret_key: null + host_fqdn_override: + default: memcached.openstack.svc.cluster.local hosts: default: memcached - host_fqdn_override: - default: null - port: - memcache: - default: 11211 oslo_messaging: - auth: - admin: - username: rabbitmq - password: password - magnum: - username: magnum - password: password - statefulset: - replicas: 2 - name: rabbitmq-server - hosts: - default: rabbitmq-nodes host_fqdn_override: default: rabbitmq.openstack.svc.cluster.local - path: /magnum - scheme: rabbit - port: - amqp: - default: 5672 - http: - default: 15672 - fluentd: - namespace: fluentbit - name: fluentd hosts: - default: fluentd-logging - host_fqdn_override: - default: null - path: - default: null - scheme: 'http' - port: - service: - default: 24224 - metrics: - default: 24220 - -pod: - user: - magnum: - uid: 42424 - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - mounts: - magnum_api: - init_container: null - magnum_api: - volumeMounts: - volumes: - magnum_conductor: - init_container: null - magnum_conductor: - volumeMounts: - volumes: - magnum_bootstrap: - init_container: null - magnum_bootstrap: - volumeMounts: - volumes: - magnum_db_sync: - magnum_db_sync: - volumeMounts: - volumes: - replicas: - api: 1 - conductor: 1 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - disruption_budget: - api: - min_available: 0 - termination_grace_period: - api: - timeout: 30 - resources: - enabled: true - api: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - conductor: - requests: - memory: "512Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - jobs: - bootstrap: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - db_init: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - db_sync: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - db_drop: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - ks_endpoints: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - ks_service: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - ks_user: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - rabbit_init: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - tests: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - image_repo_sync: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - - -network_policy: - magnum: - ingress: - - {} - egress: - - {} + default: rabbitmq-nodes manifests: - configmap_bin: true - configmap_etc: true - deployment_api: true ingress_api: false - job_bootstrap: true job_db_init: false - job_db_sync: true - job_db_drop: false - job_image_repo_sync: true - job_ks_endpoints: true - job_ks_service: true - job_ks_user_domain: true - job_ks_user: true job_rabbit_init: false - pdb_api: true - network_policy: false - secret_db: true - secret_keystone: true - secret_rabbitmq: true - secret_registry: true - service_api: true service_ingress_api: false - statefulset_conductor: true -... 
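Note on the oslo_messaging_rabbit changes in the keystone and magnum hunks above: raising heartbeat_timeout_threshold from 30 to 60 while keeping heartbeat_rate at 3 moves the effective heartbeat interval from 30 / 3 / 2.0 = 5 seconds to 60 / 3 / 2.0 = 10 seconds, so the retained "= 5" example in the chart comment now describes the old threshold. The following values sketch is illustrative only and not part of the diff; it collects the settings these hunks converge on (shown under conf.keystone, with the same keys appearing under conf.magnum):

    conf:
      keystone:
        oslo_messaging_rabbit:
          # heartbeat send interval = heartbeat_timeout_threshold / heartbeat_rate / 2.0
          #                         = 60 / 3 / 2.0 = 10 seconds
          heartbeat_rate: 3
          heartbeat_timeout_threshold: 60
          heartbeat_in_pthread: True
          # reconnect quickly after a node outage
          rabbit_interval_max: 10
          kombu_reconnect_delay: 0.5
          # quorum queues instead of the deprecated HA queues
          rabbit_ha_queues: false
          rabbit_quorum_queue: true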
diff --git a/base-helm-configs/monitoring/openstack-metrics-exporter/openstack-metrics-exporter-helm-overrides.yaml b/base-helm-configs/monitoring/openstack-metrics-exporter/openstack-metrics-exporter-helm-overrides.yaml index f55a44bab..488bf4ecb 100644 --- a/base-helm-configs/monitoring/openstack-metrics-exporter/openstack-metrics-exporter-helm-overrides.yaml +++ b/base-helm-configs/monitoring/openstack-metrics-exporter/openstack-metrics-exporter-helm-overrides.yaml @@ -8,7 +8,7 @@ replicaCount: 1 image: repository: ghcr.io/openstack-exporter/openstack-exporter - tag: 1.7.0 + tag: latest pullPolicy: Always serviceMonitor: diff --git a/base-helm-configs/neutron/neutron-helm-overrides.yaml b/base-helm-configs/neutron/neutron-helm-overrides.yaml index 7563aa366..bdd77f5f2 100644 --- a/base-helm-configs/neutron/neutron-helm-overrides.yaml +++ b/base-helm-configs/neutron/neutron-helm-overrides.yaml @@ -1,5 +1,4 @@ -release_group: null - +--- images: tags: bootstrap: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" @@ -15,6 +14,7 @@ images: neutron_linuxbridge_agent: "quay.io/rackspace/rackerlabs-neutron:2024.1-ubuntu_jammy" neutron_metadata: "quay.io/rackspace/rackerlabs-neutron:2024.1-ubuntu_jammy" neutron_ovn_metadata: "quay.io/rackspace/rackerlabs-neutron:2024.1-ubuntu_jammy" + neutron_ovn_vpn: "quay.io/rackspace/rackerlabs-neutron:2024.1-ubuntu_jammy" neutron_openvswitch_agent: "quay.io/rackspace/rackerlabs-neutron:2024.1-ubuntu_jammy" neutron_server: "quay.io/rackspace/rackerlabs-neutron:2024.1-ubuntu_jammy" neutron_rpc_server: "quay.io/rackspace/rackerlabs-neutron:2024.1-ubuntu_jammy" @@ -24,2275 +24,156 @@ images: purge_test: "quay.io/rackspace/rackerlabs-ospurge:latest" rabbit_init: "quay.io/rackspace/rackerlabs-rabbitmq:3.13-management" netoffload: "quay.io/rackspace/rackerlabs-netoffload:v1.0.1" - neutron_sriov_agent: docker.io/openstackhelm/neutron:stein-18.04-sriov - neutron_sriov_agent_init: docker.io/openstackhelm/neutron:stein-18.04-sriov - neutron_bgp_dragent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic - neutron_ironic_agent: docker.io/openstackhelm/neutron:stein-ubuntu_bionic + neutron_sriov_agent: "quay.io/rackspace/rackerlabs-neutron:2024.1-ubuntu_jammy" + neutron_sriov_agent_init: "quay.io/rackspace/rackerlabs-neutron:2024.1-ubuntu_jammy" + neutron_bgp_dragent: "quay.io/rackspace/rackerlabs-neutron:2024.1-ubuntu_jammy" + neutron_ironic_agent: "quay.io/rackspace/rackerlabs-neutron:2024.1-ubuntu_jammy" dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync labels: - agent: - dhcp: - node_selector_key: openstack-control-plane - node_selector_value: enabled - l3: - node_selector_key: openstack-control-plane - node_selector_value: enabled - metadata: - node_selector_key: openstack-control-plane - node_selector_value: enabled - l2gw: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - lb: - node_selector_key: linuxbridge - node_selector_value: enabled - # openvswitch is a special case, requiring a special - # label that can apply to both control hosts - # and compute hosts, until we get more sophisticated - # with our daemonset scheduling ovs: node_selector_key: openstack-network-node - node_selector_value: enabled - sriov: - node_selector_key: sriov - 
node_selector_value: enabled - bagpipe_bgp: - node_selector_key: openstack-compute-node - node_selector_value: enabled - bgp_dragent: - node_selector_key: openstack-compute-node - node_selector_value: enabled - server: - node_selector_key: openstack-control-plane - node_selector_value: enabled - ironic_agent: - node_selector_key: openstack-control-plane - node_selector_value: enabled - netns_cleanup_cron: - node_selector_key: openstack-control-plane - node_selector_value: enabled - test: - node_selector_key: openstack-control-plane - node_selector_value: enabled network: - # provide what type of network wiring will be used backend: - ovn - # NOTE(Portdirect): Share network namespaces with the host, - # allowing agents to be restarted without packet loss and simpler - # debugging. This feature requires mount propagation support. - share_namespaces: true - interface: - # Tunnel interface will be used for VXLAN tunneling. - tunnel: null - # If tunnel is null there is a fallback mechanism to search - # for interface with routing using tunnel network cidr. - tunnel_network_cidr: "0/0" - # To perform setup of network interfaces using the SR-IOV init - # container you can use a section similar to: - # sriov: - # - device: ${DEV} - # num_vfs: 8 - # mtu: 9214 - # promisc: false - # qos: - # - vf_num: 0 - # share: 10 - # queues_per_vf: - # - num_queues: 16 - # exclude_vf: 0,11,21 - server: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - external_policy_local: false - node_port: - enabled: false - port: 30096 - -bootstrap: - enabled: false - ks_user: neutron - script: | - openstack token issue dependencies: - dynamic: - common: - local_image_registry: - jobs: - - neutron-image-repo-sync - services: - - endpoint: node - service: local_image_registry - targeted: - sriov: {} - l2gateway: {} - bagpipe_bgp: {} - ovn: - server: - pod: null - bgp_dragent: {} - openvswitch: - dhcp: - pod: - - requireSameNode: true - labels: - application: neutron - component: neutron-ovs-agent - l3: - pod: - - requireSameNode: true - labels: - application: neutron - component: neutron-ovs-agent - metadata: - pod: - - requireSameNode: true - labels: - application: neutron - component: neutron-ovs-agent - linuxbridge: - dhcp: - pod: - - requireSameNode: true - labels: - application: neutron - component: neutron-lb-agent - l3: - pod: - - requireSameNode: true - labels: - application: neutron - component: neutron-lb-agent - metadata: - pod: - - requireSameNode: true - labels: - application: neutron - component: neutron-lb-agent - lb_agent: - pod: null static: - bootstrap: - services: - - endpoint: internal - service: network - - endpoint: internal - service: compute - db_drop: - services: - - endpoint: internal - service: oslo_db - db_init: - services: - - endpoint: internal - service: oslo_db db_sync: jobs: null - services: - - endpoint: internal - service: oslo_db dhcp: - pod: null jobs: null - services: - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: network - - endpoint: internal - service: compute - ks_endpoints: - jobs: - - neutron-ks-service - services: - - endpoint: internal - service: identity - ks_service: - services: - - endpoint: internal - service: identity - ks_user: - services: - - endpoint: internal - service: identity - rabbit_init: - services: - - service: oslo_messaging - endpoint: internal l3: - pod: null jobs: null - services: - - endpoint: internal - service: 
oslo_messaging - - endpoint: internal - service: network - - endpoint: internal - service: compute lb_agent: - pod: null jobs: null - services: - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: network metadata: - pod: null jobs: null - services: - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: network - - endpoint: internal - service: compute - - endpoint: public - service: compute_metadata ovn_metadata: pod: [] - # - requireSameNode: true - # labels: - # application: ovn - # component: ovn-controller - services: - - endpoint: internal - service: compute_metadata - - endpoint: internal - service: network ovs_agent: jobs: null - pod: - - requireSameNode: true - labels: - application: openvswitch - component: server - services: - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: network - server: + rpc_server: jobs: - neutron-db-sync - - neutron-ks-user - - neutron-ks-endpoints - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_cache - - endpoint: internal - service: identity - ironic_agent: + server: jobs: - neutron-db-sync - neutron-ks-user - neutron-ks-endpoints - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_cache - - endpoint: internal - service: identity - tests: - services: - - endpoint: internal - service: network - - endpoint: internal - service: compute - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry pod: use_fqdn: neutron_agent: false - probes: - rpc_timeout: 60 - rpc_retries: 2 - dhcp_agent: - dhcp_agent: - readiness: - enabled: true - params: - initialDelaySeconds: 30 - periodSeconds: 190 - timeoutSeconds: 185 - liveness: - enabled: true - params: - initialDelaySeconds: 120 - periodSeconds: 600 - timeoutSeconds: 580 - l3_agent: - l3_agent: - readiness: - enabled: true - params: - initialDelaySeconds: 30 - periodSeconds: 190 - timeoutSeconds: 185 - liveness: - enabled: true - params: - initialDelaySeconds: 120 - periodSeconds: 600 - timeoutSeconds: 580 - lb_agent: - lb_agent: - readiness: - enabled: true - metadata_agent: - metadata_agent: - readiness: - enabled: true - params: - initialDelaySeconds: 30 - periodSeconds: 190 - timeoutSeconds: 185 - liveness: - enabled: true - params: - initialDelaySeconds: 120 - periodSeconds: 600 - timeoutSeconds: 580 - ovn_metadata_agent: - ovn_metadata_agent: - readiness: - enabled: true - params: - initialDelaySeconds: 30 - periodSeconds: 190 - timeoutSeconds: 185 - liveness: - enabled: true - params: - initialDelaySeconds: 120 - periodSeconds: 600 - timeoutSeconds: 580 - ovs_agent: - ovs_agent: - readiness: - enabled: true - params: - timeoutSeconds: 10 - liveness: - enabled: true - params: - initialDelaySeconds: 120 - periodSeconds: 600 - timeoutSeconds: 580 - sriov_agent: - sriov_agent: - readiness: - enabled: true - params: - initialDelaySeconds: 30 - periodSeconds: 190 - timeoutSeconds: 185 - bagpipe_bgp: - bagpipe_bgp: - readiness: - enabled: true - params: - liveness: - enabled: true - params: - initialDelaySeconds: 60 - bgp_dragent: - bgp_dragent: - readiness: - enabled: false - params: - liveness: - enabled: true - params: - initialDelaySeconds: 60 - l2gw_agent: - l2gw_agent: - readiness: - enabled: true - params: - initialDelaySeconds: 30 - periodSeconds: 15 - timeoutSeconds: 65 - liveness: - enabled: true - params: - 
initialDelaySeconds: 120 - periodSeconds: 90 - timeoutSeconds: 70 - server: - server: - readiness: - enabled: true - params: - periodSeconds: 15 - timeoutSeconds: 10 - liveness: - enabled: true - params: - initialDelaySeconds: 60 - periodSeconds: 15 - timeoutSeconds: 10 - security_context: - neutron_dhcp_agent: - pod: - runAsUser: 42424 - container: - neutron_dhcp_agent: - readOnlyRootFilesystem: true - privileged: true - neutron_l2gw_agent: - pod: - runAsUser: 42424 - container: - neutron_l2gw_agent: - readOnlyRootFilesystem: true - privileged: true - neutron_bagpipe_bgp: - pod: - runAsUser: 42424 - container: - neutron_bagpipe_bgp: - readOnlyRootFilesystem: true - privileged: true - neutron_bgp_dragent: - pod: - runAsUser: 42424 - container: - neutron_bgp_dragent: - readOnlyRootFilesystem: true - privileged: true - neutron_l3_agent: - pod: - runAsUser: 42424 - container: - neutron_l3_agent: - readOnlyRootFilesystem: true - privileged: true - neutron_lb_agent: - pod: - runAsUser: 42424 - container: - neutron_lb_agent_kernel_modules: - capabilities: - add: - - SYS_MODULE - - SYS_CHROOT - runAsUser: 0 - readOnlyRootFilesystem: true - neutron_lb_agent_init: - privileged: true - runAsUser: 0 - readOnlyRootFilesystem: true - neutron_lb_agent: - readOnlyRootFilesystem: true - privileged: true - neutron_metadata_agent: - pod: - runAsUser: 42424 - container: - neutron_metadata_agent_init: - runAsUser: 0 - readOnlyRootFilesystem: true - neutron_ovn_metadata_agent: - pod: - runAsUser: 42424 - container: - neutron_ovn_metadata_agent_init: - runAsUser: 0 - readOnlyRootFilesystem: true - neutron_ovs_agent: - pod: - runAsUser: 42424 - container: - neutron_openvswitch_agent_kernel_modules: - capabilities: - add: - - SYS_MODULE - - SYS_CHROOT - runAsUser: 0 - readOnlyRootFilesystem: true - netoffload: - privileged: true - runAsUser: 0 - readOnlyRootFilesystem: true - neutron_ovs_agent_init: - privileged: true - runAsUser: 0 - readOnlyRootFilesystem: true - neutron_ovs_agent: - readOnlyRootFilesystem: true - privileged: true - neutron_server: - pod: - runAsUser: 42424 - container: - nginx: - runAsUser: 0 - readOnlyRootFilesystem: false - neutron_server: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - neutron_sriov_agent: - pod: - runAsUser: 42424 - container: - neutron_sriov_agent_init: - privileged: true - runAsUser: 0 - readOnlyRootFilesystem: false - neutron_sriov_agent: - readOnlyRootFilesystem: true - privileged: true - neutron_ironic_agent: - pod: - runAsUser: 42424 - container: - neutron_ironic_agent: - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - neutron_netns_cleanup_cron: - pod: - runAsUser: 42424 - container: - neutron_netns_cleanup_cron: - readOnlyRootFilesystem: true - privileged: true - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - tolerations: - neutron: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - mounts: - neutron_server: - init_container: null - neutron_server: - volumeMounts: - volumes: - neutron_dhcp_agent: - init_container: null - neutron_dhcp_agent: - volumeMounts: - volumes: - neutron_l3_agent: - init_container: null - neutron_l3_agent: - volumeMounts: - volumes: - neutron_lb_agent: - init_container: null - neutron_lb_agent: - volumeMounts: - volumes: - neutron_metadata_agent: - 
init_container: null - neutron_metadata_agent: - volumeMounts: - volumes: - neutron_ovn_metadata_agent: - init_container: null - neutron_ovn_metadata_agent: - volumeMounts: - volumes: - neutron_ovs_agent: - init_container: null - neutron_ovs_agent: - volumeMounts: - volumes: - neutron_sriov_agent: - init_container: null - neutron_sriov_agent: - volumeMounts: - volumes: - neutron_l2gw_agent: - init_container: null - neutron_l2gw_agent: - volumeMounts: - volumes: - bagpipe_bgp: - init_container: null - bagpipe_bgp: - volumeMounts: - volumes: - bgp_dragent: - init_container: null - bgp_dragent: - volumeMounts: - volumes: - neutron_ironic_agent: - init_container: null - neutron_ironic_agent: - volumeMounts: - volumes: - neutron_netns_cleanup_cron: - init_container: null - neutron_netns_cleanup_cron: - volumeMounts: - volumes: - neutron_tests: - init_container: null - neutron_tests: - volumeMounts: - volumes: - neutron_bootstrap: - init_container: null - neutron_bootstrap: - volumeMounts: - volumes: - neutron_db_sync: - neutron_db_sync: - volumeMounts: - - name: db-sync-conf - mountPath: /etc/neutron/plugins/ml2/ml2_conf.ini - subPath: ml2_conf.ini - readOnly: true - volumes: - replicas: - server: 1 - ironic_agent: 1 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - daemonsets: - pod_replacement_strategy: RollingUpdate - dhcp_agent: - enabled: true - min_ready_seconds: 0 - max_unavailable: 1 - l3_agent: - enabled: true - min_ready_seconds: 0 - max_unavailable: 1 - lb_agent: - enabled: true - min_ready_seconds: 0 - max_unavailable: 1 - metadata_agent: - enabled: true - min_ready_seconds: 0 - max_unavailable: 1 - ovn_metadata_agent: - enabled: true - min_ready_seconds: 0 - max_unavailable: 1 - ovs_agent: - enabled: true - min_ready_seconds: 0 - max_unavailable: 1 - sriov_agent: - enabled: true - min_ready_seconds: 0 - max_unavailable: 1 - netns_cleanup_cron: - enabled: true - min_ready_seconds: 0 - max_unavailable: 1 - disruption_budget: - server: - min_available: 0 - termination_grace_period: - server: - timeout: 30 - ironic_agent: - timeout: 30 - resources: - enabled: true - agent: - dhcp: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - l3: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - lb: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - metadata: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - ovn_metadata: - requests: - memory: "512Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - ovs: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - sriov: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - l2gw: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - bagpipe_bgp: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - bgp_dragent: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - server: - requests: - memory: "1024Mi" - cpu: "100m" - limits: - memory: "6144Mi" - cpu: "2000m" - ironic_agent: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - netns_cleanup_cron: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - jobs: - bootstrap: - requests: - memory: 
"64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - rabbit_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_drop: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_endpoints: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_service: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_user: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - tests: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - image_repo_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" conf: - rally_tests: - force_project_purge: false - run_tempest: false - clean_up: | - # NOTE: We will make the best effort to clean up rally generated networks and routers, - # but should not block further automated deployment. - set +e - PATTERN="^[sc]_rally_" - - ROUTERS=$(openstack router list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r') - NETWORKS=$(openstack network list --format=value -c Name | grep -e $PATTERN | sort | tr -d '\r') - - for ROUTER in $ROUTERS - do - openstack router unset --external-gateway $ROUTER - openstack router set --disable --no-ha $ROUTER - - SUBNS=$(openstack router show $ROUTER -c interfaces_info --format=value | python -m json.tool | grep -oP '(?<="subnet_id": ")[a-f0-9\-]{36}(?=")' | sort | uniq) - for SUBN in $SUBNS - do - openstack router remove subnet $ROUTER $SUBN - done - - for PORT in $(openstack port list --router $ROUTER --format=value -c ID | tr -d '\r') - do - openstack router remove port $ROUTER $PORT - done - - openstack router delete $ROUTER - done - - for NETWORK in $NETWORKS - do - for PORT in $(openstack port list --network $NETWORK --format=value -c ID | tr -d '\r') - do - openstack port delete $PORT - done - openstack network delete $NETWORK - done - set -e - tests: - NeutronNetworks.create_and_delete_networks: - - args: - network_create_args: {} - context: - quotas: - neutron: - network: -1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NeutronNetworks.create_and_delete_ports: - - args: - network_create_args: {} - port_create_args: {} - ports_per_network: 10 - context: - network: {} - quotas: - neutron: - network: -1 - port: -1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NeutronNetworks.create_and_delete_routers: - - args: - network_create_args: {} - router_create_args: {} - subnet_cidr_start: 1.1.0.0/30 - subnet_create_args: {} - subnets_per_network: 2 - context: - network: {} - quotas: - neutron: - network: -1 - router: -1 - subnet: -1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NeutronNetworks.create_and_delete_subnets: - - args: - network_create_args: {} - subnet_cidr_start: 1.1.0.0/30 - subnet_create_args: {} - subnets_per_network: 2 - context: - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NeutronNetworks.create_and_list_routers: - - args: - network_create_args: {} - router_create_args: {} - subnet_cidr_start: 1.1.0.0/30 - subnet_create_args: {} - subnets_per_network: 2 - context: - network: {} - quotas: - neutron: - network: -1 - router: -1 - subnet: -1 - runner: - concurrency: 1 - 
times: 1 - type: constant - sla: - failure_rate: - max: 0 - NeutronNetworks.create_and_list_subnets: - - args: - network_create_args: {} - subnet_cidr_start: 1.1.0.0/30 - subnet_create_args: {} - subnets_per_network: 2 - context: - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NeutronNetworks.create_and_show_network: - - args: - network_create_args: {} - context: - quotas: - neutron: - network: -1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NeutronNetworks.create_and_update_networks: - - args: - network_create_args: {} - network_update_args: - admin_state_up: false - context: - quotas: - neutron: - network: -1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NeutronNetworks.create_and_update_ports: - - args: - network_create_args: {} - port_create_args: {} - port_update_args: - admin_state_up: false - device_id: dummy_id - device_owner: dummy_owner - ports_per_network: 5 - context: - network: {} - quotas: - neutron: - network: -1 - port: -1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NeutronNetworks.create_and_update_routers: - - args: - network_create_args: {} - router_create_args: {} - router_update_args: - admin_state_up: false - subnet_cidr_start: 1.1.0.0/30 - subnet_create_args: {} - subnets_per_network: 2 - context: - network: {} - quotas: - neutron: - network: -1 - router: -1 - subnet: -1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NeutronNetworks.create_and_update_subnets: - - args: - network_create_args: {} - subnet_cidr_start: 1.4.0.0/16 - subnet_create_args: {} - subnet_update_args: - enable_dhcp: false - subnets_per_network: 2 - context: - network: {} - quotas: - neutron: - network: -1 - subnet: -1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NeutronNetworks.list_agents: - - args: - agent_args: {} - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NeutronSecurityGroup.create_and_list_security_groups: - - args: - security_group_create_args: {} - context: - quotas: - neutron: - security_group: -1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NeutronSecurityGroup.create_and_update_security_groups: - - args: - security_group_create_args: {} - security_group_update_args: {} - context: - quotas: - neutron: - security_group: -1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - paste: - composite:neutron: - use: egg:Paste#urlmap - /: neutronversions_composite - /v2.0: neutronapi_v2_0 - composite:neutronapi_v2_0: - use: call:neutron.auth:pipeline_factory - noauth: cors http_proxy_to_wsgi request_id catch_errors extensions neutronapiapp_v2_0 - keystone: cors http_proxy_to_wsgi request_id catch_errors authtoken audit keystonecontext extensions neutronapiapp_v2_0 - composite:neutronversions_composite: - use: call:neutron.auth:pipeline_factory - noauth: cors http_proxy_to_wsgi neutronversions - keystone: cors http_proxy_to_wsgi neutronversions - filter:request_id: - paste.filter_factory: oslo_middleware:RequestId.factory - filter:catch_errors: - paste.filter_factory: oslo_middleware:CatchErrors.factory - filter:cors: - paste.filter_factory: oslo_middleware.cors:filter_factory - oslo_config_project: neutron - filter:http_proxy_to_wsgi: - 
paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory - filter:keystonecontext: - paste.filter_factory: neutron.auth:NeutronKeystoneContext.factory - filter:authtoken: - paste.filter_factory: keystonemiddleware.auth_token:filter_factory - filter:audit: - paste.filter_factory: keystonemiddleware.audit:filter_factory - audit_map_file: /etc/neutron/api_audit_map.conf - filter:extensions: - paste.filter_factory: neutron.api.extensions:plugin_aware_extension_middleware_factory - app:neutronversions: - paste.app_factory: neutron.pecan_wsgi.app:versions_factory - app:neutronapiapp_v2_0: - paste.app_factory: neutron.api.v2.router:APIRouter.factory - filter:osprofiler: - paste.filter_factory: osprofiler.web:WsgiMiddleware.factory - policy: {} - api_audit_map: - DEFAULT: - target_endpoint_type: None - custom_actions: - add_router_interface: update/add - remove_router_interface: update/remove - path_keywords: - floatingips: ip - healthmonitors: healthmonitor - health_monitors: health_monitor - lb: None - members: member - metering-labels: label - metering-label-rules: rule - networks: network - pools: pool - ports: port - routers: router - quotas: quota - security-groups: security-group - security-group-rules: rule - subnets: subnet - vips: vip - service_endpoints: - network: service/network - neutron_sudoers: | - # This sudoers file supports rootwrap for both Kolla and LOCI Images. - Defaults !requiretty - Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin" - neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf *, /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf * - neutron ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf, /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf - rootwrap: | - # Configuration for neutron-rootwrap - # This file should be owned by (and only-writeable by) the root user - - [DEFAULT] - # List of directories to load filter definitions from (separated by ','). - # These directories MUST all be only writeable by root ! - filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap,/var/lib/openstack/etc/neutron/rootwrap.d - - # List of directories to search executables in, in case filters do not - # explicitely specify a full path (separated by ',') - # If not specified, defaults to system PATH environment variable. - # These directories MUST all be only writeable by root ! - exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin - - # Enable logging to syslog - # Default value is False - use_syslog=False - - # Which syslog facility to use. - # Valid values include auth, authpriv, syslog, local0, local1... - # Default value is 'syslog' - syslog_log_facility=syslog - - # Which messages to log. - # INFO means log all usage - # ERROR means only log unsuccessful attempts - syslog_log_level=ERROR - - [xenapi] - # XenAPI configuration is only required by the L2 agent if it is to - # target a XenServer/XCP compute host's dom0. 
- xenapi_connection_url= - xenapi_connection_username=root - xenapi_connection_password= - rootwrap_filters: - debug: - pods: - - dhcp_agent - - l3_agent - - lb_agent - - metadata_agent - - ovn_metadata_agent - - ovs_agent - - sriov_agent - content: | - # neutron-rootwrap command filters for nodes on which neutron is - # expected to control network - # - # This file should be owned by (and only-writeable by) the root user - - # format seems to be - # cmd-name: filter-name, raw-command, user, args - - [Filters] - - # This is needed because we should ping - # from inside a namespace which requires root - # _alt variants allow to match -c and -w in any order - # (used by NeutronDebugAgent.ping_all) - ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+ - ping_alt: RegExpFilter, ping, root, ping, -c, \d+, -w, \d+, [0-9\.]+ - ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+ - ping6_alt: RegExpFilter, ping6, root, ping6, -c, \d+, -w, \d+, [0-9A-Fa-f:]+ - dibbler: - pods: - - dhcp_agent - - l3_agent - - lb_agent - - metadata_agent - - ovn_metadata_agent - - ovs_agent - - sriov_agent - content: | - # neutron-rootwrap command filters for nodes on which neutron is - # expected to control network - # - # This file should be owned by (and only-writeable by) the root user - - # format seems to be - # cmd-name: filter-name, raw-command, user, args - - [Filters] - - # Filters for the dibbler-based reference implementation of the pluggable - # Prefix Delegation driver. Other implementations using an alternative agent - # should include a similar filter in this folder. - - # prefix_delegation_agent - dibbler-client: CommandFilter, dibbler-client, root - ipset_firewall: - pods: - - dhcp_agent - - l3_agent - - lb_agent - - metadata_agent - - ovn_metadata_agent - - ovs_agent - - sriov_agent - content: | - # neutron-rootwrap command filters for nodes on which neutron is - # expected to control network - # - # This file should be owned by (and only-writeable by) the root user - - # format seems to be - # cmd-name: filter-name, raw-command, user, args - - [Filters] - # neutron/agent/linux/iptables_firewall.py - # "ipset", "-A", ... 
- ipset: CommandFilter, ipset, root - l3: - pods: - - dhcp_agent - - l3_agent - - lb_agent - - metadata_agent - - ovn_metadata_agent - - ovs_agent - - sriov_agent - content: | - # neutron-rootwrap command filters for nodes on which neutron is - # expected to control network - # - # This file should be owned by (and only-writeable by) the root user - - # format seems to be - # cmd-name: filter-name, raw-command, user, args - - [Filters] - - # arping - arping: CommandFilter, arping, root - - # l3_agent - sysctl: CommandFilter, sysctl, root - route: CommandFilter, route, root - radvd: CommandFilter, radvd, root - - # haproxy - haproxy: RegExpFilter, haproxy, root, haproxy, -f, .* - kill_haproxy: KillFilter, root, haproxy, -15, -9, -HUP - - # metadata proxy - metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root - # RHEL invocation of the metadata proxy will report /usr/bin/python - kill_metadata: KillFilter, root, python, -15, -9 - kill_metadata2: KillFilter, root, python2, -15, -9 - kill_metadata7: KillFilter, root, python2.7, -15, -9 - kill_metadata3: KillFilter, root, python3, -15, -9 - kill_metadata35: KillFilter, root, python3.5, -15, -9 - kill_metadata36: KillFilter, root, python3.6, -15, -9 - kill_metadata37: KillFilter, root, python3.7, -15, -9 - kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -15, -9, -HUP - kill_radvd: KillFilter, root, /sbin/radvd, -15, -9, -HUP - - # ip_lib - ip: IpFilter, ip, root - find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.* - ip_exec: IpNetnsExecFilter, ip, root - - # l3_tc_lib - l3_tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+ - l3_tc_add_qdisc_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress - l3_tc_add_qdisc_egress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, root, handle, 1:, htb - l3_tc_show_filters: RegExpFilter, tc, root, tc, -p, -s, -d, filter, show, dev, .+, parent, .+, prio, 1 - l3_tc_delete_filters: RegExpFilter, tc, root, tc, filter, del, dev, .+, parent, .+, prio, 1, handle, .+, u32 - l3_tc_add_filter_ingress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, dst, .+, police, rate, .+, burst, .+, drop, flowid, :1 - l3_tc_add_filter_egress: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, ip, prio, 1, u32, match, ip, src, .+, police, rate, .+, burst, .+, drop, flowid, :1 - - # For ip monitor - kill_ip_monitor: KillFilter, root, ip, -9 - - # ovs_lib (if OVSInterfaceDriver is used) - ovs-vsctl: CommandFilter, ovs-vsctl, root - - # iptables_manager - iptables-save: CommandFilter, iptables-save, root - iptables-restore: CommandFilter, iptables-restore, root - ip6tables-save: CommandFilter, ip6tables-save, root - ip6tables-restore: CommandFilter, ip6tables-restore, root - - # Keepalived - keepalived: CommandFilter, keepalived, root - kill_keepalived: KillFilter, root, keepalived, -HUP, -15, -9 - - # l3 agent to delete floatingip's conntrack state - conntrack: CommandFilter, conntrack, root - - # keepalived state change monitor - keepalived_state_change: CommandFilter, neutron-keepalived-state-change, root - # The following filters are used to kill the keepalived state change monitor. - # Since the monitor runs as a Python script, the system reports that the - # command of the process to be killed is python. 
- # TODO(mlavalle) These kill filters will be updated once we come up with a - # mechanism to kill using the name of the script being executed by Python - kill_keepalived_monitor_py: KillFilter, root, python, -15 - kill_keepalived_monitor_py27: KillFilter, root, python2.7, -15 - kill_keepalived_monitor_py3: KillFilter, root, python3, -15 - kill_keepalived_monitor_py35: KillFilter, root, python3.5, -15 - kill_keepalived_monitor_py36: KillFilter, root, python3.6, -15 - kill_keepalived_monitor_py37: KillFilter, root, python3.7, -15 - netns_cleanup: - pods: - - dhcp_agent - - l3_agent - - lb_agent - - metadata_agent - - ovn_metadata_agent - - ovs_agent - - sriov_agent - - netns_cleanup_cron - content: | - # neutron-rootwrap command filters for nodes on which neutron is - # expected to control network - # - # This file should be owned by (and only-writeable by) the root user - - # format seems to be - # cmd-name: filter-name, raw-command, user, args - - [Filters] - - # netns-cleanup - netstat: CommandFilter, netstat, root - dhcp: - pods: - - dhcp_agent - - l3_agent - - lb_agent - - metadata_agent - - ovn_metadata_agent - - ovs_agent - - sriov_agent - - netns_cleanup_cron - content: | - # neutron-rootwrap command filters for nodes on which neutron is - # expected to control network - # - # This file should be owned by (and only-writeable by) the root user - - # format seems to be - # cmd-name: filter-name, raw-command, user, args - - [Filters] - - # dhcp-agent - dnsmasq: CommandFilter, dnsmasq, root - # dhcp-agent uses kill as well, that's handled by the generic KillFilter - # it looks like these are the only signals needed, per - # neutron/agent/linux/dhcp.py - kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP, -15 - kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP, -15 - - ovs-vsctl: CommandFilter, ovs-vsctl, root - ivs-ctl: CommandFilter, ivs-ctl, root - mm-ctl: CommandFilter, mm-ctl, root - dhcp_release: CommandFilter, dhcp_release, root - dhcp_release6: CommandFilter, dhcp_release6, root - - # metadata proxy - metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root - # RHEL invocation of the metadata proxy will report /usr/bin/python - kill_metadata: KillFilter, root, python, -9 - kill_metadata2: KillFilter, root, python2, -9 - kill_metadata7: KillFilter, root, python2.7, -9 - kill_metadata3: KillFilter, root, python3, -9 - kill_metadata35: KillFilter, root, python3.5, -9 - kill_metadata36: KillFilter, root, python3.6, -9 - kill_metadata37: KillFilter, root, python3.7, -9 - - # ip_lib - ip: IpFilter, ip, root - find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.* - ip_exec: IpNetnsExecFilter, ip, root - ebtables: - pods: - - dhcp_agent - - l3_agent - - lb_agent - - metadata_agent - - ovn_metadata_agent - - ovs_agent - - sriov_agent - content: | - # neutron-rootwrap command filters for nodes on which neutron is - # expected to control network - # - # This file should be owned by (and only-writeable by) the root user - - # format seems to be - # cmd-name: filter-name, raw-command, user, args - - [Filters] - - ebtables: CommandFilter, ebtables, root - iptables_firewall: - pods: - - dhcp_agent - - l3_agent - - lb_agent - - metadata_agent - - ovn_metadata_agent - - ovs_agent - - sriov_agent - content: | - # neutron-rootwrap command filters for nodes on which neutron is - # expected to control network - # - # This file should be owned by (and only-writeable by) the root user - - # format seems to be - # cmd-name: filter-name, 
raw-command, user, args - - [Filters] - - # neutron/agent/linux/iptables_firewall.py - # "iptables-save", ... - iptables-save: CommandFilter, iptables-save, root - iptables-restore: CommandFilter, iptables-restore, root - ip6tables-save: CommandFilter, ip6tables-save, root - ip6tables-restore: CommandFilter, ip6tables-restore, root - - # neutron/agent/linux/iptables_firewall.py - # "iptables", "-A", ... - iptables: CommandFilter, iptables, root - ip6tables: CommandFilter, ip6tables, root - - # neutron/agent/linux/iptables_firewall.py - sysctl: CommandFilter, sysctl, root - - # neutron/agent/linux/ip_conntrack.py - conntrack: CommandFilter, conntrack, root - linuxbridge_plugin: - pods: - - dhcp_agent - - l3_agent - - lb_agent - - metadata_agent - - ovn_metadata_agent - - ovs_agent - - sriov_agent - content: | - # neutron-rootwrap command filters for nodes on which neutron is - # expected to control network - # - # This file should be owned by (and only-writeable by) the root user - - # format seems to be - # cmd-name: filter-name, raw-command, user, args - - [Filters] - - # linuxbridge-agent - # unclear whether both variants are necessary, but I'm transliterating - # from the old mechanism - brctl: CommandFilter, brctl, root - bridge: CommandFilter, bridge, root - - # ip_lib - ip: IpFilter, ip, root - find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.* - ip_exec: IpNetnsExecFilter, ip, root - - # tc commands needed for QoS support - tc_replace_tbf: RegExpFilter, tc, root, tc, qdisc, replace, dev, .+, root, tbf, rate, .+, latency, .+, burst, .+ - tc_add_ingress: RegExpFilter, tc, root, tc, qdisc, add, dev, .+, ingress, handle, .+ - tc_delete: RegExpFilter, tc, root, tc, qdisc, del, dev, .+, .+ - tc_show_qdisc: RegExpFilter, tc, root, tc, qdisc, show, dev, .+ - tc_show_filters: RegExpFilter, tc, root, tc, filter, show, dev, .+, parent, .+ - tc_add_filter: RegExpFilter, tc, root, tc, filter, add, dev, .+, parent, .+, protocol, all, prio, .+, basic, police, rate, .+, burst, .+, mtu, .+, drop - openvswitch_plugin: - pods: - - dhcp_agent - - l3_agent - - lb_agent - - metadata_agent - - ovn_metadata_agent - - ovs_agent - - sriov_agent - content: | - # neutron-rootwrap command filters for nodes on which neutron is - # expected to control network - # - # This file should be owned by (and only-writeable by) the root user - - # format seems to be - # cmd-name: filter-name, raw-command, user, args - - [Filters] - - # openvswitch-agent - # unclear whether both variants are necessary, but I'm transliterating - # from the old mechanism - ovs-vsctl: CommandFilter, ovs-vsctl, root - # NOTE(yamamoto): of_interface=native doesn't use ovs-ofctl - ovs-ofctl: CommandFilter, ovs-ofctl, root - ovs-appctl: CommandFilter, ovs-appctl, root - kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9 - ovsdb-client: CommandFilter, ovsdb-client, root - xe: CommandFilter, xe, root - - # ip_lib - ip: IpFilter, ip, root - find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.* - ip_exec: IpNetnsExecFilter, ip, root - - # needed for FDB extension - bridge: CommandFilter, bridge, root - privsep: - pods: - - dhcp_agent - - l3_agent - - lb_agent - - metadata_agent - - ovn_metadata_agent - - ovs_agent - - sriov_agent - - netns_cleanup_cron - content: | - # Command filters to allow privsep daemon to be started via rootwrap. 
- # - # This file should be owned by (and only-writeable by) the root user - - [Filters] - - # By installing the following, the local admin is asserting that: - # - # 1. The python module load path used by privsep-helper - # command as root (as started by sudo/rootwrap) is trusted. - # 2. Any oslo.config files matching the --config-file - # arguments below are trusted. - # 3. Users allowed to run sudo/rootwrap with this configuration(*) are - # also allowed to invoke python "entrypoint" functions from - # --privsep_context with the additional (possibly root) privileges - # configured for that context. - # - # (*) ie: the user is allowed by /etc/sudoers to run rootwrap as root - # - # In particular, the oslo.config and python module path must not - # be writeable by the unprivileged user. - - # oslo.privsep default neutron context - privsep: PathFilter, privsep-helper, root, - --config-file, /etc, - --privsep_context, neutron.privileged.default, - --privsep_sock_path, / - - # NOTE: A second `--config-file` arg can also be added above. Since - # many neutron components are installed like that (eg: by devstack). - # Adjust to suit local requirements. - linux_vxlan: - pods: - - bagpipe_bgp - content: | - # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is - # expected to control VXLAN Linux Bridge dataplane - # - # This file should be owned by (and only-writeable by) the root user - - # format seems to be - # cmd-name: filter-name, raw-command, user, args - - [Filters] - - # - modprobe: CommandFilter, modprobe, root - - # - brctl: CommandFilter, brctl, root - bridge: CommandFilter, bridge, root - - # ip_lib - ip: IpFilter, ip, root - ip_exec: IpNetnsExecFilter, ip, root - - # shell (for piped commands) - sh: CommandFilter, sh, root - mpls_ovs_dataplane: - pods: - - bagpipe_bgp - content: | - # bagpipe-bgp-rootwrap command filters for nodes on which bagpipe-bgp is - # expected to control MPLS OpenVSwitch dataplane - # - # This file should be owned by (and only-writeable by) the root user - - # format seems to be - # cmd-name: filter-name, raw-command, user, args - - [Filters] - - # openvswitch - ovs-vsctl: CommandFilter, ovs-vsctl, root - ovs-ofctl: CommandFilter, ovs-ofctl, root - - # ip_lib - ip: IpFilter, ip, root - ip_exec: IpNetnsExecFilter, ip, root - - # shell (for piped commands) - sh: CommandFilter, sh, root - openvswitch_db_server: - ptcp_port: 6640 + dhcp_agent: + agent: + availability_zone: az1 + logging: + logger_root: + handlers: + - stdout + level: INFO neutron: DEFAULT: + api_workers: 2 debug: false - metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy - log_config_append: /etc/neutron/logging.conf - # NOTE(portdirect): the bind port should not be defined, and is manipulated - # via the endpoints section. 
- bind_port: null default_availability_zones: az1 + dhcp_agents_per_network: 1 + dhcp_load_type: networks + l3_ha: false + l3_ha_network_type: geneve + max_l3_agents_per_router: 1 network_scheduler_driver: neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler + router_distributed: true router_scheduler_driver: neutron.scheduler.l3_agent_scheduler.AZLeastRoutersScheduler - dhcp_load_type: networks - api_workers: 8 + rpc_state_report_workers: 2 rpc_workers: 4 - rpc_state_report_workers: 4 - allow_overlapping_ips: True - state_path: /var/lib/neutron - # core_plugin can be: ml2, calico - core_plugin: ml2 - router_distributed: True - # service_plugin can be: router, odl-router, empty for calico, - # networking_ovn.l3.l3_ovn.OVNL3RouterPlugin for OVN - # NOTE(cloudnull): This is a bug, doc needs to be updated for ovn-router, instead of OVNL3RouterPlugin - service_plugins: ovn-router,qos,metering,trunk,segments - allow_automatic_l3agent_failover: True - l3_ha: False - max_l3_agents_per_router: 1 - l3_ha_network_type: geneve - dhcp_agents_per_network: 1 - network_auto_schedule: True - router_auto_schedule: True - # (NOTE)portdirect: if unset this is populated dynamically from the value in - # 'network.backend' to sane defaults. - interface_driver: null - oslo_concurrency: - lock_path: /tmp/neutron + service_plugins: "ovn-router,qos,metering,trunk,segments" + agent: + availability_zone: az1 database: - mysql_sql_mode: "" connection_debug: 0 + connection_recycle_time: 3600 connection_trace: true - use_db_reconnect: True idle_timeout: 3600 - connection_recycle_time: 3600 - max_retries: -1 - agent: - availability_zone: az1 - root_helper: sudo /var/lib/openstack/bin/neutron-rootwrap /etc/neutron/rootwrap.conf - root_helper_daemon: sudo /var/lib/openstack/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf - oslo_messaging_notifications: - driver: messagingv2 + mysql_sql_mode: "" + use_db_reconnect: true oslo_messaging_rabbit: amqp_durable_queues: false - # We define use of quorum queues via kustomize but this was enabling HA queues instead - # ha_queues are deprecated, explicitly set to false and set quorum_queue true rabbit_ha_queues: false rabbit_quorum_queue: true - # TODO: Not available until 2024.1, but once it is, we want to enable these! 
- # new feature ref; https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html rabbit_transient_quorum_queue: false use_queue_manager: false - # Reconnect after a node outage more quickly rabbit_interval_max: 10 # Send more frequent heartbeats and fail unhealthy nodes faster # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 heartbeat_rate: 3 - heartbeat_timeout_threshold: 30 - # Setting lower kombu_reconnect_delay should resolve isssue with HA failing when one node is down + heartbeat_timeout_threshold: 60 + # NOTE (deprecation warning) heartbeat_in_pthread will be deprecated in 2024.2 + heartbeat_in_pthread: True + # Setting lower kombu_reconnect_delay should resolve issue with HA failing when one node is down # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 kombu_reconnect_delay: 0.5 - oslo_middleware: - enable_proxy_headers_parsing: true - oslo_policy: - policy_file: /etc/neutron/policy.yaml ovn: + dns_servers: "8.8.8.8,1.1.1.1" enable_distributed_floating_ip: true + neutron_sync_mode: "off" + ovn_l3_mode: "true" ovn_l3_scheduler: leastloaded - dns_servers: 8.8.8.8,1.1.1.1 - neutron_sync_mode: 'off' # This needs to be set to OFF for OVN in a hybrid deployment. - ovn_l3_mode: 'true' - ovn_sb_connection: tcp:127.0.0.1:6642 - ovn_nb_connection: tcp:127.0.0.1:6641 - ovn_metadata_enabled: true - nova: - auth_type: password - auth_version: v3 - endpoint_type: internal - placement: - auth_type: password - auth_version: v3 - endpoint_type: internal - designate: - auth_type: password - auth_version: v3 - endpoint_type: internal - allow_reverse_dns_lookup: true - ironic: - endpoint_type: internal - keystone_authtoken: - service_token_roles: service - service_token_roles_required: true - memcache_security_strategy: ENCRYPT - auth_type: password - auth_version: v3 - service_type: network - octavia: - request_poll_timeout: 3000 - logging: - loggers: - keys: - - root - - neutron - - neutron_taas - handlers: - keys: - - stdout - - stderr - - "null" - formatters: - keys: - - context - - default - logger_root: - level: INFO - handlers: - - stdout - logger_neutron: - level: INFO - handlers: - - stdout - qualname: neutron - logger_neutron_taas: - level: INFO - handlers: - - stdout - qualname: neutron_taas - logger_amqp: - level: WARNING - handlers: stderr - qualname: amqp - logger_amqplib: - level: WARNING - handlers: stderr - qualname: amqplib - logger_eventletwsgi: - level: WARNING - handlers: stderr - qualname: eventlet.wsgi.server - logger_sqlalchemy: - level: WARNING - handlers: stderr - qualname: sqlalchemy - logger_boto: - level: WARNING - handlers: stderr - qualname: boto - handler_null: - class: logging.NullHandler - formatter: default - args: () - handler_stdout: - class: StreamHandler - args: (sys.stdout,) - formatter: context - handler_stderr: - class: StreamHandler - args: (sys.stderr,) - formatter: context - formatter_context: - class: oslo_log.formatters.ContextFormatter - datefmt: "%Y-%m-%d %H:%M:%S" - formatter_default: - format: "%(message)s" - datefmt: "%Y-%m-%d %H:%M:%S" + ovn_nb_connection: "tcp:127.0.0.1:6641" + ovn_sb_connection: "tcp:127.0.0.1:6642" + neutron_api_uwsgi: + uwsgi: + processes: 4 + threads: 2 + openvswitch_db_server: + ptcp_port: 6640 + ovn_metadata_agent: + DEFAULT: + metadata_workers: 8 + ovs: + ovsdb_connection: "tcp:127.0.0.1:6640" plugins: ml2_conf: + 
agent: + availability_zone: az1 + extensions: "fip_qos,gateway_ip_qos" ml2: - extension_drivers: port_security,qos - # (NOTE)portdirect: if unset this is populated dyanmicly from the value - # in 'network.backend' to sane defaults. + extension_drivers: "port_security,qos" mechanism_drivers: ovn - type_drivers: flat,vlan,geneve tenant_network_types: geneve + type_drivers: "flat,vlan,geneve" + ml2_type_vlan: + network_vlan_ranges: physnet1 ovn: + dns_servers: "8.8.8.8,1.1.1.1" + neutron_sync_mode: "off" + ovn_l3_mode: "true" ovn_l3_scheduler: leastloaded - dns_servers: 8.8.8.8,1.1.1.1 - neutron_sync_mode: 'off' # This needs to be set to OFF for OVN in a hybrid deployment. - ovn_l3_mode: 'true' - ovn_sb_connection: tcp:127.0.0.1:6642 - ovn_nb_connection: tcp:127.0.0.1:6641 ovn_metadata_enabled: true - ml2_type_vxlan: - vni_ranges: 1:1000 - vxlan_group: 239.1.1.1 - ml2_type_flat: - flat_networks: "*" - # If you want to use the external network as a tagged provider network, - # a range should be specified including the intended VLAN target - # using ml2_type_vlan.network_vlan_ranges: - ml2_type_vlan: - network_vlan_ranges: "physnet1" - ml2_type_geneve: - vni_ranges: 1:65536 - max_header_size: 38 - agent: - availability_zone: az1 - extensions: fip_qos,gateway_ip_qos - ml2_conf_sriov: null - taas: - taas: - enabled: False + ovn_nb_connection: "tcp:127.0.0.1:6641" + ovn_sb_connection: "tcp:127.0.0.1:6642" openvswitch_agent: agent: availability_zone: az1 - tunnel_types: vxlan - l2_population: True - arp_responder: True - ovs: - bridge_mappings: "external:br-ex" - securitygroup: - firewall_driver: neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver - linuxbridge_agent: - linux_bridge: - # To define Flat and VLAN connections, in LB we can assign - # specific interface to the flat/vlan network name using: - # physical_interface_mappings: "external:eth3" - # Or we can set the mapping between the network and bridge: - bridge_mappings: "external:br-ex" - # The two above options are exclusive, do not use both of them at once - securitygroup: - firewall_driver: iptables - vxlan: - l2_population: True - arp_responder: True - macvtap_agent: null - sriov_agent: - securitygroup: - firewall_driver: neutron.agent.firewall.NoopFirewallDriver - sriov_nic: - physical_device_mappings: physnet2:enp3s0f1 - # NOTE: do not use null here, use an empty string - exclude_devices: "" - dhcp_agent: - DEFAULT: - # (NOTE)portdirect: if unset this is populated dyanmicly from the value in - # 'network.backend' to sane defaults. - interface_driver: null - dnsmasq_config_file: /etc/neutron/dnsmasq.conf - force_metadata: True - agent: - availability_zone: az1 - dnsmasq: | - #no-hosts - #port=5353 - #cache-size=500 - #no-negcache - #dns-forward-max=100 - #resolve-file= - #strict-order - #bind-interface - #bind-dynamic - #domain= - #dhcp-range=10.10.10.10,10.10.10.100,24h - #dhcp-lease-max=150 - #dhcp-host=11:22:33:44:55:66,ignore - #dhcp-option=3,10.10.10.1 - #dhcp-option-force=26,1450 - - l3_agent: - DEFAULT: - # (NOTE)portdirect: if unset this is populated dyanmicly from the value in - # 'network.backend' to sane defaults. 
- interface_driver: null - agent_mode: legacy - metering_agent: null - metadata_agent: - DEFAULT: - # we cannot change the proxy socket path as it is declared - # as a hostPath volume from agent daemonsets - metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy - metadata_proxy_shared_secret: "password" - cache: - enabled: true - backend: dogpile.cache.memcached - bagpipe_bgp: {} - ovn_metadata_agent: - DEFAULT: - # we cannot change the proxy socket path as it is declared - # as a hostPath volume from agent daemonsets - metadata_proxy_socket: /var/lib/neutron/openstack-helm/metadata_proxy - metadata_proxy_shared_secret: "password" - metadata_workers: 8 - cache: - enabled: true - backend: dogpile.cache.memcached - ovs: - ovsdb_connection: tcp:127.0.0.1:6640 - bgp_dragent: {} - rabbitmq: policies: [] - ## NOTE: "besteffort" is meant for dev env with mixed compute type only. - ## This helps prevent sriov init script from failing due to mis-matched NIC - ## For prod env, target NIC should match and init script should fail otherwise. - ## sriov_init: - ## - besteffort - sriov_init: - - - # auto_bridge_add is a table of "bridge: interface" pairs - # To automatically add a physical interfaces to a specific bridges, - # for example eth3 to bridge br-physnet1, if0 to br0 and iface_two - # to br1 do something like: - # - # auto_bridge_add: - # br-physnet1: eth3 - # br0: if0 - # br1: iface_two - # br-ex will be added by default - auto_bridge_add: - br-ex: null - - # Network off-loading configuration - netoffload: - enabled: false - asap2: - # - dev: enp97s0f0 - # vfs: 16 - - # configuration of OVS DPDK bridges and NICs - # this is a separate section and not part of the auto_bridge_add section - # because additional parameters are needed - ovs_dpdk: - enabled: false - # setting update_dpdk_bond_config to true will have default behavior, - # which may cause disruptions in ovs dpdk traffic in case of neutron - # ovs agent restart or when dpdk nic/bond configurations are changed. - # Setting this to false will configure dpdk in the first run and - # disable nic/bond config on event of restart or config update. - update_dpdk_bond_config: true - driver: uio_pci_generic - # In case bonds are configured, the nics which are part of those bonds - # must NOT be provided here. - nics: - - name: dpdk0 - pci_id: '0000:05:00.0' - # Set VF Index in case some particular VF(s) need to be - # used with ovs-dpdk. - # vf_index: 0 - bridge: br-phy - migrate_ip: true - n_rxq: 2 - n_txq: 2 - pmd_rxq_affinity: "0:3,1:27" - ofport_request: 1 - # optional parameters for tuning the OVS DPDK config - # in alignment with the available hardware resources - # mtu: 2000 - # n_rxq_size: 1024 - # n_txq_size: 1024 - # vhost-iommu-support: true - bridges: - - name: br-phy - # optional parameter, in case tunnel traffic needs to be transported over a vlan underlay - # - tunnel_underlay_vlan: 45 - # Optional parameter for configuring bonding in OVS-DPDK - # - name: br-phy-bond0 - # bonds: - # - name: dpdkbond0 - # bridge: br-phy-bond0 - # # The IP from the first nic in nics list shall be used - # migrate_ip: true - # mtu: 2000 - # # Please note that n_rxq is set for each NIC individually - # # rather than denoting the total number of rx queues for - # # the bond as a whole. So setting n_rxq = 2 below for ex. - # # would be 4 rx queues in total for the bond. 
- # # Same for n_txq - # n_rxq: 2 - # n_txq: 2 - # ofport_request: 1 - # n_rxq_size: 1024 - # n_txq_size: 1024 - # vhost-iommu-support: true - # ovs_options: "bond_mode=active-backup" - # nics: - # - name: dpdk_b0s0 - # pci_id: '0000:06:00.0' - # pmd_rxq_affinity: "0:3,1:27" - # # Set VF Index in case some particular VF(s) need to be - # # used with ovs-dpdk. In which case pci_id of PF must be - # # provided above. - # # vf_index: 0 - # - name: dpdk_b0s1 - # pci_id: '0000:07:00.0' - # pmd_rxq_affinity: "0:3,1:27" - # # Set VF Index in case some particular VF(s) need to be - # # used with ovs-dpdk. In which case pci_id of PF must be - # # provided above. - # # vf_index: 0 - # - # Set the log level for each target module (default level is always dbg) - # Supported log levels are: off, emer, err, warn, info, dbg - # - # modules: - # - name: dpdk - # log_level: info - -# Names of secrets used by bootstrap and environmental checks -secrets: - identity: - admin: neutron-keystone-admin - neutron: neutron-keystone-user - test: neutron-keystone-test - oslo_db: - admin: neutron-db-admin - neutron: neutron-db-user - oslo_messaging: - admin: neutron-rabbitmq-admin - neutron: neutron-rabbitmq-user - tls: - compute_metadata: - metadata: - internal: metadata-tls-metadata - network: - server: - public: neutron-tls-public - internal: neutron-tls-server - oci_image_registry: - neutron: neutron-oci-image-registry -# typically overridden by environmental -# values, but should include all endpoints -# required by this chart endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - neutron: - username: neutron - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null - oslo_db: - auth: - admin: - username: root - password: password - secret: - tls: - internal: mariadb-tls-direct - neutron: - username: neutron - password: password - hosts: - default: mariadb-cluster-primary - host_fqdn_override: - default: null - path: /neutron - scheme: mysql+pymysql - port: - mysql: - default: 3306 - oslo_messaging: - auth: - admin: - username: rabbitmq - password: password - secret: - tls: - internal: rabbitmq-tls-direct - neutron: - username: neutron - password: password - statefulset: - replicas: 3 - name: rabbitmq-server - hosts: - default: rabbitmq-nodes - host_fqdn_override: - default: rabbitmq.openstack.svc.cluster.local - path: /neutron - scheme: rabbit - port: - amqp: - default: 5672 - http: - default: 15672 - oslo_cache: - auth: - # NOTE(portdirect): this is used to define the value for keystone - # authtoken cache encryption key, if not set it will be populated - # automatically with a random value, but to take advantage of - # this feature all services should be set to use the same key, - # and memcache service. 
- memcache_secret_key: null - hosts: - default: memcached - host_fqdn_override: - default: null + baremetal: port: - memcache: - default: 11211 + api: + default: 6385 + internal: 6385 + public: 80 + service: 6385 compute: - name: nova - hosts: - default: nova-api - public: nova - host_fqdn_override: - default: null - path: - default: "/v2.1/%(tenant_id)s" - scheme: - default: 'http' port: api: default: 8774 @@ -2300,263 +181,66 @@ endpoints: novncproxy: default: 6080 compute_metadata: - name: nova - hosts: - default: nova-metadata - public: metadata - host_fqdn_override: - default: null - path: - default: / - scheme: - default: 'http' port: metadata: default: 8775 public: 8775 - identity: - name: keystone - auth: - admin: - region_name: RegionOne - username: admin - password: password - project_name: admin - user_domain_name: default - project_domain_name: default - neutron: - role: admin,service - region_name: RegionOne - username: neutron - password: password - project_name: service - user_domain_name: service - project_domain_name: service - nova: - region_name: RegionOne - project_name: service - username: nova - password: password - user_domain_name: service - project_domain_name: service - placement: - region_name: RegionOne - project_name: service - username: placement - password: password - user_domain_name: service - project_domain_name: service - designate: - region_name: RegionOne - project_name: service - username: designate - password: password - user_domain_name: service - project_domain_name: service - ironic: - region_name: RegionOne - project_name: service - username: ironic - password: password - user_domain_name: service - project_domain_name: service - # test: - # role: admin - # region_name: RegionOne - # username: neutron-test - # password: password - # # NOTE: this project will be purged and reset if - # # conf.rally_tests.force_project_purge is set to true - # # which may be required upon test failure, but be aware that this will - # # expunge all openstack objects, so if this is used a seperate project - # # should be used for each helm test, and also it should be ensured - # # that this project is not in use by other tenants - # project_name: test - # user_domain_name: service - # project_domain_name: service - hosts: - default: keystone - internal: keystone-api - host_fqdn_override: - default: null - path: - default: /v3 - scheme: - default: http + dns: port: api: - default: 5000 - internal: 5000 + default: 9001 + internal: 9001 public: 80 - network: - name: neutron - hosts: - default: neutron-server - public: neutron - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: null - scheme: - default: 'http' - service: 'http' + service: 9001 + fluentd: + namespace: fluentbit + identity: port: api: - default: 9696 + default: 5000 + internal: 5000 public: 80 - internal: 9696 - service: 9696 load_balancer: - name: octavia - hosts: - default: octavia-api - public: octavia - host_fqdn_override: - default: null - path: - default: null - scheme: - default: http port: api: default: 9876 - public: 80 internal: 9876 + public: 80 service: 9876 - fluentd: - namespace: fluentbit - name: fluentd - hosts: - default: fluentd-logging - host_fqdn_override: - default: null - path: - default: null - scheme: 'http' - port: - service: - default: 24224 - metrics: - default: 24220 - dns: - name: designate - 
hosts: - default: designate-api - public: designate - host_fqdn_override: - default: null - path: - default: / - scheme: - default: 'http' + network: port: api: - default: 9001 + default: 9696 + internal: 9696 public: 80 - internal: 9001 - service: 9001 - baremetal: - name: ironic + service: 9696 + oslo_db: + host_fqdn_override: + default: mariadb-cluster-primary.openstack.svc.cluster.local hosts: - default: ironic-api - public: ironic + default: mariadb-cluster-primary + oslo_cache: host_fqdn_override: - default: null - path: - default: null - scheme: - default: 'http' - port: - api: - default: 6385 - public: 80 - internal: 6385 - service: 6385 - # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress - # They are using to enable the Egress K8s network policy. - kube_dns: - namespace: kube-system - name: kubernetes-dns + default: memcached.openstack.svc.cluster.local hosts: - default: kube-dns + default: memcached + oslo_messaging: host_fqdn_override: - default: null - path: - default: null - scheme: http - port: - dns: - default: 53 - protocol: UDP - ingress: - namespace: null - name: ingress + default: rabbitmq.openstack.svc.cluster.local hosts: - default: ingress - port: - ingress: - default: 80 - -network_policy: - neutron: - # TODO(lamt): Need to tighten this ingress for security. - ingress: - - {} - egress: - - {} - -helm3_hook: true - -health_probe: - logging: - level: ERROR - -tls: - identity: false - oslo_messaging: false - oslo_db: false + default: rabbitmq-nodes manifests: - certificates: false - configmap_bin: true - configmap_etc: true daemonset_dhcp_agent: false daemonset_l3_agent: false - daemonset_lb_agent: true daemonset_metadata_agent: false - daemonset_ovs_agent: false - daemonset_sriov_agent: true - daemonset_l2gw_agent: false - daemonset_bagpipe_bgp: false - daemonset_bgp_dragent: false - daemonset_netns_cleanup_cron: true daemonset_ovn_metadata_agent: true - deployment_ironic_agent: false - deployment_server: true + daemonset_ovs_agent: false ingress_server: false - job_bootstrap: true job_db_init: false - job_db_sync: true - job_db_drop: false - job_image_repo_sync: true - job_ks_endpoints: true - job_ks_service: true - job_ks_user: true job_rabbit_init: false - pdb_server: true pod_rally_test: false - network_policy: false secret_db: false secret_ingress_tls: false - secret_keystone: true secret_rabbitmq: false - secret_registry: true service_ingress_server: false - service_server: true diff --git a/base-helm-configs/nova/nova-helm-overrides.yaml b/base-helm-configs/nova/nova-helm-overrides.yaml index e958c51ea..96a7d850f 100644 --- a/base-helm-configs/nova/nova-helm-overrides.yaml +++ b/base-helm-configs/nova/nova-helm-overrides.yaml @@ -1,2588 +1,293 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Default values for nova. -# This is a YAML-formatted file. -# Declare name/value pairs to be passed into your templates. 
-# name: value - --- -release_group: null -labels: - agent: - compute: - node_selector_key: openstack-compute-node - node_selector_value: enabled - compute_ironic: - node_selector_key: openstack-compute-node - node_selector_value: enabled - api_metadata: - node_selector_key: openstack-control-plane - node_selector_value: enabled - conductor: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - novncproxy: - node_selector_key: openstack-control-plane - node_selector_value: enabled - osapi: - node_selector_key: openstack-control-plane - node_selector_value: enabled - scheduler: - node_selector_key: openstack-control-plane - node_selector_value: enabled - spiceproxy: - node_selector_key: openstack-control-plane - node_selector_value: enabled - test: - node_selector_key: openstack-control-plane - node_selector_value: enabled images: - pull_policy: IfNotPresent tags: bootstrap: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" db_drop: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" + image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" ks_endpoints: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" nova_api: "quay.io/rackspace/rackerlabs-nova-efi:2024.1-ubuntu_jammy-1737928811" + nova_archive_deleted_rows: "quay.io/rackspace/rackerlabs-nova-efi:2024.1-ubuntu_jammy-1737928811" nova_cell_setup: "quay.io/rackspace/rackerlabs-nova-efi:2024.1-ubuntu_jammy-1737928811" nova_cell_setup_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" nova_compute: "quay.io/rackspace/rackerlabs-nova-efi:2024.1-ubuntu_jammy-1737928811" + nova_compute_ironic: "docker.io/kolla/ubuntu-source-nova-compute-ironic:wallaby" nova_compute_ssh: "quay.io/rackspace/rackerlabs-nova-efi:2024.1-ubuntu_jammy-1737928811" nova_conductor: "quay.io/rackspace/rackerlabs-nova-efi:2024.1-ubuntu_jammy-1737928811" nova_db_sync: "quay.io/rackspace/rackerlabs-nova-efi:2024.1-ubuntu_jammy-1737928811" nova_novncproxy: "quay.io/rackspace/rackerlabs-nova-efi:2024.1-ubuntu_jammy-1737928811" nova_novncproxy_assets: "quay.io/rackspace/rackerlabs-nova-efi:2024.1-ubuntu_jammy-1737928811" nova_scheduler: "quay.io/rackspace/rackerlabs-nova-efi:2024.1-ubuntu_jammy-1737928811" + nova_service_cleaner: "quay.io/rackspace/rackerlabs-ceph-config-helper:latest-ubuntu_jammy" nova_spiceproxy: "quay.io/rackspace/rackerlabs-nova-efi:2024.1-ubuntu_jammy-1737928811" nova_spiceproxy_assets: "quay.io/rackspace/rackerlabs-nova-efi:2024.1-ubuntu_jammy-1737928811" - nova_service_cleaner: "quay.io/rackspace/rackerlabs-ceph-config-helper:latest-ubuntu_jammy" - dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" - rabbit_init: "quay.io/rackspace/rackerlabs-rabbitmq:3.13-management" - nova_archive_deleted_rows: "quay.io/rackspace/rackerlabs-nova-efi:2024.1-ubuntu_jammy-1737928811" - nova_compute_ironic: 'docker.io/kolla/ubuntu-source-nova-compute-ironic:wallaby' nova_storage_init: "quay.io/rackspace/rackerlabs-ceph-config-helper:latest-ubuntu_jammy" - test: 
"quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0" - image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" nova_wait_for_computes_init: "quay.io/rackspace/rackerlabs-hyperkube-amd64:v1.11.6" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync -jobs: - # NOTE(portdirect): When using cells new nodes will be added to the cell on the hour by default. - # TODO(portdirect): Add a post-start action to nova compute pods that registers themselves. - cell_setup: - cron: "0 */1 * * *" - starting_deadline: 600 - history: - success: 3 - failed: 1 - extended_wait: - enabled: false - iteration: 3 - duration: 5 - extra_command: null - service_cleaner: - cron: "0 */1 * * *" - starting_deadline: 600 - history: - success: 3 - failed: 1 - sleep_time: 60 - extra_command: null - archive_deleted_rows: - cron: "0 */1 * * *" - starting_deadline: 600 - history: - success: 3 - failed: 1 -bootstrap: - enabled: true - ks_user: admin - script: null - structured: - flavors: - enabled: true - options: - m1_tiny: - name: "m1.tiny" - ram: 512 - disk: 1 - vcpus: 1 - m1_small: - name: "m1.small" - ram: 2048 - disk: 20 - vcpus: 1 - m1_medium: - name: "m1.medium" - ram: 4096 - disk: 40 - vcpus: 2 - m1_large: - name: "m1.large" - ram: 8192 - disk: 80 - vcpus: 4 - m1_xlarge: - name: "m1.xlarge" - ram: 16384 - disk: 160 - vcpus: 8 - wait_for_computes: - enabled: false - # Wait percentage is the minimum percentage of compute hypervisors which - # must be available before the remainder of the bootstrap script can be run. - wait_percentage: 70 - # Once the wait_percentage above is achieved, the remaining_wait is the - # amount of time in seconds to wait before executing the remainder of the - # boostrap script. - remaining_wait: 300 - scripts: - init_script: | - # This runs in a bootstrap init container. It counts the number of compute nodes. - COMPUTE_NODES=$(kubectl get nodes -o custom-columns=NAME:.metadata.name -l openstack-compute-node=enabled --no-headers | sort) - /bin/echo $COMPUTE_NODES > /tmp/compute_nodes.txt - wait_script: | - # This script runs in the main bootstrap container just before the - # bootstrap.script is called. - COMPUTE_HOSTS=`cat /tmp/compute_nodes.txt | wc -w` - if [[ $COMPUTE_HOSTS == 0 ]]; then - echo "There are no compute hosts found!" - exit 1 - fi - - # Wait for all hypervisors to come up before moving on with the deployment - HYPERVISOR_WAIT=true - WAIT_AFTER_READY=0 - SLEEP=5 - while [[ $HYPERVISOR_WAIT == true ]]; do - # Its possible that openstack command may fail due to not being able to - # reach the compute service - set +e - HYPERVISORS=$(openstack hypervisor list -f value -c 'Hypervisor Hostname' | wc -w) - set -e + rabbit_init: "quay.io/rackspace/rackerlabs-rabbitmq:3.13-management" + test: "quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0" - PERCENT_READY=$(( $HYPERVISORS * 100 / $COMPUTE_HOSTS )) - if [[ $PERCENT_READY -ge $WAIT_PERCENTAGE ]]; then - echo "Hypervisor ready percentage is $PERCENT_READY" - if [[ $PERCENT_READY == 100 ]]; then - HYPERVISOR_WAIT=false - echo "All hypervisors are ready." - elif [[ WAIT_AFTER_READY -ge $REMAINING_WAIT ]]; then - HYPERVISOR_WAIT=false - echo "Waited the configured time -- $HYPERVISORS out of $COMPUTE_HOSTS hypervisor(s) ready -- proceeding with the bootstrap." - else - sleep $SLEEP - WAIT_AFTER_READY=$(( $WAIT_AFTER_READY + $SLEEP )) - fi - else - echo "Waiting $SLEEP seconds for enough hypervisors to be discovered..." 
- sleep $SLEEP - fi - done network: - # provide what type of network wiring will be used - # possible options: openvswitch, linuxbridge, sriov backend: - ovn - osapi: - port: 8774 - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - external_policy_local: false - node_port: - enabled: false - port: 30774 - metadata: - port: 8775 - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - external_policy_local: false - node_port: - enabled: false - port: 30775 - novncproxy: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - node_port: - enabled: false - port: 30680 - spiceproxy: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - node_port: - enabled: false - port: 30682 + lifecycle: + upgrades: + daemonsets: + compute: + enabled: true + max_unavailable: 20% ssh: enabled: true - port: 8022 - from_subnet: 0.0.0.0/0 - key_types: - - rsa - - dsa - - ecdsa - - ed25519 - private_key: 'null' - public_key: 'null' + dependencies: dynamic: - common: - local_image_registry: - jobs: - - nova-image-repo-sync - services: - - endpoint: node - service: local_image_registry targeted: ovn: compute: pod: [] - openvswitch: - compute: - pod: - - requireSameNode: true - labels: - application: neutron - component: neutron-ovs-agent - linuxbridge: - compute: - pod: - - requireSameNode: true - labels: - application: neutron - component: neutron-lb-agent - sriov: - compute: - pod: - - requireSameNode: true - labels: - application: neutron - component: neutron-sriov-agent static: api: jobs: - nova-db-sync - nova-ks-user - nova-ks-endpoints - services: - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity api_metadata: jobs: - nova-db-sync - nova-ks-user - nova-ks-endpoints - services: - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - bootstrap: - services: - - endpoint: internal - service: identity - - endpoint: internal - service: compute - cell_setup: + archive_deleted_rows: jobs: - nova-db-sync - services: - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: compute - pod: - - requireSameNode: false - labels: - application: nova - component: compute - service_cleaner: + cell_setup: jobs: - nova-db-sync - services: - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: compute compute: - pod: - - requireSameNode: true - labels: - application: libvirt - component: libvirt jobs: - nova-db-sync - services: - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: image - - endpoint: internal - service: compute - - endpoint: internal - service: network - - endpoint: internal - service: compute_metadata compute_ironic: jobs: - nova-db-sync - services: - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: image - - endpoint: internal - service: compute - - endpoint: internal - service: network - 
- endpoint: internal - service: baremetal conductor: jobs: - nova-db-sync - services: - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: compute - db_drop: - services: - - endpoint: internal - service: oslo_db - archive_deleted_rows: - jobs: - - nova-db-sync - db_init: - services: - - endpoint: internal - service: oslo_db db_sync: jobs: [] - services: - - endpoint: internal - service: oslo_db - ks_endpoints: - jobs: - - nova-ks-service - services: - - endpoint: internal - service: identity - ks_service: - services: - - endpoint: internal - service: identity - ks_user: - services: - - endpoint: internal - service: identity - rabbit_init: - services: - - service: oslo_messaging - endpoint: internal - novncproxy: - jobs: - - nova-db-sync - services: - - endpoint: internal - service: oslo_db - spiceproxy: + scheduler: jobs: - nova-db-sync - services: - - endpoint: internal - service: oslo_db - scheduler: + service_cleaner: jobs: - nova-db-sync - services: - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: compute - tests: - services: - - endpoint: internal - service: image - - endpoint: internal - service: compute - - endpoint: internal - service: network - - endpoint: internal - service: compute_metadata - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry -console: - # serial | spice | novnc | none - console_kind: novnc - serial: - spice: - compute: - # IF blank, search default routing interface - server_proxyclient_interface: null - # or set network cidr - server_proxyclient_network_cidr: 0/0 - proxy: - # IF blank, search default routing interface - server_proxyclient_interface: null - # or set network cidr - server_proxyclient_network_cidr: 0/0 - novnc: - compute: - # IF blank, search default routing interface - vncserver_proxyclient_interface: null - # or set network cidr - vncserver_proxyclient_network_cidr: 0/0 - vncproxy: - # IF blank, search default routing interface - vncserver_proxyclient_interface: null - # or set network cidr - vncserver_proxyclient_network_cidr: 0/0 - address_search_enabled: true -ceph_client: - configmap: ceph-etc - user_secret_name: pvc-ceph-client-key -rbd_pool: - app_name: nova-vms - replication: 3 - crush_rule: replicated_rule - chunk_size: 8 -conf: - security: | - # - # Disable access to the entire file system except for the directories that - # are explicitly allowed later. - # - # This currently breaks the configurations that come with some web application - # Debian packages. - # - # - # AllowOverride None - # Require all denied - # - - # Changing the following options will not really affect the security of the - # server, but might make attacks slightly more difficult in some cases. - - # - # ServerTokens - # This directive configures what you return as the Server HTTP response - # Header. The default is 'Full' which sends information about the OS-Type - # and compiled in modules. - # Set to one of: Full | OS | Minimal | Minor | Major | Prod - # where Full conveys the most information, and Prod the least. - ServerTokens Prod - - # - # Optionally add a line containing the server version and virtual host - # name to server-generated pages (internal error documents, FTP directory - # listings, mod_status and mod_info output etc., but not CGI generated - # documents or custom error documents). 
- # Set to "EMail" to also include a mailto: link to the ServerAdmin. - # Set to one of: On | Off | EMail - ServerSignature Off - - # - # Allow TRACE method - # - # Set to "extended" to also reflect the request body (only for testing and - # diagnostic purposes). - # - # Set to one of: On | Off | extended - TraceEnable Off - # - # Forbid access to version control directories - # - # If you use version control systems in your document root, you should - # probably deny access to their directories. For example, for subversion: - # - # - # Require all denied - # - - # - # Setting this header will prevent MSIE from interpreting files as something - # else than declared by the content type in the HTTP headers. - # Requires mod_headers to be enabled. - # - #Header set X-Content-Type-Options: "nosniff" - - # - # Setting this header will prevent other sites from embedding pages from this - # site as frames. This defends against clickjacking attacks. - # Requires mod_headers to be enabled. - # - #Header set X-Frame-Options: "sameorigin" - software: - apache2: - binary: apache2 - start_parameters: -DFOREGROUND - conf_dir: /etc/apache2/conf-enabled - site_dir: /etc/apache2/sites-enable - mods_dir: /etc/apache2/mods-available - a2enmod: null - a2dismod: null +conf: ceph: enabled: false - admin_keyring: null - cinder: - user: "cinder" - keyring: null - secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337 - rally_tests: - run_tempest: false - clean_up: | - FLAVORS=$(openstack flavor list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }') - if [ -n "$FLAVORS" ]; then - echo $FLAVORS | xargs openstack flavor delete - fi - SERVERS=$(openstack server list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }') - if [ -n "$SERVERS" ]; then - echo $SERVERS | xargs openstack server delete - fi - IMAGES=$(openstack image list -f value | awk '$2 ~ /^c_rally_/ { print $1 }') - if [ -n "$IMAGES" ]; then - echo $IMAGES | xargs openstack image delete - fi - tests: - NovaAggregates.create_and_get_aggregate_details: - - args: - availability_zone: az1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaAggregates.create_and_update_aggregate: - - args: - availability_zone: az1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaAggregates.list_aggregates: - - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaAvailabilityZones.list_availability_zones: - - args: - detailed: true - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaFlavors.create_and_delete_flavor: - - args: - disk: 1 - ram: 500 - vcpus: 1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaFlavors.create_and_list_flavor_access: - - args: - disk: 1 - ram: 500 - vcpus: 1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaFlavors.create_flavor: - - args: - disk: 1 - ram: 500 - vcpus: 1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaFlavors.create_flavor_and_add_tenant_access: - - args: - disk: 1 - ram: 500 - vcpus: 1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaFlavors.create_flavor_and_set_keys: - - args: - disk: 1 - extra_specs: - 'quota:disk_read_bytes_sec': 10240 - ram: 500 - vcpus: 1 - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaFlavors.list_flavors: - - args: - detailed: 
true - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaHypervisors.list_and_get_hypervisors: - - args: - detailed: true - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaHypervisors.list_and_get_uptime_hypervisors: - - args: - detailed: true - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaHypervisors.list_and_search_hypervisors: - - args: - detailed: true - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaHypervisors.list_hypervisors: - - args: - detailed: true - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaHypervisors.statistics_hypervisors: - - args: {} - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaKeypair.create_and_delete_keypair: - - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaKeypair.create_and_list_keypairs: - - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaServerGroups.create_and_list_server_groups: - - args: - all_projects: false - kwargs: - policies: - - affinity - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - NovaServices.list_services: - - runner: - concurrency: 1 - times: 1 - type: constant - sla: - failure_rate: - max: 0 - paste: - composite:metadata: - use: egg:Paste#urlmap - /: meta - pipeline:meta: - pipeline: cors metaapp - app:metaapp: - paste.app_factory: nova.api.metadata.handler:MetadataRequestHandler.factory - composite:osapi_compute: - use: call:nova.api.openstack.urlmap:urlmap_factory - /: oscomputeversions - /v2: openstack_compute_api_v21_legacy_v2_compatible - /v2.1: openstack_compute_api_v21 - composite:openstack_compute_api_v21: - use: call:nova.api.auth:pipeline_factory_v21 - noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21 - keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext osapi_compute_app_v21 - composite:openstack_compute_api_v21_legacy_v2_compatible: - use: call:nova.api.auth:pipeline_factory_v21 - noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2 legacy_v2_compatible osapi_compute_app_v21 - keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken audit keystonecontext legacy_v2_compatible osapi_compute_app_v21 - filter:request_id: - paste.filter_factory: oslo_middleware:RequestId.factory - filter:compute_req_id: - paste.filter_factory: nova.api.compute_req_id:ComputeReqIdMiddleware.factory - filter:faultwrap: - paste.filter_factory: nova.api.openstack:FaultWrapper.factory - filter:noauth2: - paste.filter_factory: nova.api.openstack.auth:NoAuthMiddleware.factory - filter:sizelimit: - paste.filter_factory: oslo_middleware:RequestBodySizeLimiter.factory - filter:http_proxy_to_wsgi: - paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory - filter:legacy_v2_compatible: - paste.filter_factory: nova.api.openstack:LegacyV2CompatibleWrapper.factory - app:osapi_compute_app_v21: - paste.app_factory: nova.api.openstack.compute:APIRouterV21.factory - pipeline:oscomputeversions: - pipeline: faultwrap http_proxy_to_wsgi oscomputeversionapp - app:oscomputeversionapp: - paste.app_factory: nova.api.openstack.compute.versions:Versions.factory - filter:cors: - paste.filter_factory: 
oslo_middleware.cors:filter_factory - oslo_config_project: nova - filter:keystonecontext: - paste.filter_factory: nova.api.auth:NovaKeystoneContext.factory - filter:authtoken: - paste.filter_factory: keystonemiddleware.auth_token:filter_factory - filter:audit: - paste.filter_factory: keystonemiddleware.audit:filter_factory - audit_map_file: /etc/nova/api_audit_map.conf - policy: {} - nova_sudoers: | - # This sudoers file supports rootwrap for both Kolla and LOCI Images. - Defaults !requiretty - Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin" - nova ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/nova-rootwrap /etc/nova/rootwrap.conf *, /var/lib/openstack/bin/nova-rootwrap /etc/nova/rootwrap.conf * - api_audit_map: - DEFAULT: - target_endpoint_type: None - custom_actions: - enable: enable - disable: disable - delete: delete - startup: start/startup - shutdown: stop/shutdown - reboot: start/reboot - os-migrations/get: read - os-server-password/post: update - path_keywords: - add: None - action: None - enable: None - disable: None - configure-project: None - defaults: None - delete: None - detail: None - diagnostics: None - entries: entry - extensions: alias - flavors: flavor - images: image - ips: label - limits: None - metadata: key - os-agents: os-agent - os-aggregates: os-aggregate - os-availability-zone: None - os-certificates: None - os-cloudpipe: None - os-fixed-ips: ip - os-extra_specs: key - os-flavor-access: None - os-floating-ip-dns: domain - os-floating-ips-bulk: host - os-floating-ip-pools: None - os-floating-ips: floating-ip - os-hosts: host - os-hypervisors: hypervisor - os-instance-actions: instance-action - os-keypairs: keypair - os-migrations: None - os-networks: network - os-quota-sets: tenant - os-security-groups: security_group - os-security-group-rules: rule - os-server-password: None - os-services: None - os-simple-tenant-usage: tenant - os-virtual-interfaces: None - os-volume_attachments: attachment - os-volumes_boot: None - os-volumes: volume - os-volume-types: volume-type - os-snapshots: snapshot - reboot: None - servers: server - shutdown: None - startup: None - statistics: None - service_endpoints: - compute: service/compute - rootwrap: | - # Configuration for nova-rootwrap - # This file should be owned by (and only-writeable by) the root user - - [DEFAULT] - # List of directories to load filter definitions from (separated by ','). - # These directories MUST all be only writeable by root ! - filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap - - # List of directories to search executables in, in case filters do not - # explicitely specify a full path (separated by ',') - # If not specified, defaults to system PATH environment variable. - # These directories MUST all be only writeable by root ! - exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin - - # Enable logging to syslog - # Default value is False - use_syslog=False - - # Which syslog facility to use. - # Valid values include auth, authpriv, syslog, local0, local1... - # Default value is 'syslog' - syslog_log_facility=syslog - - # Which messages to log. 
- # INFO means log all usage - # ERROR means only log unsuccessful attempts - syslog_log_level=ERROR - rootwrap_filters: - api_metadata: - pods: - - metadata - content: | - # nova-rootwrap command filters for api-metadata nodes - # This is needed on nova-api hosts running with "metadata" in enabled_apis - # or when running nova-api-metadata - # This file should be owned by (and only-writeable by) the root user - - [Filters] - # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... - iptables-save: CommandFilter, iptables-save, root - ip6tables-save: CommandFilter, ip6tables-save, root - - # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) - iptables-restore: CommandFilter, iptables-restore, root - ip6tables-restore: CommandFilter, ip6tables-restore, root - compute: - pods: - - compute - content: | - # nova-rootwrap command filters for compute nodes - # This file should be owned by (and only-writeable by) the root user - - [Filters] - # nova/virt/disk/mount/api.py: 'kpartx', '-a', device - # nova/virt/disk/mount/api.py: 'kpartx', '-d', device - kpartx: CommandFilter, kpartx, root - - # nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path - # nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path - tune2fs: CommandFilter, tune2fs, root - - # nova/virt/disk/mount/api.py: 'mount', mapped_device - # nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target - # nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'.. - # nova/virt/configdrive.py: 'mount', device, mountdir - # nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ... - mount: CommandFilter, mount, root - - # nova/virt/disk/mount/api.py: 'umount', mapped_device - # nova/virt/disk/api.py: 'umount' target - # nova/virt/xenapi/vm_utils.py: 'umount', dev_path - # nova/virt/configdrive.py: 'umount', mountdir - umount: CommandFilter, umount, root - - # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image - # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device - qemu-nbd: CommandFilter, qemu-nbd, root - - # nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image - # nova/virt/disk/mount/loop.py: 'losetup', '--detach', device - losetup: CommandFilter, losetup, root - - # nova/virt/disk/vfs/localfs.py: 'blkid', '-o', 'value', '-s', 'TYPE', device - blkid: CommandFilter, blkid, root - - # nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path - # nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device - blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.* - - # nova/virt/disk/vfs/localfs.py: 'tee', canonpath - tee: CommandFilter, tee, root - - # nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath - mkdir: CommandFilter, mkdir, root - - # nova/virt/disk/vfs/localfs.py: 'chown' - # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log - # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log - # nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk') - chown: CommandFilter, chown, root - - # nova/virt/disk/vfs/localfs.py: 'chmod' - chmod: CommandFilter, chmod, root - - # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap' - # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up' - # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev - # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i.. - # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'.. - # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',.. 
- # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',.. - # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev) - # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1] - # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge - # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', .. - # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',.. - # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ... - # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,.. - # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up' - # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up' - # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, .. - # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, .. - # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up' - # nova/network/linux_net.py: 'ip', 'route', 'add', .. - # nova/network/linux_net.py: 'ip', 'route', 'del', . - # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev - ip: CommandFilter, ip, root - - # nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev - # nova/network/linux_net.py: 'tunctl', '-b', '-t', dev - tunctl: CommandFilter, tunctl, root - - # nova/virt/libvirt/vif.py: 'ovs-vsctl', ... - # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ... - # nova/network/linux_net.py: 'ovs-vsctl', .... - ovs-vsctl: CommandFilter, ovs-vsctl, root - - # nova/virt/libvirt/vif.py: 'vrouter-port-control', ... - vrouter-port-control: CommandFilter, vrouter-port-control, root - - # nova/virt/libvirt/vif.py: 'ebrctl', ... - ebrctl: CommandFilter, ebrctl, root - - # nova/virt/libvirt/vif.py: 'mm-ctl', ... - mm-ctl: CommandFilter, mm-ctl, root - - # nova/network/linux_net.py: 'ovs-ofctl', .... - ovs-ofctl: CommandFilter, ovs-ofctl, root - - # nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ... - dd: CommandFilter, dd, root - - # nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ... - iscsiadm: CommandFilter, iscsiadm, root - - # nova/virt/libvirt/volume/aoe.py: 'aoe-revalidate', aoedev - # nova/virt/libvirt/volume/aoe.py: 'aoe-discover' - aoe-revalidate: CommandFilter, aoe-revalidate, root - aoe-discover: CommandFilter, aoe-discover, root - - # nova/virt/xenapi/vm_utils.py: parted, --script, ... - # nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*. - parted: CommandFilter, parted, root - - # nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path - pygrub: CommandFilter, pygrub, root - - # nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s - fdisk: CommandFilter, fdisk, root - - # nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path - # nova/virt/disk/api.py: e2fsck, -f, -p, image - e2fsck: CommandFilter, e2fsck, root - - # nova/virt/xenapi/vm_utils.py: resize2fs, partition_path - # nova/virt/disk/api.py: resize2fs, image - resize2fs: CommandFilter, resize2fs, root - - # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... - iptables-save: CommandFilter, iptables-save, root - ip6tables-save: CommandFilter, ip6tables-save, root - - # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) - iptables-restore: CommandFilter, iptables-restore, root - ip6tables-restore: CommandFilter, ip6tables-restore, root - - # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ... - # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],.. 
- arping: CommandFilter, arping, root - - # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address - dhcp_release: CommandFilter, dhcp_release, root - - # nova/network/linux_net.py: 'kill', '-9', pid - # nova/network/linux_net.py: 'kill', '-HUP', pid - kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP - - # nova/network/linux_net.py: 'kill', pid - kill_radvd: KillFilter, root, /usr/sbin/radvd - - # nova/network/linux_net.py: dnsmasq call - dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq - - # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'.. - radvd: CommandFilter, radvd, root - - # nova/network/linux_net.py: 'brctl', 'addbr', bridge - # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0 - # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off' - # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface - brctl: CommandFilter, brctl, root - - # nova/virt/libvirt/utils.py: 'mkswap' - # nova/virt/xenapi/vm_utils.py: 'mkswap' - mkswap: CommandFilter, mkswap, root - - # nova/virt/libvirt/utils.py: 'nova-idmapshift' - nova-idmapshift: CommandFilter, nova-idmapshift, root - - # nova/virt/xenapi/vm_utils.py: 'mkfs' - # nova/utils.py: 'mkfs', fs, path, label - mkfs: CommandFilter, mkfs, root - - # nova/virt/libvirt/utils.py: 'qemu-img' - qemu-img: CommandFilter, qemu-img, root - - # nova/virt/disk/vfs/localfs.py: 'readlink', '-e' - readlink: CommandFilter, readlink, root - - # nova/virt/disk/api.py: - mkfs.ext3: CommandFilter, mkfs.ext3, root - mkfs.ext4: CommandFilter, mkfs.ext4, root - mkfs.ntfs: CommandFilter, mkfs.ntfs, root - - # nova/virt/libvirt/connection.py: - lvremove: CommandFilter, lvremove, root - - # nova/virt/libvirt/utils.py: - lvcreate: CommandFilter, lvcreate, root - - # nova/virt/libvirt/utils.py: - lvs: CommandFilter, lvs, root - - # nova/virt/libvirt/utils.py: - vgs: CommandFilter, vgs, root - - # nova/utils.py:read_file_as_root: 'cat', file_path - # (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file) - read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd - read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow - - # os-brick needed commands - read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi - multipath: CommandFilter, multipath, root - # multipathd show status - multipathd: CommandFilter, multipathd, root - systool: CommandFilter, systool, root - vgc-cluster: CommandFilter, vgc-cluster, root - # os_brick/initiator/connector.py - drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid - - # TODO(smcginnis) Temporary fix. - # Need to pull in os-brick os-brick.filters file instead and clean - # out stale brick values from this file. - scsi_id: CommandFilter, /lib/udev/scsi_id, root - # os_brick.privileged.default oslo.privsep context - # This line ties the superuser privs with the config files, context name, - # and (implicitly) the actual python code invoked. 
- privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.* - - # nova/storage/linuxscsi.py: sg_scan device - sg_scan: CommandFilter, sg_scan, root - - # nova/volume/encryptors/cryptsetup.py: - # nova/volume/encryptors/luks.py: - ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/crypt-.+, .+ - - # nova/volume/encryptors.py: - # nova/virt/libvirt/dmcrypt.py: - cryptsetup: CommandFilter, cryptsetup, root - - # nova/virt/xenapi/vm_utils.py: - xenstore-read: CommandFilter, xenstore-read, root - - # nova/virt/libvirt/utils.py: - rbd: CommandFilter, rbd, root - - # nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path - shred: CommandFilter, shred, root - - # nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control.. - cp: CommandFilter, cp, root - - # nova/virt/xenapi/vm_utils.py: - sync: CommandFilter, sync, root - - # nova/virt/libvirt/imagebackend.py: - ploop: RegExpFilter, ploop, root, ploop, restore-descriptor, .* - prl_disk_tool: RegExpFilter, prl_disk_tool, root, prl_disk_tool, resize, --size, .*M$, --resize_partition, --hdd, .* - - # nova/virt/libvirt/utils.py: 'xend', 'status' - xend: CommandFilter, xend, root - - # nova/virt/libvirt/utils.py: - touch: CommandFilter, touch, root - - # nova/virt/libvirt/volume/vzstorage.py - pstorage-mount: CommandFilter, pstorage-mount, root - network: - pods: - - compute - content: | - # nova-rootwrap command filters for network nodes - # This file should be owned by (and only-writeable by) the root user - - [Filters] - # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap' - # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up' - # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev - # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i.. - # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'.. - # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',.. - # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',.. - # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev) - # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1] - # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge - # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', .. - # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',.. - # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ... - # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,.. - # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up' - # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up' - # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, .. - # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, .. - # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up' - # nova/network/linux_net.py: 'ip', 'route', 'add', .. - # nova/network/linux_net.py: 'ip', 'route', 'del', . - # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev - ip: CommandFilter, ip, root - - # nova/virt/libvirt/vif.py: 'ovs-vsctl', ... - # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ... - # nova/network/linux_net.py: 'ovs-vsctl', .... - ovs-vsctl: CommandFilter, ovs-vsctl, root - - # nova/network/linux_net.py: 'ovs-ofctl', .... - ovs-ofctl: CommandFilter, ovs-ofctl, root - - # nova/virt/libvirt/vif.py: 'ivs-ctl', ... 
- # nova/virt/libvirt/vif.py: 'ivs-ctl', 'del-port', ... - # nova/network/linux_net.py: 'ivs-ctl', .... - ivs-ctl: CommandFilter, ivs-ctl, root - - # nova/virt/libvirt/vif.py: 'ifc_ctl', ... - ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root - - # nova/network/linux_net.py: 'ebtables', '-D' ... - # nova/network/linux_net.py: 'ebtables', '-I' ... - ebtables: CommandFilter, ebtables, root - ebtables_usr: CommandFilter, ebtables, root - - # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ... - iptables-save: CommandFilter, iptables-save, root - ip6tables-save: CommandFilter, ip6tables-save, root - - # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,) - iptables-restore: CommandFilter, iptables-restore, root - ip6tables-restore: CommandFilter, ip6tables-restore, root - - # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ... - # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],.. - arping: CommandFilter, arping, root - - # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address - dhcp_release: CommandFilter, dhcp_release, root - - # nova/network/linux_net.py: 'kill', '-9', pid - # nova/network/linux_net.py: 'kill', '-HUP', pid - kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP - - # nova/network/linux_net.py: 'kill', pid - kill_radvd: KillFilter, root, /usr/sbin/radvd - - # nova/network/linux_net.py: dnsmasq call - dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq - - # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'.. - radvd: CommandFilter, radvd, root - - # nova/network/linux_net.py: 'brctl', 'addbr', bridge - # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0 - # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off' - # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface - brctl: CommandFilter, brctl, root - - # nova/network/linux_net.py: 'sysctl', .... - sysctl: CommandFilter, sysctl, root - - # nova/network/linux_net.py: 'conntrack' - conntrack: CommandFilter, conntrack, root - - # nova/network/linux_net.py: 'fp-vdev' - fp-vdev: CommandFilter, fp-vdev, root - nova_ironic: - DEFAULT: - scheduler_host_manager: ironic_host_manager - compute_driver: ironic.IronicDriver - ram_allocation_ratio: 1.0 - cpu_allocation_ratio: 1.0 - reserved_host_memory_mb: 0 - libvirt: - address_search_enabled: true - # When "address_search_enabled", get the IP address to be used as the target for live migration - # traffic using interface name. - # If this option is set to None, the hostname of the migration target compute node will be used. - live_migration_interface: null - # or set cidr - live_migration_network_cidr: 0/0 - hypervisor: - address_search_enabled: true - # my_ip can be set automatically through this interface name. - host_interface: null - # If host_interface is null there is a fallback mechanism to search - # for interface with routing using host network cidr. 
- host_network_cidr: 0/0 - # This list is the keys to exclude from the config file ingested by nova-compute - nova_compute_redactions: - - database - - api_database - - cell0_database + logging: + logger_root: + handlers: + - stdout + level: INFO nova: DEFAULT: - service_down_time: 120 + block_device_allocate_retries: 180 + block_device_allocate_retries_interval: 5 + cpu_allocation_ratio: 8 default_availability_zone: az1 default_schedule_zone: az1 - # NOTE(cloudnull): the vif_plugging_* options are an implemntation detail of the nova container when running with OVN - vif_plugging_is_fatal: true - vif_plugging_timeout: 300 - log_config_append: /etc/nova/logging.conf - default_ephemeral_format: ext4 - ram_allocation_ratio: 1.0 - disk_allocation_ratio: 1.0 - cpu_allocation_ratio: 8.0 - state_path: /var/lib/nova - osapi_compute_listen: 0.0.0.0 - # NOTE(portdirect): the bind port should not be defined, and is manipulated - # via the endpoints section. - osapi_compute_listen_port: null - osapi_compute_workers: 8 - metadata_workers: 8 - compute_driver: libvirt.LibvirtDriver - my_ip: 0.0.0.0 - instance_usage_audit: True - instance_usage_audit_period: hour - resume_guests_state_on_host_boot: True instance_build_timeout: 900 - block_device_allocate_retries: 180 - block_device_allocate_retries_interval: 5 + metadata_workers: 2 + osapi_compute_workers: 2 preallocate_images: space + service_down_time: 120 + vif_plugging_is_fatal: true + vif_plugging_timeout: 300 + cross_az_attach: true + network_allocate_retries: 3 + api_database: + idle_timeout: 3600 + connection_recycle_time: 3600 + pool_timeout: 60 + cell0_database: + idle_timeout: 3600 + connection_recycle_time: 3600 + pool_timeout: 60 compute: max_disk_devices_to_attach: 8 - vnc: - auth_schemes: none - novncproxy_host: 0.0.0.0 - server_listen: 0.0.0.0 - # This would be set by each compute nodes's ip - # server_proxyclient_address: 127.0.0.1 - spice: - html5proxy_host: 0.0.0.0 - server_listen: 0.0.0.0 - # This would be set by each compute nodes's ip - # server_proxyclient_address: 127.0.0.1 conductor: - workers: 8 - schedule: - workers: 8 - max_attempts: 5 - oslo_policy: - policy_file: /etc/nova/policy.yaml - oslo_concurrency: - lock_path: /tmp/nova - oslo_middleware: - enable_proxy_headers_parsing: true + workers: 4 + database: + idle_timeout: 3600 + connection_recycle_time: 3600 + pool_timeout: 60 glance: num_retries: 8 - ironic: - api_endpoint: null - auth_url: null - neutron: - metadata_proxy_shared_secret: "password" - service_metadata_proxy: True - auth_type: password - auth_version: v3 - cinder: - cross_az_attach: true - catalog_info: volumev3::internalURL key_manager: backend: barbican - database: - max_retries: -1 - api_database: - max_retries: -1 - cell0_database: - max_retries: -1 keystone_authtoken: - service_token_roles: service - service_token_roles_required: true auth_type: password auth_version: v3 memcache_security_strategy: ENCRYPT + service_token_roles: service + service_token_roles_required: true service_type: compute - notifications: - notify_on_state_change: vm_and_task_state - service_user: - auth_type: password - send_service_user_token: true libvirt: - connection_uri: "qemu+unix:///system?socket=/run/libvirt/libvirt-sock" - images_type: qcow2 - images_rbd_pool: vms - images_rbd_ceph_conf: /etc/ceph/ceph.conf - rbd_user: cinder - rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337 - disk_cachemodes: "network=writeback" - hw_disk_discard: unmap cpu_mode: host-model - volume_use_multipath: false # Disabled because multipathd 
is not configured or running - num_pcie_ports: 16 max_queues: 8 - upgrade_levels: - compute: auto - cache: - enabled: true - backend: dogpile.cache.memcached - wsgi: - api_paste_config: /etc/nova/api-paste.ini - oslo_messaging_notifications: - driver: messagingv2 + num_pcie_ports: 16 + volume_use_multipath: false + os_vif_ovs: + ovsdb_connection: "tcp:127.0.0.1:6640" oslo_messaging_rabbit: amqp_durable_queues: false - # We define use of quorum queues via kustomize but this was enabling HA queues instead - # ha_queues are deprecated, explicitly set to false and set quorum_queue true rabbit_ha_queues: false rabbit_quorum_queue: true - # TODO: Not available until 2024.1, but once it is, we want to enable these! - # new feature ref; https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html rabbit_transient_quorum_queue: false use_queue_manager: false - # Reconnect after a node outage more quickly rabbit_interval_max: 10 # Send more frequent heartbeats and fail unhealthy nodes faster # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 heartbeat_rate: 3 - heartbeat_timeout_threshold: 30 - # Setting lower kombu_reconnect_delay should resolve isssue with HA failing when one node is down + heartbeat_timeout_threshold: 60 + # NOTE (deprecation warning) heartbeat_in_pthread will be deprecated in 2024.2 + heartbeat_in_pthread: True + # Setting lower kombu_reconnect_delay should resolve issue with HA failing when one node is down # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 kombu_reconnect_delay: 0.5 - os_vif_ovs: - ovsdb_connection: tcp:127.0.0.1:6640 - placement: - auth_type: password - auth_version: v3 + schedule: + workers: 4 workarounds: skip_cpu_compare_at_startup: false skip_cpu_compare_on_dest: false - logging: - loggers: - keys: - - root - - oslo.db - - nova - - os.brick - handlers: - keys: - - stdout - - stderr - - "null" - formatters: - keys: - - context - - default - logger_root: - level: INFO - handlers: - - stdout - logger_nova: - level: INFO - handlers: - - stdout - qualname: nova - logger_oslo.db: - level: DEBUG - handlers: - - stdout - qualname: oslo_db.api - logger_os.brick: - level: INFO - handlers: - - stdout - qualname: os.brick - logger_amqp: - level: WARNING - handlers: stderr - qualname: amqp - logger_amqplib: - level: WARNING - handlers: stderr - qualname: amqplib - logger_eventletwsgi: - level: WARNING - handlers: stderr - qualname: eventlet.wsgi.server - logger_sqlalchemy: - level: WARNING - handlers: stderr - qualname: sqlalchemy - logger_boto: - level: WARNING - handlers: stderr - qualname: boto - handler_null: - class: logging.NullHandler - formatter: default - args: () - handler_stdout: - class: StreamHandler - args: (sys.stdout,) - formatter: context - handler_stderr: - class: StreamHandler - args: (sys.stderr,) - formatter: context - formatter_context: - class: oslo_log.formatters.ContextFormatter - datefmt: "%Y-%m-%d %H:%M:%S" - formatter_default: - format: "%(message)s" - datefmt: "%Y-%m-%d %H:%M:%S" + nova_api_uwsgi: + uwsgi: + processes: 4 + threads: 2 + nova_metadata_uwsgi: + uwsgi: + processes: 4 + threads: 2 rabbitmq: policies: [] - enable_iscsi: false - archive_deleted_rows: - purge_deleted_rows: false - until_completion: true - all_cells: false - max_rows: - enabled: False - rows: 1000 - before: - enabled: false - date: 'nil' -# Names of secrets used 
by bootstrap and environmental checks -secrets: - identity: - admin: nova-keystone-admin - nova: nova-keystone-user - test: nova-keystone-test - oslo_db: - admin: nova-db-admin - nova: nova-db-user - oslo_db_api: - admin: nova-db-api-admin - nova: nova-db-api-user - oslo_db_cell0: - admin: nova-db-cell0-admin - nova: nova-db-cell0-user - oslo_messaging: - admin: nova-rabbitmq-admin - nova: nova-rabbitmq-user - tls: - compute: - osapi: - public: nova-tls-public - internal: nova-tls-api - compute_novnc_proxy: - novncproxy: - public: nova-novncproxy-tls-public - internal: nova-novncproxy-tls-proxy - vencrypt: - internal: nova-novncproxy-vencrypt - compute_metadata: - metadata: - public: metadata-tls-public - internal: metadata-tls-metadata - compute_spice_proxy: - spiceproxy: - public: nova-spiceproxy-tls-public - internal: nova-spiceproxy-tls-proxy - oci_image_registry: - nova: nova-oci-image-registry -# typically overridden by environmental -# values, but should include all endpoints -# required by this chart + endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - nova: - username: nova - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null - oslo_db: - auth: - admin: - username: root - password: password - secret: - tls: - internal: mariadb-tls-direct - nova: - username: nova - password: password - hosts: - default: mariadb-cluster-primary - host_fqdn_override: - default: null - path: /nova - scheme: mysql+pymysql - port: - mysql: - default: 3306 - oslo_db_api: - auth: - admin: - username: root - password: password - nova: - username: nova - password: password - hosts: - default: mariadb-cluster-primary - host_fqdn_override: - default: null - path: /nova_api - scheme: mysql+pymysql - port: - mysql: - default: 3306 - oslo_db_cell0: - auth: - admin: - username: root - password: password - nova: - username: nova - password: password - hosts: - default: mariadb-cluster-primary - host_fqdn_override: - default: null - path: /nova_cell0 - scheme: mysql+pymysql - port: - mysql: - default: 3306 - oslo_messaging: - auth: - admin: - username: rabbitmq - password: password - secret: - tls: - internal: rabbitmq-tls-direct - nova: - username: nova - password: password - statefulset: - replicas: 3 - name: rabbitmq-server - hosts: - default: rabbitmq-nodes - host_fqdn_override: - default: rabbitmq.openstack.svc.cluster.local - path: /nova - scheme: rabbit - port: - amqp: - default: 5672 - http: - default: 15672 - oslo_cache: - auth: - # NOTE(portdirect): this is used to define the value for keystone - # authtoken cache encryption key, if not set it will be populated - # automatically with a random value, but to take advantage of - # this feature all services should be set to use the same key, - # and memcache service. 
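With heartbeat_timeout_threshold raised from 30 to 60 in the oslo_messaging_rabbit hunk above, while heartbeat_rate stays at 3, the formula quoted in the retained comment now yields a different effective interval; a small worked check, using only the values and formula shown in that hunk:

    heartbeat_timeout_threshold / heartbeat_rate / 2.0 = 60 / 3 / 2.0 = 10 seconds

so heartbeats are sent roughly every 10 seconds under the new setting, not the 5 seconds that the older comment text still states.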
- memcache_secret_key: null - hosts: - default: memcached - host_fqdn_override: - default: null - port: - memcache: - default: 11211 - identity: - name: keystone - auth: - admin: - region_name: RegionOne - username: admin - password: password - project_name: admin - user_domain_name: default - project_domain_name: default - nova: - role: admin,service - region_name: RegionOne - username: nova - password: password - project_name: service - user_domain_name: service - project_domain_name: service - # NOTE(portdirect): the neutron user is not managed by the nova chart - # these values should match those set in the neutron chart. - neutron: - region_name: RegionOne - project_name: service - user_domain_name: service - project_domain_name: service - username: neutron - password: password - # NOTE(portdirect): the ironic user is not managed by the nova chart - # these values should match those set in the ironic chart. - ironic: - auth_type: password - auth_version: v3 - region_name: RegionOne - project_name: service - user_domain_name: service - project_domain_name: service - username: ironic - password: password - placement: - role: admin - region_name: RegionOne - username: placement - password: password - project_name: service - user_domain_name: service - project_domain_name: service - cinder: - role: admin,service - region_name: RegionOne - username: cinder - password: password - project_name: service - user_domain_name: service - project_domain_name: service - test: - role: admin - region_name: RegionOne - username: nova-test - password: password - project_name: test - user_domain_name: service - project_domain_name: service - hosts: - default: keystone - internal: keystone-api - host_fqdn_override: - default: null - path: - default: /v3 - scheme: - default: http - port: - api: - default: 5000 - public: 80 - internal: 5000 - service: 5000 - image: - name: glance - hosts: - default: glance-api - public: glance - host_fqdn_override: - default: null - path: - default: null - scheme: - default: http + baremetal: port: api: - default: 9292 + default: 6385 + internal: 6385 public: 80 - internal: 9292 - service: 9292 + service: 6385 compute: - name: nova - hosts: - default: nova-api - public: nova - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: "/v2.1/%(tenant_id)s" - scheme: - default: 'http' - service: 'http' port: api: default: 8774 - public: 80 internal: 8774 + public: 80 service: 8774 - novncproxy: - default: 6080 compute_metadata: - name: nova - ip: - # IF blank, set clusterIP and metadata_host dynamically - ingress: null - hosts: - default: nova-metadata - public: metadata - host_fqdn_override: - default: null - path: - default: / - scheme: - default: 'http' port: metadata: default: 8775 - public: 80 internal: 8775 + public: 80 service: 8775 compute_novnc_proxy: - name: nova - hosts: - default: nova-novncproxy - public: novncproxy - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: /vnc_auto.html - scheme: - default: 'http' port: novnc_proxy: default: 6080 - public: 80 internal: 6080 + public: 80 service: 6080 - # This endpoint is only to allow configuring the cert used specifically for - # vencrypt. 
Specifically, the same CA/issuer needs to be used to sign both - # this cert, and the libvirt/qemu certs. - compute_novnc_vencrypt: - hosts: - default: nova-novncproxy - host_fqdn_override: - default: - commonName: nova-novncproxy - usages: - - client auth compute_spice_proxy: - name: nova - hosts: - default: nova-spiceproxy - public: spiceproxy - host_fqdn_override: - default: null - path: - default: /spice_auto.html - scheme: - default: 'http' port: spice_proxy: default: 6082 public: 6082 - placement: - name: placement - hosts: - default: placement-api - public: placement - host_fqdn_override: - default: null - path: - default: / - scheme: - default: 'http' - service: 'http' + fluentd: + namespace: fluentbit + identity: port: api: - default: 8778 + default: 5000 + internal: 5000 public: 80 - internal: 8778 - service: 8778 + service: 5000 + image: + port: + api: + default: 9292 + internal: 9292 + public: 80 + service: 9292 network: - name: neutron - hosts: - default: neutron-server - public: neutron - host_fqdn_override: - default: null - path: - default: null - scheme: - default: 'http' port: api: default: 9696 - public: 80 internal: 9696 + public: 80 service: 9696 - baremetal: - name: ironic + oslo_db: + host_fqdn_override: + default: mariadb-cluster-primary.openstack.svc.cluster.local hosts: - default: ironic-api - public: ironic + default: mariadb-cluster-primary + oslo_db_api: host_fqdn_override: - default: null - path: - default: null - scheme: - default: http - port: - api: - default: 6385 - public: 80 - internal: 6385 - service: 6385 - fluentd: - namespace: fluentbit - name: fluentd + default: mariadb-cluster-primary.openstack.svc.cluster.local hosts: - default: fluentd-logging + default: mariadb-cluster-primary + oslo_db_cell0: host_fqdn_override: - default: null - path: - default: null - scheme: 'http' - port: - service: - default: 24224 - metrics: - default: 24220 - # NOTE(tp6510): these endpoints allow for things like DNS lookups and ingress - # They are using to enable the Egress K8s network policy. 
- kube_dns: - namespace: kube-system - name: kubernetes-dns + default: mariadb-cluster-primary.openstack.svc.cluster.local hosts: - default: kube-dns + default: mariadb-cluster-primary + oslo_cache: host_fqdn_override: - default: null - path: - default: null - scheme: http - port: - dns: - default: 53 - protocol: UDP - ingress: - namespace: null - name: ingress + default: memcached.openstack.svc.cluster.local hosts: - default: ingress + default: memcached + oslo_messaging: + host_fqdn_override: + default: rabbitmq.openstack.svc.cluster.local + hosts: + default: rabbitmq-nodes + placement: port: - ingress: - default: 80 + api: + default: 8778 + internal: 8778 + public: 80 + service: 8778 + pod: probes: - rpc_timeout: 60 rpc_retries: 3 - compute: - default: - liveness: - enabled: True - params: - periodSeconds: 120 - timeoutSeconds: 120 - readiness: - enabled: True - params: - periodSeconds: 90 - timeoutSeconds: 70 - startup: - enabled: True - params: - failureThreshold: 120 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 70 - api-metadata: - default: - liveness: - enabled: True - params: - initialDelaySeconds: 30 - periodSeconds: 60 - timeoutSeconds: 15 - readiness: - enabled: True - params: - initialDelaySeconds: 30 - periodSeconds: 60 - timeoutSeconds: 15 - api-osapi: - default: - liveness: - enabled: True - params: - initialDelaySeconds: 30 - periodSeconds: 60 - timeoutSeconds: 15 - readiness: - enabled: True - params: - initialDelaySeconds: 30 - periodSeconds: 60 - timeoutSeconds: 15 - conductor: - default: - liveness: - enabled: True - params: - initialDelaySeconds: 120 - periodSeconds: 120 - timeoutSeconds: 120 - readiness: - enabled: True - params: - initialDelaySeconds: 80 - periodSeconds: 90 - timeoutSeconds: 70 - novncproxy: - default: - liveness: - enabled: True - params: - initialDelaySeconds: 30 - periodSeconds: 60 - timeoutSeconds: 15 - readiness: - enabled: True - params: - initialDelaySeconds: 30 - periodSeconds: 60 - timeoutSeconds: 15 - scheduler: - default: - liveness: - enabled: True - params: - initialDelaySeconds: 120 - periodSeconds: 120 - timeoutSeconds: 120 - readiness: - enabled: True - params: - initialDelaySeconds: 80 - periodSeconds: 90 - timeoutSeconds: 70 - compute-spice-proxy: - default: - liveness: - enabled: True - params: - initialDelaySeconds: 30 - periodSeconds: 60 - timeoutSeconds: 15 - readiness: - enabled: True - params: - initialDelaySeconds: 30 - periodSeconds: 60 - timeoutSeconds: 15 security_context: nova: - pod: - runAsUser: 42424 container: - nova_compute_init: - readOnlyRootFilesystem: true - runAsUser: 0 - tungstenfabric_compute_init: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - ceph_perms: - readOnlyRootFilesystem: true - runAsUser: 0 - nova_compute_vnc_init: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nova_compute_spice_init: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false nova_compute: readOnlyRootFilesystem: false - privileged: true - nova_compute_ssh: - privileged: true - runAsUser: 0 - nova_compute_ssh_init: - runAsUser: 0 - nova_api_metadata_init: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nova_api: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nova_osapi: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nova_conductor: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nova_novncproxy_init: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nova_novncproxy_init_assests: 
- readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nova_novncproxy: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nova_scheduler: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nova_spiceproxy_init: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nova_spiceproxy_init_assets: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nova_spiceproxy: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - bootstrap: - pod: - runAsUser: 42424 - container: - nova_wait_for_computes_init: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - bootstrap: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nova_cell_setup: - pod: - runAsUser: 42424 - container: - nova_wait_for_computes_init: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nova_cell_setup_init: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nova_cell_setup: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - archive_deleted_rows: - pod: - runAsUser: 42424 - container: - nova_archive_deleted_rows_init: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - nova_archive_deleted_rows: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - cell_setup: - pod: - runAsUser: 42424 - container: - nova_cell_setup: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false - service_cleaner: - pod: - runAsUser: 42424 - container: - nova_service_cleaner: - readOnlyRootFilesystem: true - allowPrivilegeEscalation: false use_fqdn: - # NOTE: If the option "host" is not specified in nova.conf, the host name - # shown in the hypervisor host is defaulted to the short name of the host. - # Setting the option here to true will cause use $(hostname --fqdn) as the - # host name by default. If the short name is desired $(hostname --short), - # set the option to false. Specifying a host in the nova.conf via the conf: - # section will supersede the value of this option. 
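The note being removed just above still describes how the chart resolves the hypervisor host name: an explicit host set under the conf: section supersedes pod.use_fqdn.compute. A minimal sketch of that precedence, using a hypothetical hostname purely for illustration:

# Hypothetical override illustrating the precedence described in the removed note.
conf:
  nova:
    DEFAULT:
      host: compute-01.example.com   # explicit value; supersedes use_fqdn below
pod:
  use_fqdn:
    compute: false                   # only consulted when DEFAULT.host is not set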
compute: false - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - weight: - default: 10 - tolerations: - nova: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - mounts: - nova_compute: - init_container: null - nova_compute: - volumeMounts: - volumes: - nova_compute_ironic: - init_container: null - nova_compute_ironic: - volumeMounts: - volumes: - nova_api_metadata: - init_container: null - nova_api_metadata: - volumeMounts: - volumes: - nova_api_osapi: - init_container: null - nova_api_osapi: - volumeMounts: - volumes: - nova_conductor: - init_container: null - nova_conductor: - volumeMounts: - volumes: - nova_scheduler: - init_container: null - nova_scheduler: - volumeMounts: - volumes: - nova_bootstrap: - init_container: null - nova_bootstrap: - volumeMounts: - volumes: - nova_tests: - init_container: null - nova_tests: - volumeMounts: - volumes: - nova_novncproxy: - init_novncproxy: null - nova_novncproxy: - volumeMounts: - volumes: - nova_spiceproxy: - init_spiceproxy: null - nova_spiceproxy: - volumeMounts: - volumes: - nova_db_sync: - nova_db_sync: - volumeMounts: - volumes: - useHostNetwork: - novncproxy: true - replicas: - api_metadata: 1 - compute_ironic: 1 - osapi: 1 - conductor: 1 - scheduler: 1 - novncproxy: 1 - spiceproxy: 1 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - daemonsets: - pod_replacement_strategy: RollingUpdate - compute: - enabled: true - min_ready_seconds: 0 - max_unavailable: 20% - disruption_budget: - metadata: - min_available: 0 - osapi: - min_available: 0 - termination_grace_period: - metadata: - timeout: 30 - osapi: - timeout: 30 - resources: - enabled: true - compute: - requests: - memory: "512Mi" - cpu: "200m" - limits: - memory: "4096Mi" - cpu: "2000m" - compute_ironic: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - api_metadata: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - api: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - conductor: - requests: - memory: "256Mi" - cpu: "200m" - limits: - memory: "4096Mi" - cpu: "2000m" - scheduler: - requests: - memory: "1024Mi" - cpu: "200m" - limits: - memory: "6144Mi" - cpu: "2000m" - ssh: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - novncproxy: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - spiceproxy: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - jobs: - bootstrap: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - storage_init: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - db_init: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - rabbit_init: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - db_sync: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - archive_deleted_rows: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - db_drop: - requests: - memory: "128Mi" - cpu: 
"100m" - limits: - memory: "1024Mi" - cpu: "2000m" - ks_endpoints: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - ks_service: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - ks_user: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - tests: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - cell_setup: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - service_cleaner: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" - image_repo_sync: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "1024Mi" - cpu: "2000m" -network_policy: - nova: - # TODO(lamt): Need to tighten this ingress for security. - ingress: - - {} - egress: - - {} -# NOTE(helm_hook): helm_hook might break for helm2 binary. -# set helm3_hook: false when using the helm2 binary. -helm3_hook: true -health_probe: - logging: - level: ERROR -tls: - identity: false - oslo_messaging: false - oslo_db: false + manifests: - certificates: false - configmap_bin: true - configmap_etc: true - cron_job_cell_setup: true - cron_job_service_cleaner: true - cron_job_archive_deleted_rows: false - daemonset_compute: true - deployment_api_metadata: true - deployment_api_osapi: true - deployment_conductor: true - deployment_novncproxy: true deployment_spiceproxy: false - deployment_scheduler: true ingress_metadata: false ingress_novncproxy: false - ingress_spiceproxy: false ingress_osapi: false - job_bootstrap: true - job_storage_init: false + ingress_spiceproxy: false job_db_init: false - job_db_sync: true - job_db_drop: false - job_image_repo_sync: true job_rabbit_init: false - job_ks_endpoints: true - job_ks_service: true - job_ks_user: true - job_cell_setup: true - pdb_metadata: true - pdb_osapi: true + job_storage_init: false pod_rally_test: false - network_policy: false - secret_db_api: true - secret_db_cell0: true - secret_db: true secret_ingress_tls: false - secret_keystone: true - secret_rabbitmq: true - secret_registry: true service_ingress_metadata: false service_ingress_novncproxy: false - service_ingress_spiceproxy: false service_ingress_osapi: false - service_metadata: true - service_novncproxy: true + service_ingress_spiceproxy: false service_spiceproxy: false - service_osapi: true - statefulset_compute_ironic: false diff --git a/base-helm-configs/octavia/octavia-helm-overrides.yaml b/base-helm-configs/octavia/octavia-helm-overrides.yaml index 45f29d0b1..458fe5eed 100644 --- a/base-helm-configs/octavia/octavia-helm-overrides.yaml +++ b/base-helm-configs/octavia/octavia-helm-overrides.yaml @@ -1,205 +1,74 @@ --- -release_group: null - -labels: - api: - node_selector_key: openstack-control-plane - node_selector_value: enabled - worker: - node_selector_key: openstack-control-plane - node_selector_value: enabled - housekeeping: - node_selector_key: openstack-control-plane - node_selector_value: enabled - health_manager: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - images: tags: - test: "quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0" - bootstrap: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - db_drop: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - rabbit_init: 
"quay.io/rackspace/rackerlabs-rabbitmq:3.13-management" - ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_endpoints: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" - image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" - octavia_db_sync: "quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745" - octavia_api: "quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745" - octavia_worker: "quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745" - octavia_housekeeping: "quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745" - octavia_health_manager: "quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745" - octavia_health_manager_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - openvswitch_vswitchd: docker.io/kolla/centos-source-openvswitch-vswitchd:rocky - pull_policy: "IfNotPresent" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync - -bootstrap: - enabled: true - ks_user: admin - script: | - openstack role create --or-show load-balancer_admin - openstack role create --or-show load-balancer_observer - openstack role create --or-show load-balancer_global_observer - openstack role create --or-show load-balancer_quota_admin - openstack role create --or-show load-balancer_member - -network: - api: - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - external_policy_local: false - node_port: - enabled: false - port: 30826 + bootstrap: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + db_drop: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + db_init: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + dep_check: 'quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0' + image_repo_sync: 'quay.io/rackspace/rackerlabs-docker:17.07.0' + ks_endpoints: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + ks_service: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + ks_user: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + octavia_api: 'quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745' + octavia_db_sync: 'quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745' + octavia_health_manager: 'quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745' + octavia_health_manager_init: 'quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy' + octavia_housekeeping: 'quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745' + octavia_worker: 'quay.io/rackspace/rackerlabs-octavia-ovn:2024.1-ubuntu_jammy-1737651745' + openvswitch_vswitchd: 'docker.io/kolla/centos-source-openvswitch-vswitchd:rocky' + rabbit_init: 'quay.io/rackspace/rackerlabs-rabbitmq:3.13-management' + test: 'quay.io/rackspace/rackerlabs-xrally-openstack:2.0.0' dependencies: - dynamic: - common: - local_image_registry: - jobs: - - heat-image-repo-sync - services: - - endpoint: node - service: local_image_registry static: api: jobs: - octavia-db-sync - octavia-ks-user - octavia-ks-endpoints - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_cache - - endpoint: internal - service: network - worker: + 
db_sync: + jobs: [] + health_manager: jobs: - octavia-db-sync - octavia-ks-user - octavia-ks-endpoints - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_cache - - endpoint: internal - service: network - - endpoint: internal - service: load_balancer housekeeping: jobs: - octavia-db-sync - octavia-ks-user - octavia-ks-endpoints - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_cache - - endpoint: internal - service: network - - endpoint: internal - service: load_balancer - health_manager: + worker: jobs: - octavia-db-sync - octavia-ks-user - octavia-ks-endpoints - services: - - endpoint: internal - service: oslo_db - - endpoint: internal - service: identity - - endpoint: internal - service: oslo_messaging - - endpoint: internal - service: oslo_cache - - endpoint: internal - service: network - - endpoint: internal - service: load_balancer - db_init: - services: - - endpoint: internal - service: oslo_db - db_sync: - jobs: [] - services: - - endpoint: internal - service: oslo_db - ks_endpoints: - jobs: - - octavia-ks-service - services: - - endpoint: internal - service: identity - ks_service: - services: - - endpoint: internal - service: identity - ks_user: - services: - - endpoint: internal - service: identity - rabbit_init: - services: - - endpoint: internal - service: oslo_messaging - image_repo_sync: - services: - - endpoint: internal - service: local_image_registry conf: + logging: + logger_root: + handlers: + - stdout + level: INFO octavia: - DEFAULT: - log_config_append: /etc/octavia/logging.conf api_settings: default_provider_driver: amphora enabled_provider_drivers: >- ovn: "The Octavia OVN driver", amphora: "The Octavia Amphora driver" - api_handler: queue_producer - bind_host: 0.0.0.0 + certificates: + endpoint_type: internalURL + cinder: + endpoint_type: internalURL + valid_interfaces: internal + controller_worker: + loadbalancer_topology: ACTIVE_STANDBY + workers: 4 driver_agent: enabled_provider_agents: ovn - database: - max_retries: -1 - health_manager: - bind_port: 5555 - bind_ip: 0.0.0.0 - controller_ip_port_list: 0.0.0.0:5555 - heartbeat_key: insecure + glance: + endpoint_type: internalURL + valid_interfaces: internal keystone_authtoken: service_token_roles: service service_token_roles_required: true @@ -208,583 +77,128 @@ conf: memcache_security_strategy: ENCRYPT service_type: load-balancer valid_interfaces: internal - certificates: - endpoint_type: internalURL - ca_private_key_passphrase: foobar - ca_private_key: /etc/octavia/certs/private/cakey.pem - ca_certificate: /etc/octavia/certs/ca_01.pem - cinder: - endpoint_type: internalURL - valid_interfaces: internal - glance: - endpoint_type: internalURL - valid_interfaces: internal neutron: endpoint_type: internalURL valid_interfaces: internal - haproxy_amphora: - server_ca: /etc/octavia/certs/ca_01.pem - client_cert: /etc/octavia/certs/client.pem - base_path: /var/lib/octavia - base_cert_dir: /var/lib/octavia/certs - controller_worker: - amp_image_owner_id: null - amp_secgroup_list: null - amp_flavor_id: null - amp_boot_network_list: null - amp_ssh_key_name: octavia_ssh_key - amp_image_tag: amphora - network_driver: allowed_address_pairs_driver - compute_driver: compute_nova_driver - amphora_driver: amphora_haproxy_rest_driver - workers: 8 - amp_active_retries: 
100 - amp_active_wait_sec: 2 - loadbalancer_topology: ACTIVE_STANDBY - oslo_messaging: - topic: octavia_prov - rpc_thread_pool_size: 2 - oslo_messaging_notifications: - driver: messagingv2 + nova: + enable_anti_affinity: 'True' + endpoint_type: internalURL oslo_concurrency: lock_path: /tmp/octavia oslo_messaging_rabbit: amqp_durable_queues: false - # We define use of quorum queues via kustomize but this was enabling HA queues instead - # ha_queues are deprecated, explicitly set to false and set quorum_queue true rabbit_ha_queues: false rabbit_quorum_queue: true - # TODO: Not available until 2024.1, but once it is, we want to enable these! - # new feature ref; https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html rabbit_transient_quorum_queue: false use_queue_manager: false - # Reconnect after a node outage more quickly rabbit_interval_max: 10 # Send more frequent heartbeats and fail unhealthy nodes faster # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 heartbeat_rate: 3 - heartbeat_timeout_threshold: 30 - # Setting lower kombu_reconnect_delay should resolve isssue with HA failing when one node is down + heartbeat_timeout_threshold: 60 + # NOTE (deprecation warning) heartbeat_in_pthread will be deprecated in 2024.2 + heartbeat_in_pthread: True + # Setting lower kombu_reconnect_delay should resolve issue with HA failing when one node is down # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 kombu_reconnect_delay: 0.5 - house_keeping: - load_balancer_expiry_age: 3600 - amphora_expiry_age: 3600 + ovn: + ovn_nb_connection: 'tcp:127.0.0.1:6641' + ovn_sb_connection: 'tcp:127.0.0.1:6642' service_auth: - auth_type: password - cafile: "" - auth_version: v3 - memcache_security_strategy: ENCRYPT insecure: true - ovn: - ovn_sb_connection: tcp:127.0.0.1:6642 - ovn_nb_connection: tcp:127.0.0.1:6641 - nova: - enable_anti_affinity: "True" - endpoint_type: internalURL - - logging: - loggers: - keys: - - root - - octavia - handlers: - keys: - - stdout - - stderr - - "null" - formatters: - keys: - - context - - default - logger_root: - level: INFO - handlers: - - stdout - logger_octavia: - level: WARNING - handlers: - - stdout - qualname: octavia - logger_amqp: - level: WARNING - handlers: stderr - qualname: amqp - logger_amqplib: - level: WARNING - handlers: stderr - qualname: amqplib - logger_eventletwsgi: - level: WARNING - handlers: stderr - qualname: eventlet.wsgi.server - logger_sqlalchemy: - level: WARNING - handlers: stderr - qualname: sqlalchemy - logger_boto: - level: WARNING - handlers: stderr - qualname: boto - handler_null: - class: logging.NullHandler - formatter: default - args: () - handler_stdout: - class: StreamHandler - args: (sys.stdout,) - formatter: context - handler_stderr: - class: StreamHandler - args: (sys.stderr,) - formatter: context - formatter_context: - class: oslo_log.formatters.ContextFormatter - formatter_default: - format: "%(message)s" - rabbitmq: - # NOTE(rk760n): adding rmq policy to mirror messages from notification queues and set expiration time for the ones - policies: - - vhost: "octavia" - name: "ha_ttl_octavia" - definition: - # mirror messges to other nodes in rmq cluster - ha-mode: "all" - ha-sync-mode: "automatic" - # 70s - message-ttl: 70000 - priority: 0 - apply-to: all - pattern: '(notifications)\.' 
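One note on the oslo_messaging_rabbit changes above (the placement overrides later in this diff receive the same values): the in-line comment still works the interval out with the old threshold of 30, but with heartbeat_timeout_threshold raised to 60 and heartbeat_rate left at 3, the same quoted formula gives 60 / 3 / 2.0 = 10, i.e. heartbeats are now sent roughly every 10 seconds while an unresponsive node is still failed within 60 seconds. In values form:

oslo_messaging_rabbit:
  heartbeat_rate: 3
  heartbeat_timeout_threshold: 60   # send interval by the quoted formula: 60 / 3 / 2.0 = 10s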
- -secrets: - identity: - admin: octavia-keystone-admin - octavia: octavia-keystone-user - test: octavia-keystone-test - oslo_db: - admin: octavia-db-admin - octavia: octavia-db-user - oslo_messaging: - admin: octavia-rabbitmq-admin - octavia: octavia-rabbitmq-user - tls: - load_balancer: - api: - public: octavia-tls-public - oci_image_registry: - octavia: octavia-oci-image-registry + octavia_api_uwsgi: + uwsgi: + processes: 4 + threads: 2 endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - octavia: - username: octavia - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null + fluentd: + namespace: fluentbit identity: - name: keystone - auth: - admin: - region_name: RegionOne - username: admin - password: password - project_name: admin - user_domain_name: default - project_domain_name: default - octavia: - role: admin - region_name: RegionOne - username: octavia - password: password - project_name: service - user_domain_name: service - project_domain_name: service - test: - role: admin - region_name: RegionOne - username: test - password: password - project_name: test - user_domain_name: service - project_domain_name: service - hosts: - default: keystone - internal: keystone-api - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: /v3 - scheme: - default: http - service: http port: api: admin: 5000 default: 5000 - public: 80 - # NOTE(portdirect): to retain portability across images, and allow - # running under a unprivileged user simply, we default to a port > 1000. internal: 5000 + public: 80 service: 5000 + scheme: + default: http + service: http load_balancer: - name: octavia hosts: - internal: octavia-api default: octavia - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: null + internal: octavia-api + port: + api: + default: 9876 + internal: 9876 + public: 80 + service: 9876 scheme: default: http service: http + network: port: api: - default: 9876 + default: 9696 + internal: 9696 public: 80 - internal: 9876 - service: 9876 + service: 9696 + scheme: + default: http + service: http oslo_db: - auth: - admin: - username: root - password: password - octavia: - username: octavia - password: password + host_fqdn_override: + default: mariadb-cluster-primary.openstack.svc.cluster.local hosts: default: mariadb-cluster-primary - host_fqdn_override: - default: null - path: /octavia - scheme: mysql+pymysql - port: - mysql: - default: 3306 oslo_cache: - auth: - # NOTE(portdirect): this is used to define the value for keystone - # authtoken cache encryption key, if not set it will be populated - # automatically with a random value, but to take advantage of - # this feature all services should be set to use the same key, - # and memcache service. 
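The oslo_db rewrite above, and the matching oslo_cache and oslo_messaging blocks nearby, all follow one pattern: keep the short in-namespace Service name under hosts.default and pin host_fqdn_override.default to the cluster-local FQDN the chart should substitute when it renders endpoint URLs. A generic sketch of the pattern with placeholder names:

endpoints:
  some_backing_service:                # stands in for oslo_db, oslo_cache, oslo_messaging, ...
    hosts:
      default: service-name            # short Service name
    host_fqdn_override:
      default: service-name.openstack.svc.cluster.local   # fully qualified override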
- memcache_secret_key: null + host_fqdn_override: + default: memcached.openstack.svc.cluster.local hosts: default: memcached - host_fqdn_override: - default: null - port: - memcache: - default: 11211 oslo_messaging: - auth: - admin: - username: rabbitmq - password: password - octavia: - username: octavia - password: password - statefulset: - replicas: 3 - name: rabbitmq-server - hosts: - default: rabbitmq-nodes host_fqdn_override: default: rabbitmq.openstack.svc.cluster.local - path: /octavia - scheme: rabbit - port: - amqp: - default: 5672 - http: - default: 15672 - network: - name: neutron - hosts: - default: neutron-server - public: neutron - host_fqdn_override: - default: null - # NOTE(portdirect): this chart supports TLS for fqdn over-ridden public - # endpoints using the following format: - # public: - # host: null - # tls: - # crt: null - # key: null - path: - default: null - scheme: - default: 'http' - service: 'http' - port: - api: - default: 9696 - public: 80 - internal: 9696 - service: 9696 - fluentd: - namespace: fluentbit - name: fluentd hosts: - default: fluentd-logging - host_fqdn_override: - default: null - path: - default: null - scheme: 'http' - port: - service: - default: 24224 - metrics: - default: 24220 + default: rabbitmq-nodes pod: - user: - octavia: - uid: 42424 affinity: anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname weight: default: 10 - tolerations: - octavia: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule mounts: octavia_api: init_container: null octavia_api: volumeMounts: - - name: pod-run-octavia - mountPath: /var/run/octavia + - name: pod-run-octavia + mountPath: /var/run/octavia volumes: - - name: pod-run-octavia - emptyDir: {} + - name: pod-run-octavia + emptyDir: {} octavia_worker: init_container: null octavia_worker: volumeMounts: - - name: pod-run-octavia - mountPath: /var/run/octavia - volumes: - - name: pod-run-octavia - emptyDir: {} - octavia_housekeeping: - init_container: null - octavia_housekeeping: - volumeMounts: - volumes: - octavia_health_manager: - init_container: null - octavia_health_manager: - volumeMounts: - volumes: - octavia_bootstrap: - init_container: null - octavia_bootstrap: - volumeMounts: + - name: pod-run-octavia + mountPath: /var/run/octavia volumes: + - name: pod-run-octavia + emptyDir: {} octavia_driver_agent: init_container: null octavia_bootstrap: volumeMounts: volumes: - replicas: - api: 1 - worker: 1 - housekeeping: 1 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - daemonsets: - pod_replacement_strategy: RollingUpdate - health_manager: - enabled: true - min_ready_seconds: 0 - max_unavailable: 1 - disruption_budget: - api: - min_available: 0 - termination_grace_period: - api: - timeout: 30 - resources: - enabled: true - api: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - worker: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - housekeeping: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - health_manager: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - cpu: "2000m" - jobs: - bootstrap: - requests: - memory: "64Mi" - cpu: "100m" - limits: - 
memory: "4096Mi" - rabbit_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_drop: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_endpoints: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_service: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_user: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - tests: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - image_repo_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - -network_policy: - octavia: - ingress: - - {} manifests: - configmap_bin: true - configmap_etc: true - daemonset_health_manager: true - deployment_api: true - deployment_worker: true - deployment_housekeeping: true ingress_api: false - job_bootstrap: true job_db_init: false - job_db_sync: true - job_db_drop: false - job_image_repo_sync: true job_rabbit_init: false - job_ks_endpoints: true - job_ks_service: true - job_ks_user: true - pdb_api: true - pod_rally_test: false - network_policy: false - secret_credential_keys: true - secret_db: true secret_ingress_tls: false - secret_keystone: true - secret_rabbitmq: true - secret_registry: true service_ingress_api: false - service_api: true diff --git a/base-helm-configs/placement/placement-helm-overrides.yaml b/base-helm-configs/placement/placement-helm-overrides.yaml index 329de1432..e397eae11 100644 --- a/base-helm-configs/placement/placement-helm-overrides.yaml +++ b/base-helm-configs/placement/placement-helm-overrides.yaml @@ -1,513 +1,96 @@ -release_group: null - -labels: - api: - node_selector_key: openstack-control-plane - node_selector_value: enabled - job: - node_selector_key: openstack-control-plane - node_selector_value: enabled - +--- images: - pull_policy: IfNotPresent tags: - placement: "quay.io/rackspace/rackerlabs-placement:2024.1-ubuntu_jammy" - ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - ks_endpoints: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" db_drop: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" - placement_db_sync: "quay.io/rackspace/rackerlabs-placement:2024.1-ubuntu_jammy" + db_init: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" dep_check: "quay.io/rackspace/rackerlabs-kubernetes-entrypoint:v1.0.0" image_repo_sync: "quay.io/rackspace/rackerlabs-docker:17.07.0" - local_registry: - active: false - exclude: - - dep_check - - image_repo_sync - -network: - api: - port: 8778 - ingress: - public: true - classes: - namespace: "nginx" - cluster: "nginx-openstack" - annotations: - nginx.ingress.kubernetes.io/rewrite-target: / - external_policy_local: false - node_port: - enabled: false - port: 30778 + ks_endpoints: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + ks_service: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + ks_user: "quay.io/rackspace/rackerlabs-heat:2024.1-ubuntu_jammy" + placement: "quay.io/rackspace/rackerlabs-placement:2024.1-ubuntu_jammy" + placement_db_sync: "quay.io/rackspace/rackerlabs-placement:2024.1-ubuntu_jammy" conf: - software: - apache2: - binary: apache2 - start_parameters: -DFOREGROUND - # Enable/Disable modules - # a2enmod: 
- # - headers - # - rewrite - # a2dismod: - # - status - a2enmod: null - a2dismod: null - policy: {} + logging: + logger_root: + handlers: + - stdout + level: INFO placement: - DEFAULT: - debug: false - use_syslog: false - log_config_append: /etc/placement/logging.conf - placement: - randomize_allocation_candidates: true - placement_database: - connection: null - max_retries: -1 keystone_authtoken: - service_token_roles: service - service_token_roles_required: true - auth_version: v3 auth_type: password + auth_version: v3 memcache_security_strategy: ENCRYPT + service_token_roles: service + service_token_roles_required: true service_type: placement - oslo_messaging_notifications: - driver: messagingv2 oslo_concurrency: lock_path: /tmp/octavia + oslo_messaging_notifications: + driver: messagingv2 oslo_messaging_rabbit: amqp_durable_queues: false - # We define use of quorum queues via kustomize but this was enabling HA queues instead - # ha_queues are deprecated, explicitly set to false and set quorum_queue true rabbit_ha_queues: false rabbit_quorum_queue: true - # TODO: Not available until 2024.1, but once it is, we want to enable these! - # new feature ref; https://docs.openstack.org/releasenotes/oslo.messaging/2024.1.html rabbit_transient_quorum_queue: false use_queue_manager: false - # Reconnect after a node outage more quickly rabbit_interval_max: 10 # Send more frequent heartbeats and fail unhealthy nodes faster # heartbeat_timeout / heartbeat_rate / 2.0 = 30 / 3 / 2.0 = 5 # https://opendev.org/openstack/oslo.messaging/commit/36fb5bceabe08a982ebd52e4a8f005cd26fdf6b8 heartbeat_rate: 3 - heartbeat_timeout_threshold: 30 - # Setting lower kombu_reconnect_delay should resolve isssue with HA failing when one node is down + heartbeat_timeout_threshold: 60 + # NOTE (deprecation warning) heartbeat_in_pthread will be deprecated in 2024.2 + heartbeat_in_pthread: True + # Setting lower kombu_reconnect_delay should resolve issue with HA failing when one node is down # https://lists.openstack.org/pipermail/openstack-discuss/2023-April/033314.html # https://review.opendev.org/c/openstack/oslo.messaging/+/866617 kombu_reconnect_delay: 0.5 - logging: - loggers: - keys: - - root - - placement - handlers: - keys: - - stdout - - stderr - - "null" - formatters: - keys: - - context - - default - logger_root: - level: INFO - handlers: - - stdout - logger_placement: - level: INFO - handlers: - - stdout - qualname: placement - logger_amqp: - level: WARNING - handlers: stderr - qualname: amqp - logger_amqplib: - level: WARNING - handlers: stderr - qualname: amqplib - logger_eventletwsgi: - level: WARNING - handlers: stderr - qualname: eventlet.wsgi.server - logger_sqlalchemy: - level: WARNING - handlers: stderr - qualname: sqlalchemy - logger_boto: - level: WARNING - handlers: stderr - qualname: boto - handler_null: - class: logging.NullHandler - formatter: default - args: () - handler_stdout: - class: StreamHandler - args: (sys.stdout,) - formatter: context - handler_stderr: - class: StreamHandler - args: (sys.stderr,) - formatter: context - formatter_context: - class: oslo_log.formatters.ContextFormatter - datefmt: "%Y-%m-%d %H:%M:%S" - formatter_default: - format: "%(message)s" - datefmt: "%Y-%m-%d %H:%M:%S" - wsgi_placement: | - Listen 0.0.0.0:{{ tuple "placement" "service" "api" . 
| include "helm-toolkit.endpoints.endpoint_port_lookup" }} - LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined - LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" proxy - SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded - CustomLog /dev/stdout combined env=!forwarded - CustomLog /dev/stdout proxy env=forwarded - - WSGIDaemonProcess placement-api processes=4 threads=1 user=placement group=placement display-name=%{GROUP} - WSGIProcessGroup placement-api - WSGIScriptAlias / /var/www/cgi-bin/placement/placement-api - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%{cu}t %M" - - ErrorLog /dev/stdout - SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded - CustomLog /dev/stdout combined env=!forwarded - CustomLog /dev/stdout proxy env=forwarded - - Alias /placement /var/www/cgi-bin/placement/placement-api - - SetHandler wsgi-script - Options +ExecCGI - WSGIProcessGroup placement-api - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - + placement: + randomize_allocation_candidates: true + placement_database: + idle_timeout: 3600 + connection_recycle_time: 3600 + pool_timeout: 60 + placement_api_uwsgi: + uwsgi: + processes: 4 + threads: 2 endpoints: - cluster_domain_suffix: cluster.local - local_image_registry: - name: docker-registry - namespace: docker-registry - hosts: - default: localhost - internal: docker-registry - node: localhost - host_fqdn_override: - default: null - port: - registry: - node: 5000 - oci_image_registry: - name: oci-image-registry - namespace: oci-image-registry - auth: - enabled: false - placement: - username: placement - password: password - hosts: - default: localhost - host_fqdn_override: - default: null - port: - registry: - default: null - oslo_db: - auth: - admin: - username: root - password: password - secret: - tls: - internal: mariadb-tls-direct - placement: - username: placement - password: password - # NOTE: This should be the username/password used to access the nova_api - # database. This is required only if database migration from nova to - # placement is desired. - nova_api: - username: nova - password: password - hosts: - default: mariadb-cluster-primary - host_fqdn_override: - default: null - path: /placement - scheme: mysql+pymysql - port: - mysql: - default: 3306 - oslo_cache: - auth: - # NOTE(portdirect): this is used to define the value for keystone - # authtoken cache encryption key, if not set it will be populated - # automatically with a random value, but to take advantage of - # this feature all services should be set to use the same key, - # and memcache service. 
- memcache_secret_key: null - hosts: - default: memcached - host_fqdn_override: - default: null - port: - memcache: - default: 11211 + fluentd: + namespace: fluentbit identity: - name: keystone - auth: - admin: - region_name: RegionOne - username: admin - password: password - project_name: admin - user_domain_name: default - project_domain_name: default - placement: - role: admin - region_name: RegionOne - username: placement - password: password - project_name: service - user_domain_name: service - project_domain_name: service - hosts: - default: keystone - internal: keystone-api - host_fqdn_override: - default: null - path: - default: /v3 - scheme: - default: http port: api: default: 5000 internal: 5000 public: 80 service: 5000 - placement: - name: placement + oslo_db: + host_fqdn_override: + default: mariadb-cluster-primary.openstack.svc.cluster.local hosts: - default: placement-api - public: placement + default: mariadb-cluster-primary + oslo_cache: host_fqdn_override: - default: null - path: - default: / - scheme: - default: 'http' - service: 'http' - port: - api: - default: 8778 - public: 80 - internal: 8778 - service: 8778 - fluentd: - namespace: fluentbit - name: fluentd + default: memcached.openstack.svc.cluster.local hosts: - default: fluentd-logging + default: memcached + oslo_messaging: host_fqdn_override: - default: null - path: - default: null - scheme: 'http' - port: - service: - default: 24224 - metrics: - default: 24220 - -pod: - security_context: - placement: - pod: - runAsUser: 42424 - container: - placement_api: - readOnlyRootFilesystem: false - runAsUser: 0 - placement_mysql_migration: - readOnlyRootFilesystem: false - runAsUser: 0 - affinity: - anti: - type: - default: preferredDuringSchedulingIgnoredDuringExecution - topologyKey: - default: kubernetes.io/hostname - tolerations: - placement: - enabled: false - tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule - mounts: - placement: - init_container: null - placement: - volumeMounts: - volumes: - replicas: - api: 1 - lifecycle: - upgrades: - deployments: - revision_history: 3 - pod_replacement_strategy: RollingUpdate - rolling_update: - max_unavailable: 1 - max_surge: 3 - disruption_budget: - api: - min_available: 0 - termination_grace_period: - api: - timeout: 30 - resources: - enabled: true - api: - requests: - memory: "128Mi" - cpu: "100m" - limits: - memory: "2048Mi" - cpu: "2000m" - jobs: - db_init: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_sync: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - db_drop: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - ks_endpoints: - requests: - memory: "64Mi" - limits: - memory: "4096Mi" - ks_service: - requests: - memory: "64Mi" - limits: - memory: "4096Mi" - ks_user: - requests: - memory: "64Mi" - cpu: "100m" - limits: - memory: "4096Mi" - -secrets: - identity: - admin: placement-keystone-admin - placement: placement-keystone-user - oslo_db: - admin: placement-db-admin - placement: placement-db-user - tls: - placement: - api: - public: placement-tls-public - internal: placement-tls-api - oci_image_registry: - placement: placement-oci-image-registry + default: rabbitmq.openstack.svc.cluster.local + hosts: + default: rabbitmq-nodes dependencies: - dynamic: - common: - local_image_registry: - jobs: - - image-repo-sync - services: - - endpoint: node - service: 
local_image_registry static: - api: - jobs: - - placement-db-sync - - placement-ks-service - - placement-ks-user - - placement-ks-endpoints - ks_endpoints: - jobs: - - placement-ks-user - - placement-ks-service - services: - - endpoint: internal - service: identity - ks_service: - services: - - endpoint: internal - service: identity - ks_user: - services: - - endpoint: internal - service: identity - db_drop: - services: - - endpoint: internal - service: oslo_db - db_init: - services: - - endpoint: internal - service: oslo_db db_sync: jobs: [] - # - placement-db-init - services: - - endpoint: internal - service: oslo_db - -# NOTE(helm_hook): helm_hook might break for helm2 binary. -# set helm3_hook: false when using the helm2 binary. -helm3_hook: true - -tls: - identity: false - oslo_messaging: false - oslo_db: false manifests: - certificates: false - configmap_bin: true - configmap_etc: true - deployment: true - job_image_repo_sync: true + ingress: false job_db_init: false - job_db_sync: true - job_db_drop: false - job_ks_endpoints: true - job_ks_service: true - job_ks_user: true - network_policy: false - secret_db: true secret_ingress_tls: false - secret_registry: true - pdb: true - ingress: false - secret_keystone: true service_ingress: false - service: true diff --git a/base-helm-configs/prometheus-snmp-exporter/values.yaml b/base-helm-configs/prometheus-snmp-exporter/values.yaml new file mode 100644 index 000000000..86a524e67 --- /dev/null +++ b/base-helm-configs/prometheus-snmp-exporter/values.yaml @@ -0,0 +1,268 @@ +restartPolicy: Always + +kind: Deployment + +image: + repository: quay.io/prometheus/snmp-exporter + # if not set appVersion field from Chart.yaml is used + tag: "" + pullPolicy: IfNotPresent + +imagePullSecrets: [] +nodeSelector: {} +tolerations: [] +affinity: {} +topologySpreadConstraints: [] + +## Assign a PriorityClassName to pods if set +# priorityClassName: "" + +## Provide a namespace to substitude for the namespace on resources +namespaceOverride: "" + +## Security context to be added to snmp-exporter pods +securityContext: {} + # fsGroup: 1000 + # runAsUser: 1000 + # runAsNonRoot: true + +## Security context to be added to snmp-exporter containers +containerSecurityContext: + runAsNonRoot: true + runAsUser: 1000 + readOnlyRootFilesystem: true + +## Additional labels to add to all resources +customLabels: {} + # app: snmp-exporter + +# config: + +extraConfigmapMounts: [] + # - name: snmp-exporter-configmap + # mountPath: /run/secrets/snmp-exporter + # subPath: snmp.yaml # (optional) + # configMap: snmp-exporter-configmap-configmap + # readOnly: true + # defaultMode: 420 + +## Additional init containers +# These will be added to the prometheus-snmp-exporter pod. +extraInitContainers: [] + # - name: init-myservice + # image: busybox:1.28 + # command: [ 'sh', '-c', "sleep 10; done" ] + +## Additional secret mounts +# Defines additional mounts with secrets. Secrets must be manually created in the namespace. +extraSecretMounts: [] + # - name: secret-files + # mountPath: /run/secrets/snmp-exporter + # secretName: snmp-exporter-secret-files + # readOnly: true + # defaultMode: 420 + +# Additional volumes, e.g. 
for secrets used in an extraContainer +extraVolumes: [] + +# Additional volume mounts for snmp-exporter container +extraVolumeMounts: [] + +## For RBAC support: +rbac: + # Specifies whether RBAC resources should be created + create: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + +resources: {} + # limits: + # memory: 300Mi + # requests: + # memory: 50Mi + +livenessProbe: + httpGet: + path: / + port: http +readinessProbe: + httpGet: + path: / + port: http + +service: + annotations: {} + type: ClusterIP + port: 9116 + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + +## An Ingress resource can provide name-based virtual hosting and TLS +## termination among other things for CouchDB deployments which are accessed +## from outside the Kubernetes cluster. +## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ +ingress: + enabled: false + ## Class name can be set since version 1.18. + className: "" + ## Path type is required since version 1.18. Default: ImplementationSpecific. + pathType: "" + hosts: [] + # - chart-example.local + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + tls: [] + # Secrets must be manually created in the namespace. + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +podAnnotations: {} + +extraArgs: [] +# --history.limit=1000 + +envFrom: [] +# - secretRef: +# name: name-of-secret + +replicas: 1 + +# Uncomment to set a specific revisionHistoryLimit. Defaults to 10 as per Kubernetes API if not set. +# revisionHistoryLimit: 3 + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/jimmidyson/configmap-reload +## +configmapReload: + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: quay.io/prometheus-operator/prometheus-config-reloader + tag: v0.79.2 + pullPolicy: IfNotPresent + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + +# Enable this if you're using https://github.com/prometheus-operator/prometheus-operator +# A service monitor will be created per each item in serviceMonitor.params[] +serviceMonitor: + enabled: false + # Default value is the namespace the release is deployed to + # namespace: monitoring + + path: /snmp + + # Fall back to the prometheus default unless specified + # interval: 10s + scrapeTimeout: 10s + module: + - if_mib + # Auth used for scraping. + auth: + - public_v2 + + # Relabelings dynamically rewrite the label set of a target before it gets scraped. + # Set if defined unless overriden by params.relabelings. + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.RelabelConfig + relabelings: [] + # - sourceLabels: [__address__] + # targetLabel: __param_target + # - sourceLabels: [__param_target] + # targetLabel: instance + + # Metric relabeling is applied to samples as the last step before ingestion. + # Set if defined unless overriden by params.additionalMetricsRelabels. + # This sets fixed relabel configs with action 'replace'. 
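For context on the relabelings field above: the snmp-exporter is scraped with the device address passed as the target query parameter, which is exactly what the commented hints sketch. Filled in, the usual pair of rules looks like this (illustrative only; the chart leaves the list empty by default):

relabelings:
  - sourceLabels: [__address__]
    targetLabel: __param_target        # device address becomes the ?target= parameter
  - sourceLabels: [__param_target]
    targetLabel: instance              # keep the device address as the instance label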
+ additionalMetricsRelabels: {} + # targetLabel1: replacementValue1 + # targetLabel2: replacementValue2 + + # Metric relabeling is applied to samples as the last step before ingestion. + # Set if defined unless overridden by params.additionalMetricsRelabelConfigs. + # This allows setting arbitrary relabel configs. + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.RelabelConfig + additionalMetricsRelabelConfigs: [] + # - sourceLabels: [__name__] + # targetLabel: __name__ + # action: replace + # regex: (.*) + # replacement: prefix_$1 + + # Label for selecting service monitors as set in Prometheus CRD. + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.PrometheusSpec + selector: + prometheus: kube-prometheus + + # Retain the job and instance labels of the metrics retrieved by the snmp-exporter + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.Endpoint + honorLabels: true + + params: [] + # Human readable URL that will appear in Prometheus / AlertManager + # - name: localhost + # The target that snmp will scrape + # target: 127.0.0.1 + # Module used for scraping. Overrides value set in serviceMonitor.module + # module: + # - if_mib + # Map of labels for ServiceMonitor. Overrides value set in serviceMonitor.selector + # labels: {} + # release: kube-prometheus-stack + # Scraping interval. Overrides value set in serviceMonitor.interval + # interval: 30s + # Scrape timeout. Overrides value set in serviceMonitor.scrapeTimeout + # scrapeTimeout: 30s + # Relabelings. Overrides value set in serviceMonitor.relabelings + # relabelings: [] + # Map of metric labels and values to add. Overrides value set in serviceMonitor.additionalMetricsRelabels + # This sets fixed relabel configs with action 'replace'. + # additionalMetricsRelabels: {} + # Metrics relabelings. 
Overrides value set in serviceMonitor.additionalMetricsRelabelConfigs + # additionalMetricsRelabelConfigs: [] + + ## If true, a ServiceMonitor CRD is created for snmp-exporter itself + ## + selfMonitor: + enabled: true + additionalMetricsRelabels: {} + additionalRelabeling: [] + labels: {} + path: /metrics + scheme: http + tlsConfig: {} + interval: 30s + scrapeTimeout: 30s + +# Extra manifests to deploy as an array +extraManifests: [] + # - | + # apiVersion: v1 + # kind: ConfigMap + # metadata: + # labels: + # name: prometheus-extra + # data: + # extra-data: "value" + +strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + type: RollingUpdate diff --git a/base-helm-configs/prometheus/alerting_rules.yaml b/base-helm-configs/prometheus/alerting_rules.yaml index 6117bd8ad..46d1a94d9 100644 --- a/base-helm-configs/prometheus/alerting_rules.yaml +++ b/base-helm-configs/prometheus/alerting_rules.yaml @@ -1,4 +1,50 @@ additionalPrometheusRulesMap: + openstack-resource-alerts: + groups: + - name: Compute Resource Alerts + rules: + - alert: AbnormalInstanceFailures + expr: count(count(last_over_time(openstack_nova_server_status{status=~"BUILD|ERROR"}[60m])) by (id)) /count(count(last_over_time(openstack_nova_server_status{status="ACTIVE"}[60m])) by (id)) * 100 >= 20 + labels: + severity: critical + annotations: + summary: "Instance build failure rate is abnormally high" + description: "This indicates a major problem building compute instances view logs and take action to resolve the build failures" + - alert: InstancesStuckInFailureState + expr: count(openstack_nova_server_status{status=~"BUILD|ERROR"}) > 0 + for: 90m + labels: + severity: warning + annotations: + summary: "Instances stuck in failure state for a prolonged period" + description: "There are instance stuck in a building or error state for a prolonged period that need be cleaned up" + - name: Image Resource Alerts + rules: + - alert: AbnormalImageFailures + expr: count(count(last_over_time(openstack_glance_image_created_at{status!~"active|deactivated"}[60m])) by (id)) / count(count(last_over_time(openstack_glance_image_created_at{status="active"}[60m])) by (id)) * 100 >= 20 + labels: + severity: critical + annotations: + summary: "Image create failure rate is abnormally high" + description: "This indicates a major problem creating images view logs and take action to resolve the build failures" + - alert: ImagesStuckInFailureState + expr: count(openstack_glance_image_created_at{status="failure"}) > 0 + for: 90m + labels: + severity: warning + annotations: + summary: "Images stuck in failure state for a prolonged period" + description: "There are images stuck in a failures state for a prolonged period that need be cleaned up" + - name: Octavia Resource Alerts + rules: + - alert: LoadbalancersInError + expr: count(openstack_loadbalancer_loadbalancer_status{provisioning_status="ERROR"}) > 0 + for: 90m + labels: + severity: critical + annotations: + summary: "Loadbalancer stuck in error state for a prolonged period" + description: "This may indicate a potential problem with failover and/or healthmanager services. 
This could also indicate other problems building load balancers in general" rabbitmq-alerts: groups: - name: Prometheus Alerts @@ -141,3 +187,15 @@ additionalPrometheusRulesMap: annotations: summary: Second successive MariaDB backup not successful within 1 hour of scheduled run description: "Second successive MariaDB backup not successful within 1 hour of scheduled run" + fluentbit-serviceMonitor-alert: + groups: + - name: fluentbit serviceMonitor alert + rules: + - alert: MissingFluentbitServiceMonitor + expr: count(up{job="fluentbit-fluent-bit"}) == 0 + for: 1m + labels: + severity: critical + annotations: + summary: "ServiceMonitor 'fluentbit-fluent-bit' is either down or missing." + description: "Check if the Fluentbit ServiceMonitor is properly configured and deployed." diff --git a/base-helm-configs/sealed-secrets/helm-sealed-secrets-overrides.yaml b/base-helm-configs/sealed-secrets/helm-sealed-secrets-overrides.yaml new file mode 100644 index 000000000..ea6bba1a5 --- /dev/null +++ b/base-helm-configs/sealed-secrets/helm-sealed-secrets-overrides.yaml @@ -0,0 +1,22 @@ +--- +fullnameOverride: "sealed-secrets-controller" + +secretName: "sealed-secrets-key" + +podSecurityContext: + fsGroup: 65534 + +affinity: + enableAntiAffinity: true + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: In + values: + - worker + +rbac: + clusterRoleName: "secrets-unsealer" + namespacedRolesName: "secrets-unsealer" diff --git a/base-helm-configs/topolvm/helm-topolvm-overrides.yaml b/base-helm-configs/topolvm/helm-topolvm-overrides.yaml new file mode 100644 index 000000000..b839deed3 --- /dev/null +++ b/base-helm-configs/topolvm/helm-topolvm-overrides.yaml @@ -0,0 +1,739 @@ +# useLegacy -- If true, the legacy plugin name and legacy custom resource group is used(topolvm.cybozu.com). +useLegacy: false + +image: + # image.repository -- TopoLVM image repository to use. + repository: ghcr.io/topolvm/topolvm-with-sidecar + + # image.tag -- TopoLVM image tag to use. + # @default -- `{{ .Chart.AppVersion }}` + tag: # 0.18.1 + + # image.pullPolicy -- TopoLVM image pullPolicy. + pullPolicy: # Always + + # image.pullSecrets -- List of imagePullSecrets. + pullSecrets: [] + + csi: + # image.csi.nodeDriverRegistrar -- Specify csi-node-driver-registrar: image. + # If not specified, `ghcr.io/topolvm/topolvm-with-sidecar:{{ .Values.image.tag }}` will be used. + nodeDriverRegistrar: # registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.2.0 + + # image.csi.csiProvisioner -- Specify csi-provisioner image. + # If not specified, `ghcr.io/topolvm/topolvm-with-sidecar:{{ .Values.image.tag }}` will be used. + csiProvisioner: # registry.k8s.io/sig-storage/csi-provisioner:v2.2.1 + + # image.csi.csiResizer -- Specify csi-resizer image. + # If not specified, `ghcr.io/topolvm/topolvm-with-sidecar:{{ .Values.image.tag }}` will be used. + csiResizer: # registry.k8s.io/sig-storage/csi-resizer:v1.2.0 + + # image.csi.csiSnapshotter -- Specify csi-snapshot image. + # If not specified, `ghcr.io/topolvm/topolvm-with-sidecar:{{ .Values.image.tag }}` will be used. + csiSnapshotter: # registry.k8s.io/sig-storage/csi-snapshotter:v5.0.1 + + # image.csi.livenessProbe -- Specify livenessprobe image. + # If not specified, `ghcr.io/topolvm/topolvm-with-sidecar:{{ .Values.image.tag }}` will be used. 
+ livenessProbe: # registry.k8s.io/sig-storage/livenessprobe:v2.3.0 + +# A scheduler extender for TopoLVM +scheduler: + # scheduler.enabled -- If true, enable scheduler extender for TopoLVM + enabled: false + + # scheduler.args -- Arguments to be passed to the command. + args: [] + + # scheduler.type -- If you run with a managed control plane (such as GKE, AKS, etc), topolvm-scheduler should be deployed as Deployment and Service. + # topolvm-scheduler should otherwise be deployed as DaemonSet in unmanaged (i.e. bare metal) deployments. + # possible values: daemonset/deployment + type: daemonset + + # Use only if you choose `scheduler.type` deployment + deployment: + # scheduler.deployment.replicaCount -- Number of replicas for Deployment. + replicaCount: 2 + + # Use only if you choose `scheduler.type` deployment + service: + # scheduler.service.type -- Specify Service type. + type: LoadBalancer + # scheduler.service.clusterIP -- Specify Service clusterIP. + clusterIP: # None + # scheduler.service.nodePort -- (int) Specify nodePort. + nodePort: # 30251 + + # scheduler.updateStrategy -- Specify updateStrategy on the Deployment or DaemonSet. + updateStrategy: {} + # rollingUpdate: + # maxUnavailable: 1 + # type: RollingUpdate + + # scheduler.terminationGracePeriodSeconds -- (int) Specify terminationGracePeriodSeconds on the Deployment or DaemonSet. + terminationGracePeriodSeconds: # 30 + + # scheduler.minReadySeconds -- (int) Specify minReadySeconds on the Deployment or DaemonSet. + minReadySeconds: # 0 + + # scheduler.affinity -- Specify affinity on the Deployment or DaemonSet. + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + + podDisruptionBudget: + # scheduler.podDisruptionBudget.enabled -- Specify podDisruptionBudget enabled. + enabled: true + + # scheduler.tolerations -- Specify tolerations on the Deployment or DaemonSet. + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + + # scheduler.nodeSelector -- Specify nodeSelector on the Deployment or DaemonSet. + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + nodeSelector: + node-role.kubernetes.io/control-plane: "" + + # scheduler.priorityClassName -- Specify priorityClassName on the Deployment or DaemonSet. + priorityClassName: system-cluster-critical + + # scheduler.schedulerOptions -- Tune the Node scoring. + # ref: https://github.com/topolvm/topolvm/blob/master/deploy/README.md + schedulerOptions: {} + # default-divisor: 1 + # divisors: + # ssd: 1 + # hdd: 10 + + profiling: + # scheduler.profiling.bindAddress -- Enables pprof profiling server. If empty, profiling is disabled. + bindAddress: "" + + options: + listen: + # scheduler.options.listen.host -- Host used by Probe. + host: localhost + # scheduler.options.listen.port -- Listen port. + port: 9251 + + # scheduler.podLabels -- Additional labels to be set on the scheduler pods. + podLabels: {} + # scheduler.labels -- Additional labels to be added to the Deployment or Daemonset. + labels: {} + +# lvmd service +lvmd: + # lvmd.managed -- If true, set up lvmd service with DaemonSet. + managed: true + + # lvmd.socketName -- Specify socketName. 
+ socketName: /run/topolvm/lvmd.sock + + # lvmd.deviceClasses -- Specify the device-class settings. + deviceClasses: + - name: general + volume-group: vg-general + default: true + spare-gb: 10 + + # lvmd.lvcreateOptionClasses -- Specify the lvcreate-option-class settings. + lvcreateOptionClasses: [] + # - name: ssd + # options: + # - --type=raid1 + + # lvmd.args -- Arguments to be passed to the command. + args: [] + + # lvmd.priorityClassName -- Specify priorityClassName. + priorityClassName: + + # lvmd.tolerations -- Specify tolerations. + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + + # lvmd.nodeSelector -- Specify nodeSelector. + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + nodeSelector: {} + + # lvmd.affinity -- Specify affinity. + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + affinity: {} + + # lvmd.volumes -- Specify volumes. + volumes: [] + # - name: lvmd-socket-dir + # hostPath: + # path: /run/topolvm + # type: DirectoryOrCreate + + # lvmd.volumeMounts -- Specify volumeMounts. + volumeMounts: [] + # - name: lvmd-socket-dir + # mountPath: /run/topolvm + + # lvmd.additionalVolumes -- Specify additional volumes without conflicting with default volumes + # most useful for initContainers but available to all containers in the pod. + additionalVolumes: [] + # - name: custom-config-map + # configMap: + # # Provide the name of the ConfigMap containing the files you want + # # to add to the container + # name: special-config + + # lvmd.env -- extra environment variables + env: [] + # - name: LVM_SYSTEM_DIR + # value: /tmp + + # lvmd.additionalConfigs -- Define additional LVM Daemon configs if you have additional types of nodes. + # Please ensure nodeSelectors are non overlapping. + additionalConfigs: [] + # - tolerations: [] + # nodeSelector: {} + # deviceClasses: + # - name: ssd + # volume-group: myvg2 + # default: true + # spare-gb: 10 + + # lvmd.updateStrategy -- Specify updateStrategy. + updateStrategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 50% + # maxUnavailable: 50% + + # lvmd.podLabels -- Additional labels to be set on the lvmd service pods. + podLabels: {} + # lvmd.labels -- Additional labels to be added to the Daemonset. + labels: {} + + # lvmd.initContainers -- Additional initContainers for the lvmd service. + initContainers: [] + + profiling: + # lvmd.profiling.bindAddress -- Enables pprof profiling server. If empty, profiling is disabled. + bindAddress: "" + + metrics: + # lvmd.metrics.enabled -- If true, enable scraping of metrics by Prometheus. + enabled: true + # lvmd.metrics.annotations -- Annotations for Scrape used by Prometheus. + annotations: + prometheus.io/port: metrics + + prometheus: + podMonitor: + # lvmd.prometheus.podMonitor.enabled -- Set this to `true` to create PodMonitor for Prometheus operator. + enabled: false + + # lvmd.prometheus.podMonitor.additionalLabels -- Additional labels that can be used so PodMonitor will be discovered by Prometheus. + additionalLabels: {} + + # lvmd.prometheus.podMonitor.namespace -- Optional namespace in which to create PodMonitor. + namespace: "" + + # lvmd.prometheus.podMonitor.interval -- Scrape interval. If not set, the Prometheus default scrape interval is used. + interval: "" + + # lvmd.prometheus.podMonitor.scrapeTimeout -- Scrape timeout. If not set, the Prometheus default scrape timeout is used. 
+ scrapeTimeout: "" + + # lvmd.prometheus.podMonitor.relabelings -- RelabelConfigs to apply to samples before scraping. + relabelings: [] + # - sourceLabels: [__meta_kubernetes_service_label_cluster] + # targetLabel: cluster + # regex: (.*) + # replacement: ${1} + # action: replace + + # lvmd.prometheus.podMonitor.metricRelabelings -- MetricRelabelConfigs to apply to samples before ingestion. + metricRelabelings: [] + # - sourceLabels: [__meta_kubernetes_service_label_cluster] + # targetLabel: cluster + # regex: (.*) + # replacement: ${1} + # action: replace + +# CSI node service +node: + # node.lvmdEmbedded -- Specify whether to embed lvmd in the node container. + # Should not be used in conjunction with lvmd.managed otherwise lvmd will be started twice. + lvmdEmbedded: false + # node.lvmdSocket -- Specify the socket to be used for communication with lvmd. + lvmdSocket: /run/topolvm/lvmd.sock + # node.kubeletWorkDirectory -- Specify the work directory of Kubelet on the host. + # For example, on microk8s it needs to be set to `/var/snap/microk8s/common/var/lib/kubelet` + kubeletWorkDirectory: /var/lib/kubelet + + # node.args -- Arguments to be passed to the command. + args: [] + + # node.securityContext. -- Container securityContext. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + securityContext: + privileged: true + + metrics: + # node.metrics.enabled -- If true, enable scraping of metrics by Prometheus. + enabled: true + # node.metrics.annotations -- Annotations for Scrape used by Prometheus. + annotations: + prometheus.io/port: metrics + + prometheus: + podMonitor: + # node.prometheus.podMonitor.enabled -- Set this to `true` to create PodMonitor for Prometheus operator. + enabled: false + + # node.prometheus.podMonitor.additionalLabels -- Additional labels that can be used so PodMonitor will be discovered by Prometheus. + additionalLabels: {} + + # node.prometheus.podMonitor.namespace -- Optional namespace in which to create PodMonitor. + namespace: "" + + # node.prometheus.podMonitor.interval -- Scrape interval. If not set, the Prometheus default scrape interval is used. + interval: "" + + # node.prometheus.podMonitor.scrapeTimeout -- Scrape timeout. If not set, the Prometheus default scrape timeout is used. + scrapeTimeout: "" + + # node.prometheus.podMonitor.relabelings -- RelabelConfigs to apply to samples before scraping. + relabelings: [] + # - sourceLabels: [__meta_kubernetes_service_label_cluster] + # targetLabel: cluster + # regex: (.*) + # replacement: ${1} + # action: replace + + # node.prometheus.podMonitor.metricRelabelings -- MetricRelabelConfigs to apply to samples before ingestion. + metricRelabelings: [] + # - sourceLabels: [__meta_kubernetes_service_label_cluster] + # targetLabel: cluster + # regex: (.*) + # replacement: ${1} + # action: replace + + # node.priorityClassName -- Specify priorityClassName. + priorityClassName: + + # node.tolerations -- Specify tolerations. + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + + # node.nodeSelector -- Specify nodeSelector. + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + nodeSelector: {} + + # node.affinity -- Specify affinity. + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + affinity: {} + + # node.volumes -- Specify volumes. 
+ volumes: [] + # - name: registration-dir + # hostPath: + # path: /var/lib/kubelet/plugins_registry/ + # type: Directory + # - name: node-plugin-dir + # hostPath: + # path: /var/lib/kubelet/plugins/topolvm.io/node + # type: DirectoryOrCreate + # - name: csi-plugin-dir + # hostPath: + # path: /var/lib/kubelet/plugins/kubernetes.io/csi + # type: DirectoryOrCreate + # - name: pod-volumes-dir + # hostPath: + # path: /var/lib/kubelet/pods/ + # type: DirectoryOrCreate + # - name: lvmd-socket-dir + # hostPath: + # path: /run/topolvm + # type: Directory + + # node.additionalVolumes -- Specify additional volumes without conflicting with default volumes + # most useful for initContainers but available to all containers in the pod. + additionalVolumes: [] + # - name: custom-config-map + # configMap: + # # Provide the name of the ConfigMap containing the files you want + # # to add to the container + # name: special-config + + volumeMounts: + # node.volumeMounts.topolvmNode -- Specify volumes. + topolvmNode: [] + # - name: node-plugin-dir + # mountPath: /var/lib/kubelet/plugins/topolvm.io/node + # - name: csi-plugin-dir + # mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi + # mountPropagation: "Bidirectional" + # - name: pod-volumes-dir + # mountPath: /var/lib/kubelet/pods + # mountPropagation: "Bidirectional" + # - name: lvmd-socket-dir + # mountPath: /run/topolvm + + # node.updateStrategy -- Specify updateStrategy. + updateStrategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 50% + # maxUnavailable: 50% + + # node.podLabels -- Additional labels to be set on the node pods. + podLabels: {} + # node.labels -- Additional labels to be added to the Daemonset. + labels: {} + + profiling: + # node.profiling.bindAddress -- Enables pprof profiling server. if empty profiling is disabled. + bindAddress: "" + + # node.initContainers -- Additional initContainers for the node service. + initContainers: [] + +# CSI controller service +controller: + # controller.replicaCount -- Number of replicas for CSI controller service. + replicaCount: 1 + + # controller.args -- Arguments to be passed to the command. + args: [] + + storageCapacityTracking: + # controller.storageCapacityTracking.enabled -- Enable Storage Capacity Tracking for csi-provisioner. + enabled: true + + securityContext: + # controller.securityContext.enabled -- Enable securityContext. + enabled: true + + nodeFinalize: + # controller.nodeFinalize.skipped -- Skip automatic cleanup of PhysicalVolumeClaims when a Node is deleted. + skipped: false + + leaderElection: + # controller.leaderElection.enabled -- Enable leader election for controller and all sidecars. + enabled: true + + prometheus: + podMonitor: + # controller.prometheus.podMonitor.enabled -- Set this to `true` to create PodMonitor for Prometheus operator. + enabled: false + + # controller.prometheus.podMonitor.additionalLabels -- Additional labels that can be used so PodMonitor will be discovered by Prometheus. + additionalLabels: {} + + # controller.prometheus.podMonitor.namespace -- Optional namespace in which to create PodMonitor. + namespace: "" + + # controller.prometheus.podMonitor.interval -- Scrape interval. If not set, the Prometheus default scrape interval is used. + interval: "" + + # controller.prometheus.podMonitor.scrapeTimeout -- Scrape timeout. If not set, the Prometheus default scrape timeout is used. + scrapeTimeout: "" + + # controller.prometheus.podMonitor.relabelings -- RelabelConfigs to apply to samples before scraping. 
+ relabelings: [] + # - sourceLabels: [__meta_kubernetes_service_label_cluster] + # targetLabel: cluster + # regex: (.*) + # replacement: ${1} + # action: replace + + # controller.prometheus.podMonitor.metricRelabelings -- MetricRelabelConfigs to apply to samples before ingestion. + metricRelabelings: [] + # - sourceLabels: [__meta_kubernetes_service_label_cluster] + # targetLabel: cluster + # regex: (.*) + # replacement: ${1} + # action: replace + + # controller.terminationGracePeriodSeconds -- (int) Specify terminationGracePeriodSeconds. + terminationGracePeriodSeconds: # 10 + + # controller.priorityClassName -- Specify priorityClassName. + priorityClassName: + + # controller.updateStrategy -- Specify updateStrategy. + updateStrategy: {} + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 50% + # maxUnavailable: 50% + # + # For non-leader-election mode, you can use this non-HA non-conflicting strategy: + # type: Recreate + + + # controller.minReadySeconds -- (int) Specify minReadySeconds. + minReadySeconds: # 0 + + # controller.affinity -- Specify affinity. + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + affinity: | + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app.kubernetes.io/component + operator: In + values: + - controller + - key: app.kubernetes.io/name + operator: In + values: + - {{ include "topolvm.name" . }} + topologyKey: kubernetes.io/hostname + + # controller.tolerations -- Specify tolerations. + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + + # controller.nodeSelector -- Specify nodeSelector. + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + nodeSelector: + node-role.kubernetes.io/control-plane: "" + + # controller.volumes -- Specify volumes. + volumes: + - name: socket-dir + emptyDir: {} + + podDisruptionBudget: + # controller.podDisruptionBudget.enabled -- Specify podDisruptionBudget enabled. + enabled: true + + # controller.podLabels -- Additional labels to be set on the controller pod. + podLabels: {} + # controller.labels -- Additional labels to be added to the Deployment. + labels: {} + + profiling: + # controller.profiling.bindAddress -- Enables pprof profiling server. If empty, profiling is disabled. + bindAddress: "" + + # controller.initContainers -- Additional initContainers for the controller service. + initContainers: [] + +resources: + # resources.topolvm_node -- Specify resources. + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + topolvm_node: {} + # requests: + # memory: 100Mi + # cpu: 100m + # limits: + # memory: 500Mi + # cpu: 500m + # resources.csi_registrar -- Specify resources. + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + csi_registrar: {} + # requests: + # cpu: "25m" + # memory: "10Mi" + # limits: + # cpu: "200m" + # memory: "200Mi" + # resources.liveness_probe -- Specify resources. + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + liveness_probe: {} + # requests: + # cpu: "25m" + # memory: "10Mi" + # limits: + # cpu: "200m" + # memory: "200Mi" + # resources.topolvm_controller -- Specify resources. + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + topolvm_controller: {} + # requests: + # memory: "50Mi" + # cpu: "50m" + # limits: + # memory: "200Mi" + # cpu: "200m" + # resources.csi_provisioner -- Specify resources. 
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + csi_provisioner: {} + # requests: + # memory: "50Mi" + # cpu: "50m" + # limits: + # memory: "200Mi" + # cpu: "200m" + # resources.csi_resizer -- Specify resources. + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + csi_resizer: {} + # requests: + # memory: "50Mi" + # cpu: "50m" + # limits: + # memory: "200Mi" + # cpu: "200m" + # resources.csi_snapshotter -- Specify resources. + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + csi_snapshotter: {} + # requests: + # memory: "50Mi" + # cpu: "50m" + # limits: + # memory: "200Mi" + # cpu: "200m" + # resources.lvmd -- Specify resources. + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + lvmd: {} + # requests: + # memory: 100Mi + # cpu: 100m + # limits: + # memory: 500Mi + # cpu: 500m + # resources.topolvm_scheduler -- Specify resources. + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + topolvm_scheduler: {} + # requests: + # memory: "50Mi" + # cpu: "50m" + # limits: + # memory: "200Mi" + # cpu: "200m" + +env: + # env.topolvm_node -- Specify environment variables for topolvm_node container. + topolvm_node: [] + # env.csi_registrar -- Specify environment variables for csi_registrar container. + csi_registrar: [] + # env.liveness_probe -- Specify environment variables for liveness_probe container. + liveness_probe: [] + # env.topolvm_controller -- Specify environment variables for topolvm_controller container. + topolvm_controller: [] + # env.csi_provisioner -- Specify environment variables for csi_provisioner container. + csi_provisioner: [] + # env.csi_resizer -- Specify environment variables for csi_resizer container. + csi_resizer: [] + # env.csi_snapshotter -- Specify environment variables for csi_snapshotter container. + csi_snapshotter: [] + # To specify environment variables for lvmd, use lvmd.env instead. + # lvmd: [] + # env.topolvm_scheduler -- Specify environment variables for topolvm_scheduler container. + topolvm_scheduler: [] + +livenessProbe: + # livenessProbe.topolvm_node -- Specify resources. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + topolvm_node: + failureThreshold: + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 60 + # livenessProbe.csi_registrar -- Specify livenessProbe. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + csi_registrar: + failureThreshold: + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 60 + # livenessProbe.topolvm_controller -- Specify livenessProbe. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + topolvm_controller: + failureThreshold: + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 60 + # livenessProbe.lvmd -- Specify livenessProbe. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + lvmd: + failureThreshold: + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 60 + # livenessProbe.topolvm_scheduler -- Specify livenessProbe. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + topolvm_scheduler: + failureThreshold: + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 60 + +# storageClasses -- Whether to create storageclass(es) +# ref: https://kubernetes.io/docs/concepts/storage/storage-classes/ +storageClasses: + - name: general # Defines name of storage class. + storageClass: + # Supported filesystems are: ext4, xfs, and btrfs. + fsType: xfs + # reclaimPolicy + reclaimPolicy: # Delete + # Additional annotations + annotations: {} + # Default storage class for dynamic volume provisioning + # ref: https://kubernetes.io/docs/concepts/storage/dynamic-provisioning + isDefaultClass: true + # volumeBindingMode can be either WaitForFirstConsumer or Immediate. WaitForFirstConsumer is recommended because TopoLVM cannot schedule pods wisely if volumeBindingMode is Immediate. + volumeBindingMode: Immediate + # enables CSI drivers to expand volumes. This feature is available for Kubernetes 1.16 and later releases. + allowVolumeExpansion: true + additionalParameters: + topolvm.io/device-class: "general" + # mount options + mountOptions: [] + +webhook: + # webhook.caBundle -- Specify the certificate to be used for AdmissionWebhook. + caBundle: # Base64-encoded, PEM-encoded CA certificate that signs the server certificate. + # webhook.existingCertManagerIssuer -- Specify the cert-manager issuer to be used for AdmissionWebhook. + existingCertManagerIssuer: {} + # group: cert-manager.io + # kind: Issuer + # name: webhook-issuer + podMutatingWebhook: + # webhook.podMutatingWebhook.enabled -- Enable Pod MutatingWebhook. + enabled: false + # webhook.podMutatingWebhook.objectSelector -- Labels required on Pods for webhook action. + # **WARNING**: Modifying objectSelector can affect TopoLVM Pod scheduling. Proceed with caution. + ## ref: https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector + objectSelector: {} + # webhook: topolvm + pvcMutatingWebhook: + # webhook.pvcMutatingWebhook.enabled -- Enable PVC MutatingWebhook. + enabled: true + # webhook.pvcMutatingWebhook.objectSelector -- Labels required on PVCs for webhook action. + # **WARNING**: Modifying objectSelector can affect TopoLVM PVC management. Proceed with caution. + ## ref: https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector + objectSelector: {} + # webhook: topolvm + +# Container Security Context +# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +securityContext: + # securityContext.runAsUser -- Specify runAsUser. + runAsUser: 10000 + # securityContext.runAsGroup -- Specify runAsGroup. + runAsGroup: 10000 + +cert-manager: + # cert-manager.enabled -- Install cert-manager together. + ## ref: https://cert-manager.io/docs/installation/kubernetes/#installing-with-helm + enabled: false + +priorityClass: + # priorityClass.enabled -- Install priorityClass. + enabled: true + # priorityClass.name -- Specify priorityClass resource name. + name: topolvm + # priorityClass.value -- Specify priorityClass value. + value: 1000000 + +snapshot: + # snapshot.enabled -- Turn on the snapshot feature. 
+ enabled: true diff --git a/base-kustomize/argocd/base/kustomization.yaml b/base-kustomize/argocd/base/kustomization.yaml index 38a691025..5e77734ae 100644 --- a/base-kustomize/argocd/base/kustomization.yaml +++ b/base-kustomize/argocd/base/kustomization.yaml @@ -3,22 +3,4 @@ sortOptions: resources: - namespace.yaml -namespace: argocd1 -helmGlobals: - chartHome: ../charts/ -helmCharts: -- name: argo-cd - includeCRDs: true - valuesFile: values.yaml - releaseName: argocd - version: 5.51.5 - repo: https://argoproj.github.io/argo-helm -patches: -- target: - kind: Pod - patch: |- - $patch: delete - apiVersion: v1 - kind: Pod - metadata: - name: argocd-redis-ha-service-test +namespace: argocd diff --git a/base-kustomize/argocd/base/values.yaml b/base-kustomize/argocd/base/values.yaml deleted file mode 100644 index 6f11c07d9..000000000 --- a/base-kustomize/argocd/base/values.yaml +++ /dev/null @@ -1,39 +0,0 @@ -global: - nodeSelector: - openstack-control-plane: enabled - -redis-ha: - enabled: true - nodeSelector: - openstack-control-plane: enabled - -controller: - replicas: 1 - -configs: - cm: - kustomize.buildOptions: --enable-helm - -server: - autoscaling: - enabled: true - minReplicas: 2 - ingress: - annotations: - nginx.ingress.kubernetes.io/backend-protocol: HTTPS - enabled: false - ingressClassName: "nginx-cluster" - hosts: ["argocd.your.domain.tld"] - tls: - - hosts: - - argocd.your.domain.tld - secretName: argocd-tls-public - https: true - -repoServer: - autoscaling: - enabled: true - minReplicas: 2 - -applicationSet: - replicas: 2 diff --git a/base-kustomize/barbican/base/barbican-routes.yaml b/base-kustomize/barbican/base/barbican-routes.yaml deleted file mode 100644 index a35577d7f..000000000 --- a/base-kustomize/barbican/base/barbican-routes.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: barbican-route - namespace: openstack -spec: - parentRefs: - - name: flex-gateway - sectionName: cluster-tls - namespace: nginx-gateway - hostnames: - - "barbican.cluster.local" - rules: - - backendRefs: - - name: barbican-api - port: 9311 diff --git a/base-kustomize/barbican/base/kustomization.yaml b/base-kustomize/barbican/base/kustomization.yaml index da34adee2..1bc6f9365 100644 --- a/base-kustomize/barbican/base/kustomization.yaml +++ b/base-kustomize/barbican/base/kustomization.yaml @@ -5,4 +5,3 @@ resources: - barbican-rabbitmq-queue.yaml - all.yaml - hpa-barbican-api.yaml - - barbican-routes.yaml diff --git a/base-kustomize/cinder/base/cinder-routes.yaml b/base-kustomize/cinder/base/cinder-routes.yaml deleted file mode 100644 index 4f477904a..000000000 --- a/base-kustomize/cinder/base/cinder-routes.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: cinder-gateway-route - namespace: openstack - labels: - application: gateway-api - service: HTTPRoute - route: cinder -spec: - parentRefs: - - name: flex-gateway - sectionName: cinder-tls - namespace: nginx-gateway - hostnames: - - "cinder.cluster.local" - rules: - - backendRefs: - - name: cinder-api - port: 8776 diff --git a/base-kustomize/cinder/base/kustomization.yaml b/base-kustomize/cinder/base/kustomization.yaml index 1e4095148..2edd1a4f9 100644 --- a/base-kustomize/cinder/base/kustomization.yaml +++ b/base-kustomize/cinder/base/kustomization.yaml @@ -6,4 +6,3 @@ resources: - all.yaml - hpa-cinder-scheduler.yaml - hpa-cinder-api.yaml - - cinder-routes.yaml diff --git 
a/base-kustomize/envoyproxy-gateway/base/envoy-custom-proxy-config.yaml b/base-kustomize/envoyproxy-gateway/base/envoy-custom-proxy-config.yaml new file mode 100644 index 000000000..869445fff --- /dev/null +++ b/base-kustomize/envoyproxy-gateway/base/envoy-custom-proxy-config.yaml @@ -0,0 +1,39 @@ +--- +apiVersion: gateway.envoyproxy.io/v1alpha1 +kind: EnvoyProxy +metadata: + name: custom-proxy-config + namespace: envoy-gateway +spec: + provider: + type: Kubernetes + kubernetes: + envoyService: + externalTrafficPolicy: Cluster + envoyHpa: + minReplicas: 2 + maxReplicas: 9 + metrics: + - resource: + name: cpu + target: + averageUtilization: 60 + type: Utilization + type: Resource + - resource: + name: memory + target: + type: AverageValue + averageValue: 500Mi + type: Resource + envoyDeployment: + pod: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: In + values: + - worker diff --git a/base-kustomize/envoyproxy-gateway/base/envoy-endpoint-policies.yaml b/base-kustomize/envoyproxy-gateway/base/envoy-endpoint-policies.yaml new file mode 100644 index 000000000..a893b89a1 --- /dev/null +++ b/base-kustomize/envoyproxy-gateway/base/envoy-endpoint-policies.yaml @@ -0,0 +1,80 @@ +--- +apiVersion: gateway.envoyproxy.io/v1alpha1 +kind: ClientTrafficPolicy +metadata: + name: flex-gateway-client-policy + namespace: envoy-gateway +spec: + targetRefs: + - group: gateway.networking.k8s.io + kind: Gateway + name: flex-gateway + clientIPDetection: + xForwardedFor: + numTrustedHops: 2 + connection: + bufferLimit: 16384 + timeout: + http: + idleTimeout: 5s +--- +apiVersion: gateway.envoyproxy.io/v1alpha1 +kind: BackendTrafficPolicy +metadata: + name: least-request-policy + namespace: envoy-gateway +spec: + targetRefs: + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: custom-barbican-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: custom-cinder-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: custom-cloudformation-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: custom-glance-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: custom-gnocchi-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: custom-heat-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: custom-keystone-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: custom-magnum-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: custom-metadata-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: custom-neutron-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: custom-nova-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: custom-novnc-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: custom-octavia-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: custom-placement-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: internal-loki-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: grafana-gateway-route + - group: gateway.networking.k8s.io + kind: HTTPRoute + name: custom-skyline-gateway-route + loadBalancer: + type: LeastRequest diff --git a/base-kustomize/envoyproxy-gateway/base/envoy-gateway-namespace.yaml 
b/base-kustomize/envoyproxy-gateway/base/envoy-gateway-namespace.yaml new file mode 100644 index 000000000..2f3d337b5 --- /dev/null +++ b/base-kustomize/envoyproxy-gateway/base/envoy-gateway-namespace.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + labels: + kubernetes.io/metadata.name: envoy-gateway + pod-security.kubernetes.io/audit: privileged + pod-security.kubernetes.io/audit-version: latest + pod-security.kubernetes.io/enforce: privileged + pod-security.kubernetes.io/enforce-version: latest + pod-security.kubernetes.io/warn: privileged + pod-security.kubernetes.io/warn-version: latest + name: envoy-gateway diff --git a/base-kustomize/envoyproxy-gateway/base/envoy-gateway.yaml b/base-kustomize/envoyproxy-gateway/base/envoy-gateway.yaml new file mode 100644 index 000000000..9b4fd362d --- /dev/null +++ b/base-kustomize/envoyproxy-gateway/base/envoy-gateway.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: Gateway +metadata: + name: flex-gateway + namespace: envoy-gateway + annotations: + cert-manager.io/cluster-issuer: flex-gateway-issuer + acme.cert-manager.io/http01-edit-in-place: "true" +spec: + gatewayClassName: eg + infrastructure: + annotations: + metallb.universe.tf/address-pool: gateway-api-external + listeners: + - name: cluster-http + port: 80 + protocol: HTTP + hostname: "*.cluster.local" + allowedRoutes: + namespaces: + from: All + - name: cluster-tls + port: 443 + protocol: HTTPS + hostname: "*.cluster.local" + allowedRoutes: + namespaces: + from: All + tls: + mode: Terminate + certificateRefs: + - kind: Secret + name: wildcard-cluster-tls-secret diff --git a/base-kustomize/envoyproxy-gateway/base/envoy-gatewayclass.yaml b/base-kustomize/envoyproxy-gateway/base/envoy-gatewayclass.yaml new file mode 100644 index 000000000..ad00ef962 --- /dev/null +++ b/base-kustomize/envoyproxy-gateway/base/envoy-gatewayclass.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: gateway.networking.k8s.io/v1 +kind: GatewayClass +metadata: + name: eg + namespace: envoy-gateway +spec: + controllerName: gateway.envoyproxy.io/gatewayclass-controller + parametersRef: + group: gateway.envoyproxy.io + kind: EnvoyProxy + name: custom-proxy-config + namespace: envoy-gateway diff --git a/base-kustomize/envoyproxy-gateway/base/envoy-internal-gateway-issuer.yaml b/base-kustomize/envoyproxy-gateway/base/envoy-internal-gateway-issuer.yaml new file mode 100644 index 000000000..a6b321440 --- /dev/null +++ b/base-kustomize/envoyproxy-gateway/base/envoy-internal-gateway-issuer.yaml @@ -0,0 +1,33 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: flex-gateway-issuer + namespace: envoy-gateway +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: public-endpoint-ca-cert + namespace: cert-manager +spec: + isCA: true + commonName: public-endpoint-ca + secretName: public-endpoint-ca-secret + privateKey: + algorithm: ECDSA + size: 256 + issuerRef: + name: flex-gateway-issuer + kind: ClusterIssuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: public-endpoint-issuer + namespace: envoy-gateway +spec: + ca: + secretName: public-endpoint-ca-secret diff --git a/base-kustomize/envoyproxy-gateway/base/gatewayclass.yaml b/base-kustomize/envoyproxy-gateway/base/gatewayclass.yaml deleted file mode 100644 index db5b25275..000000000 --- a/base-kustomize/envoyproxy-gateway/base/gatewayclass.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: gateway.networking.k8s.io/v1 
-kind: GatewayClass -metadata: - name: envoyproxy -spec: - controllerName: gateway.envoyproxy.io/gatewayclass-controller diff --git a/base-kustomize/envoyproxy-gateway/base/kustomization.yaml b/base-kustomize/envoyproxy-gateway/base/kustomization.yaml index a3e01b753..9befd997c 100644 --- a/base-kustomize/envoyproxy-gateway/base/kustomization.yaml +++ b/base-kustomize/envoyproxy-gateway/base/kustomization.yaml @@ -1,13 +1,9 @@ sortOptions: order: fifo resources: - - './namespace.yaml' - - './gatewayclass.yaml' -namespace: envoy-gateway-system -helmGlobals: - chartHome: ../../../submodules/envoyproxy-gateway/charts/ -helmCharts: -- name: gateway-helm - valuesFile: values.yaml - includeCRDs: true - releaseName: envoyproxy-gateway + - envoy-gateway-namespace.yaml + - envoy-internal-gateway-issuer.yaml + - envoy-custom-proxy-config.yaml + - envoy-gatewayclass.yaml + - envoy-gateway.yaml + - envoy-endpoint-policies.yaml diff --git a/base-kustomize/envoyproxy-gateway/base/namespace.yaml b/base-kustomize/envoyproxy-gateway/base/namespace.yaml deleted file mode 100644 index b237e0d30..000000000 --- a/base-kustomize/envoyproxy-gateway/base/namespace.yaml +++ /dev/null @@ -1,8 +0,0 @@ ---- -apiVersion: v1 -kind: Namespace -metadata: - labels: - kubernetes.io/metadata.name: envoy-gateway-system - name: envoy-gateway-system - name: envoy-gateway-system diff --git a/base-kustomize/envoyproxy-gateway/base/values.yaml b/base-kustomize/envoyproxy-gateway/base/values.yaml deleted file mode 100644 index c64911d77..000000000 --- a/base-kustomize/envoyproxy-gateway/base/values.yaml +++ /dev/null @@ -1,52 +0,0 @@ -deployment: - envoyGateway: - image: - repository: docker.io/envoyproxy/gateway - tag: 'v1.0.1' - imagePullPolicy: Always - imagePullSecrets: [] - resources: - limits: - cpu: 500m - memory: 1024Mi - requests: - cpu: 100m - memory: 256Mi - ports: - - name: grpc - port: 18000 - targetPort: 18000 - - name: ratelimit - port: 18001 - targetPort: 18001 - replicas: 1 - pod: - affinity: {} - annotations: {} - labels: {} - -config: - envoyGateway: - gateway: - controllerName: gateway.envoyproxy.io/gatewayclass-controller - provider: - type: Kubernetes - logging: - level: - default: info - -envoyGatewayMetricsService: - port: 19001 - -createNamespace: false - -kubernetesClusterDomain: cluster.local - -certgen: - job: - annotations: {} - resources: {} - ttlSecondsAfterFinished: 0 - rbac: - annotations: {} - labels: {} diff --git a/base-kustomize/gateway/envoyproxy/gateway.yaml b/base-kustomize/gateway/envoyproxy/gateway.yaml deleted file mode 100644 index 97e481741..000000000 --- a/base-kustomize/gateway/envoyproxy/gateway.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: gateway.networking.k8s.io/v1 -kind: Gateway -metadata: - name: flex-gateway - namespace: envoy-gateway-system - annotations: - acme.cert-manager.io/http01-edit-in-place: "true" - cert-manager.io/cluster-issuer: letsencrypt-prod -spec: - gatewayClassName: envoyproxy - infrastructure: - annotations: - metallb.universe.tf/address-pool: openstack-external - listeners: - - name: http - port: 80 - protocol: HTTP - hostname: "*.your.domain.tld" - allowedRoutes: - namespaces: - from: All - - allowedRoutes: - namespaces: - from: All - hostname: '*.your.domain.tld' - name: https - port: 443 - protocol: HTTPS - tls: - certificateRefs: - - group: "" - kind: Secret - name: flex-endpoints - mode: Terminate diff --git a/base-kustomize/gateway/envoyproxy/kustomization.yaml b/base-kustomize/gateway/envoyproxy/kustomization.yaml deleted file mode 100644 index 
86ef21927..000000000 --- a/base-kustomize/gateway/envoyproxy/kustomization.yaml +++ /dev/null @@ -1,4 +0,0 @@ -sortOptions: - order: fifo -resources: - - './gateway.yaml' # namespace: envoy-gateway-system (common gateway) diff --git a/base-kustomize/gateway/grafana-routes.yaml b/base-kustomize/gateway/grafana-routes.yaml deleted file mode 100644 index 00e4838d6..000000000 --- a/base-kustomize/gateway/grafana-routes.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: grafana-gateway-route - namespace: grafana -spec: - parentRefs: - - name: flex-gateway - sectionName: cluster-tls - namespace: nginx-gateway - hostnames: - - "grafana.cluster.local" - - "grafana" - - "grafana.grafana" - - "grafana.grafana.svc.cluster.local" - rules: - - backendRefs: - - name: grafana - port: 80 diff --git a/base-kustomize/gateway/nginx-gateway-fabric/endpoint-policies.yaml b/base-kustomize/gateway/nginx-gateway-fabric/endpoint-policies.yaml new file mode 100644 index 000000000..b6e2831aa --- /dev/null +++ b/base-kustomize/gateway/nginx-gateway-fabric/endpoint-policies.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: gateway.nginx.org/v1alpha1 +kind: ClientSettingsPolicy +metadata: + name: gateway-client-settings + namespace: nginx-gateway +spec: + targetRef: + group: gateway.networking.k8s.io + kind: Gateway + name: flex-gateway + body: + maxSize: "120g" # sizes without a unit are bytes. diff --git a/base-kustomize/gateway/nginx-gateway-fabric/kustomization.yaml b/base-kustomize/gateway/nginx-gateway-fabric/kustomization.yaml index 00225f232..b4b96d6bb 100644 --- a/base-kustomize/gateway/nginx-gateway-fabric/kustomization.yaml +++ b/base-kustomize/gateway/nginx-gateway-fabric/kustomization.yaml @@ -3,3 +3,4 @@ sortOptions: resources: - internal-gateway-api.yaml # namespace: nginx-gateway (common gateway) - internal-gateway-issuer.yaml #namespace: nginx-gateway + - endpoint-policies.yaml diff --git a/base-kustomize/gateway/nginx-gateway-fabric/prometheus-routes.yaml b/base-kustomize/gateway/nginx-gateway-fabric/prometheus-routes.yaml deleted file mode 100644 index c492a430c..000000000 --- a/base-kustomize/gateway/nginx-gateway-fabric/prometheus-routes.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: prometheus-gateway-route - namespace: prometheus -spec: - parentRefs: - - name: flex-gateway - sectionName: http - namespace: nginx-gateway - hostnames: - - "prometheus.your.domain.tld" - rules: - - backendRefs: - - name: kube-prometheus-stack-prometheus - port: 9090 diff --git a/base-kustomize/glance/base/client-settings.yaml b/base-kustomize/glance/base/client-settings.yaml index b6e2831aa..e69de29bb 100644 --- a/base-kustomize/glance/base/client-settings.yaml +++ b/base-kustomize/glance/base/client-settings.yaml @@ -1,13 +0,0 @@ ---- -apiVersion: gateway.nginx.org/v1alpha1 -kind: ClientSettingsPolicy -metadata: - name: gateway-client-settings - namespace: nginx-gateway -spec: - targetRef: - group: gateway.networking.k8s.io - kind: Gateway - name: flex-gateway - body: - maxSize: "120g" # sizes without a unit are bytes. 
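With the per-service copy of client-settings.yaml emptied out above and the ClientSettingsPolicy now carried once in the nginx-gateway-fabric base, the 120g body limit applies to everything behind flex-gateway. nginx-gateway-fabric also accepts the same policy scoped to a single HTTPRoute, where it should take precedence over the Gateway-wide value for that route only; a minimal sketch follows, assuming a glance-gateway-route HTTPRoute in the openstack namespace (the route name and namespace are illustrative, not part of this change):

---
apiVersion: gateway.nginx.org/v1alpha1
kind: ClientSettingsPolicy
metadata:
  name: glance-client-settings
  namespace: openstack              # a route-scoped policy lives alongside its target route
spec:
  targetRef:
    group: gateway.networking.k8s.io
    kind: HTTPRoute                 # attach to one route instead of the whole Gateway
    name: glance-gateway-route      # illustrative route name
  body:
    maxSize: "120g"                 # sizes without a unit are bytes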
diff --git a/base-kustomize/glance/base/glance-routes.yaml b/base-kustomize/glance/base/glance-routes.yaml deleted file mode 100644 index 43650a3d5..000000000 --- a/base-kustomize/glance/base/glance-routes.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: glance-gateway-route - namespace: openstack - labels: - application: gateway-api - service: HTTPRoute - route: glance -spec: - parentRefs: - - name: flex-gateway - sectionName: cluster-tls - namespace: nginx-gateway - hostnames: - - "glance.cluster.local" - rules: - - backendRefs: - - name: glance-api - port: 9292 diff --git a/base-kustomize/glance/base/kustomization.yaml b/base-kustomize/glance/base/kustomization.yaml index a0df4e96a..9d3d0d107 100644 --- a/base-kustomize/glance/base/kustomization.yaml +++ b/base-kustomize/glance/base/kustomization.yaml @@ -5,8 +5,6 @@ resources: - glance-rabbitmq-queue.yaml - all.yaml - hpa-glance-api.yaml - - glance-routes.yaml - - client-settings.yaml patches: - target: diff --git a/base-kustomize/heat/base/heat-routes.yaml b/base-kustomize/heat/base/heat-routes.yaml deleted file mode 100644 index b0e6936ca..000000000 --- a/base-kustomize/heat/base/heat-routes.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: cloudformation-gateway-route - namespace: openstack - labels: - application: gateway-api - service: HTTPRoute - route: cloudformation -spec: - parentRefs: - - name: flex-gateway - sectionName: cluster-tls - namespace: nginx-gateway - hostnames: - - "cloudformation.cluster.local" - rules: - - backendRefs: - - name: heat-cfn - port: 8000 ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: heat-gateway-route - namespace: openstack - labels: - application: gateway-api - service: HTTPRoute - route: heat -spec: - parentRefs: - - name: flex-gateway - sectionName: cluster-tls - namespace: nginx-gateway - hostnames: - - "heat.cluster.local" - rules: - - backendRefs: - - name: heat-api - port: 8004 diff --git a/base-kustomize/heat/base/kustomization.yaml b/base-kustomize/heat/base/kustomization.yaml index 4e2127bce..e7f80c175 100644 --- a/base-kustomize/heat/base/kustomization.yaml +++ b/base-kustomize/heat/base/kustomization.yaml @@ -7,4 +7,3 @@ resources: - hpa-heat-api.yaml - hpa-heat-cfn.yaml - hpa-heat-engine.yaml - - heat-routes.yaml diff --git a/base-kustomize/keystone/base/keystone-routes.yaml b/base-kustomize/keystone/base/keystone-routes.yaml deleted file mode 100644 index d4c021485..000000000 --- a/base-kustomize/keystone/base/keystone-routes.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: keystone-gateway-route - namespace: openstack -spec: - parentRefs: - - name: flex-gateway - sectionName: cluster-tls - namespace: nginx-gateway - hostnames: - - "keystone.cluster.local" - rules: - - backendRefs: - - name: keystone-api - port: 5000 diff --git a/base-kustomize/keystone/base/kustomization.yaml b/base-kustomize/keystone/base/kustomization.yaml index de6538f51..5631e709e 100644 --- a/base-kustomize/keystone/base/kustomization.yaml +++ b/base-kustomize/keystone/base/kustomization.yaml @@ -5,4 +5,3 @@ resources: - keystone-rabbitmq-queue.yaml - all.yaml - hpa-keystone-api.yaml - - keystone-routes.yaml diff --git a/base-kustomize/neutron/base/hpa-neutron-rpc-server.yaml b/base-kustomize/neutron/base/hpa-neutron-rpc-server.yaml new file mode 100644 index 
000000000..18b84c962 --- /dev/null +++ b/base-kustomize/neutron/base/hpa-neutron-rpc-server.yaml @@ -0,0 +1,19 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: neutron-rpc-server + namespace: openstack +spec: + maxReplicas: 9 + minReplicas: 3 + metrics: + - resource: + name: memory + target: + type: AverageValue + averageValue: 2Gi + type: Resource + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: neutron-rpc-server diff --git a/base-kustomize/neutron/base/kustomization.yaml b/base-kustomize/neutron/base/kustomization.yaml index cd1b3042b..dbd6c3398 100644 --- a/base-kustomize/neutron/base/kustomization.yaml +++ b/base-kustomize/neutron/base/kustomization.yaml @@ -5,7 +5,7 @@ resources: - neutron-rabbitmq-queue.yaml - all.yaml - hpa-neutron-server.yaml - - neutron-routes.yaml + - hpa-neutron-rpc-server.yaml patches: - target: diff --git a/base-kustomize/neutron/base/neutron-routes.yaml b/base-kustomize/neutron/base/neutron-routes.yaml deleted file mode 100644 index ea3e0a634..000000000 --- a/base-kustomize/neutron/base/neutron-routes.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: neutron-gateway-route - namespace: openstack - labels: - application: gateway-api - service: HTTPRoute - route: neutron -spec: - parentRefs: - - name: flex-gateway - sectionName: cluster-tls - namespace: nginx-gateway - hostnames: - - "neutron.cluster.local" - rules: - - backendRefs: - - name: neutron-server - port: 9696 diff --git a/base-kustomize/octavia/base/hpa-octavia-api.yaml b/base-kustomize/octavia/base/hpa-octavia-api.yaml index 4772cf0e4..52c292044 100644 --- a/base-kustomize/octavia/base/hpa-octavia-api.yaml +++ b/base-kustomize/octavia/base/hpa-octavia-api.yaml @@ -10,7 +10,7 @@ spec: - resource: name: memory target: - averageValue: 500Mi + averageValue: 2200Mi type: Value type: Resource scaleTargetRef: diff --git a/base-kustomize/octavia/base/kustomization.yaml b/base-kustomize/octavia/base/kustomization.yaml index 8b8269df3..863bb1444 100644 --- a/base-kustomize/octavia/base/kustomization.yaml +++ b/base-kustomize/octavia/base/kustomization.yaml @@ -15,70 +15,35 @@ resources: - all.yaml - hpa-octavia-api.yaml - hpa-octavia-worker.yaml - - octavia-routes.yaml # To run the OVN driver, the octavia-api container must have an agent container within the same pod. 
patches: + - target: + kind: Deployment + name: octavia-api + patch: |- + - op: replace + path: /spec/template/spec/containers/0/securityContext + value: + runAsUser: 0 - target: kind: Deployment name: octavia-api patch: |- - op: add - path: /spec/template/spec/containers + path: /spec/template/spec/containers/- value: - - name: octavia-agent - image: image-octavia-ovn - imagePullPolicy: IfNotPresent - securityContext: - runAsUser: 0 command: - octavia-driver-agent - --config-dir - /etc/octavia/octavia.conf - volumeMounts: - - name: pod-etc-octavia - mountPath: /etc/octavia - - name: octavia-bin - mountPath: /tmp/octavia-api.sh - subPath: octavia-api.sh - readOnly: true - - name: octavia-etc - mountPath: /etc/octavia/octavia.conf - subPath: octavia.conf - readOnly: true - - name: octavia-etc - mountPath: /etc/octavia/logging.conf - subPath: logging.conf - readOnly: true - - mountPath: /var/run/octavia - name: pod-run-octavia - - name: octavia-api image: image-octavia-ovn imagePullPolicy: IfNotPresent securityContext: runAsUser: 0 - command: - - /tmp/octavia-api.sh - - start - lifecycle: - preStop: - exec: - command: - - /tmp/octavia-api.sh - - stop - ports: - - name: o-api - containerPort: 9876 - readinessProbe: - httpGet: - scheme: HTTP - path: / - port: 9876 - livenessProbe: - httpGet: - scheme: HTTP - path: / - port: 9876 + name: octavia-agent + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File volumeMounts: - name: pod-etc-octavia mountPath: /etc/octavia diff --git a/base-kustomize/octavia/base/octavia-routes.yaml b/base-kustomize/octavia/base/octavia-routes.yaml deleted file mode 100644 index ed6fddf15..000000000 --- a/base-kustomize/octavia/base/octavia-routes.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: octavia-api - namespace: openstack - labels: - application: gateway-api - service: HTTPRoute - route: octavia -spec: - parentRefs: - - name: flex-gateway - sectionName: cluster-tls - namespace: nginx-gateway - hostnames: - - "octavia.cluster.local" - rules: - - backendRefs: - - name: octavia-api - port: 9876 diff --git a/base-kustomize/ovn-backup/base/ovn-backup.yaml b/base-kustomize/ovn-backup/base/ovn-backup.yaml index edeafe63f..64673c624 100644 --- a/base-kustomize/ovn-backup/base/ovn-backup.yaml +++ b/base-kustomize/ovn-backup/base/ovn-backup.yaml @@ -12,7 +12,7 @@ metadata: name: ovndb-backup spec: accessModes: - - ReadWriteOnce + - ReadWriteOnce resources: requests: storage: 1Gi @@ -35,25 +35,25 @@ spec: serviceAccountName: "ovn" restartPolicy: "Never" volumes: - - name: backup - persistentVolumeClaim: - claimName: ovndb-backup - - name: backup-script - configMap: - name: ovn-backup-script - defaultMode: 0744 - containers: - - name: ovn-central-backup - envFrom: - - configMapRef: - name: ovn-backup-config - - secretRef: - name: ovn-backup-swift-tempauth-account - command: ["/backup-script/ovn-backup.sh"] - image: docker.io/kubeovn/kube-ovn:v1.11.5 - imagePullPolicy: IfNotPresent - volumeMounts: - name: backup - mountPath: "/backup" + persistentVolumeClaim: + claimName: ovndb-backup - name: backup-script - mountPath: /backup-script + configMap: + name: ovn-backup-script + defaultMode: 0744 + containers: + - name: ovn-central-backup + envFrom: + - configMapRef: + name: ovn-backup-config + - secretRef: + name: ovn-backup-swift-tempauth-account + command: ["/backup-script/ovn-backup.sh"] + image: docker.io/kubeovn/kube-ovn:v1.12.30 + imagePullPolicy: IfNotPresent + volumeMounts: + 
- name: backup + mountPath: "/backup" + - name: backup-script + mountPath: /backup-script diff --git a/base-kustomize/placement/base/kustomization.yaml b/base-kustomize/placement/base/kustomization.yaml index fb0514057..c91ba2f38 100644 --- a/base-kustomize/placement/base/kustomization.yaml +++ b/base-kustomize/placement/base/kustomization.yaml @@ -4,4 +4,3 @@ resources: - placement-mariadb-database.yaml - all.yaml - hpa-placement-api.yaml - - placement-nova-routes.yaml diff --git a/base-kustomize/placement/base/placement-nova-routes.yaml b/base-kustomize/placement/base/placement-nova-routes.yaml deleted file mode 100644 index 1cb594dbd..000000000 --- a/base-kustomize/placement/base/placement-nova-routes.yaml +++ /dev/null @@ -1,84 +0,0 @@ ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: placement-api - namespace: openstack - labels: - application: gateway-api - service: HTTPRoute - route: placement -spec: - parentRefs: - - name: flex-gateway - sectionName: cluster-tls - namespace: nginx-gateway - hostnames: - - "placement.cluster.local" - rules: - - backendRefs: - - name: placement-api - port: 8778 ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: metadata-gateway-route - namespace: openstack - labels: - application: gateway-api - service: HTTPRoute - route: nova-metadata -spec: - parentRefs: - - name: flex-gateway - sectionName: cluster-tls - namespace: nginx-gateway - hostnames: - - "metadata.cluster.local" - rules: - - backendRefs: - - name: nova-metadata - port: 8775 ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: nova-gateway-route - namespace: openstack - labels: - application: gateway-api - service: HTTPRoute - route: nova -spec: - parentRefs: - - name: flex-gateway - sectionName: cluster-tls - namespace: nginx-gateway - hostnames: - - "nova.cluster.local" - rules: - - backendRefs: - - name: nova-api - port: 8774 ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: novnc-gateway-route - namespace: openstack - labels: - application: gateway-api - service: HTTPRoute - route: novnc -spec: - parentRefs: - - name: flex-gateway - sectionName: cluster-tls - namespace: nginx-gateway - hostnames: - - "novnc.cluster.local" - rules: - - backendRefs: - - name: nova-novncproxy - port: 6080 diff --git a/base-kustomize/prometheus-snmp-exporter/base/kustomization.yaml b/base-kustomize/prometheus-snmp-exporter/base/kustomization.yaml new file mode 100644 index 000000000..10663bac8 --- /dev/null +++ b/base-kustomize/prometheus-snmp-exporter/base/kustomization.yaml @@ -0,0 +1,4 @@ +sortOptions: + order: fifo +resources: + - all.yaml diff --git a/base-kustomize/sealed-secrets/base/kustomization.yaml b/base-kustomize/sealed-secrets/base/kustomization.yaml index 6d8db75e9..a14188baa 100644 --- a/base-kustomize/sealed-secrets/base/kustomization.yaml +++ b/base-kustomize/sealed-secrets/base/kustomization.yaml @@ -3,12 +3,3 @@ sortOptions: resources: - './namespace.yaml' namespace: sealed-secrets -helmGlobals: - chartHome: ../charts/ -helmCharts: -- name: sealed-secrets - includeCRDs: true - releaseName: sealed-secrets - valuesFile: values.yaml - version: 2.14.2 - repo: https://bitnami-labs.github.io/sealed-secrets diff --git a/base-kustomize/sealed-secrets/base/values.yaml b/base-kustomize/sealed-secrets/base/values.yaml deleted file mode 100644 index 15524ee2f..000000000 --- a/base-kustomize/sealed-secrets/base/values.yaml +++ /dev/null @@ -1,495 +0,0 @@ -## @section Common 
parameters - -## @param kubeVersion Override Kubernetes version -## -kubeVersion: "" -## @param nameOverride String to partially override sealed-secrets.fullname -## -nameOverride: "" -## @param fullnameOverride String to fully override sealed-secrets.fullname -## -fullnameOverride: "sealed-secrets-controller" -## @param namespace Namespace where to deploy the Sealed Secrets controller -## -namespace: "" - -## @param extraDeploy [array] Array of extra objects to deploy with the release -## -extraDeploy: [] -## @param commonAnnotations [object] Annotations to add to all deployed resources -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ -## -commonAnnotations: {} - -## @param commonLabels [object] Labels to add to all deployed resources -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ -## -commonLabels: {} - -## @section Sealed Secrets Parameters - -## Sealed Secrets image -## ref: https://hub.docker.com/r/bitnami/sealed-secrets-controller/tags -## @param image.registry Sealed Secrets image registry -## @param image.repository Sealed Secrets image repository -## @param image.tag Sealed Secrets image tag (immutable tags are recommended) -## @param image.pullPolicy Sealed Secrets image pull policy -## @param image.pullSecrets [array] Sealed Secrets image pull secrets -## -image: - registry: docker.io - repository: bitnami/sealed-secrets-controller - tag: 0.25.0 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] -## @param revisionHistoryLimit Number of old history to retain to allow rollback (If not set, default Kubernetes value is set to 10) -## e.g: -revisionHistoryLimit: "" -## @param createController Specifies whether the Sealed Secrets controller should be created -## -createController: true -## @param secretName The name of an existing TLS secret containing the key used to encrypt secrets -## -secretName: "sealed-secrets-key" -## @param updateStatus Specifies whether the Sealed Secrets controller should update the status subresource -## -updateStatus: true -## @param skipRecreate Specifies whether the Sealed Secrets controller should skip recreating removed secrets -## Setting it to true allows to optionally restore backward compatibility in low priviledge -## environments when old versions of the controller did not require watch permissions on secrets -## for secret re-creation. -## -skipRecreate: false -## @param keyrenewperiod Specifies key renewal period. 
Default 30 days -## e.g -## keyrenewperiod: "720h30m" -## -keyrenewperiod: "0" -## @param rateLimit Number of allowed sustained request per second for verify endpoint -## -rateLimit: "" -## @param rateLimitBurst Number of requests allowed to exceed the rate limit per second for verify endpoint -## -rateLimitBurst: "" -## @param additionalNamespaces List of namespaces used to manage the Sealed Secrets -## -additionalNamespaces: [] -## @param privateKeyAnnotations Map of annotations to be set on the sealing keypairs -## -privateKeyAnnotations: {} -## @param privateKeyLabels Map of labels to be set on the sealing keypairs -## -privateKeyLabels: {} -## @param logInfoStdout Specifies whether the Sealed Secrets controller will log info to stdout -## -logInfoStdout: false -## @param command Override default container command -## -command: [] -## @param args Override default container args -## -args: [] -## Configure extra options for Sealed Secret containers' liveness, readiness and startup probes -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes -## @param livenessProbe.enabled Enable livenessProbe on Sealed Secret containers -## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe -## @param livenessProbe.periodSeconds Period seconds for livenessProbe -## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe -## @param livenessProbe.failureThreshold Failure threshold for livenessProbe -## @param livenessProbe.successThreshold Success threshold for livenessProbe -## -livenessProbe: - enabled: true - initialDelaySeconds: 0 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 -## @param readinessProbe.enabled Enable readinessProbe on Sealed Secret containers -## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe -## @param readinessProbe.periodSeconds Period seconds for readinessProbe -## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe -## @param readinessProbe.failureThreshold Failure threshold for readinessProbe -## @param readinessProbe.successThreshold Success threshold for readinessProbe -## -readinessProbe: - enabled: true - initialDelaySeconds: 0 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 -## @param startupProbe.enabled Enable startupProbe on Sealed Secret containers -## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe -## @param startupProbe.periodSeconds Period seconds for startupProbe -## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe -## @param startupProbe.failureThreshold Failure threshold for startupProbe -## @param startupProbe.successThreshold Success threshold for startupProbe -## -startupProbe: - enabled: false - initialDelaySeconds: 0 - periodSeconds: 10 - timeoutSeconds: 1 - failureThreshold: 3 - successThreshold: 1 -## @param customLivenessProbe Custom livenessProbe that overrides the default one -## -customLivenessProbe: {} -## @param customReadinessProbe Custom readinessProbe that overrides the default one -## -customReadinessProbe: {} -## @param customStartupProbe Custom startupProbe that overrides the default one -## -customStartupProbe: {} -## Sealed Secret resource requests and limits -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## @param resources.limits [object] The resources limits for the Sealed Secret containers -## @param resources.requests [object] 
The requested resources for the Sealed Secret containers -## -resources: - limits: {} - requests: {} -## Configure Pods Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod -## @param podSecurityContext.enabled Enabled Sealed Secret pods' Security Context -## @param podSecurityContext.fsGroup Set Sealed Secret pod's Security Context fsGroup -## -podSecurityContext: - enabled: true - fsGroup: 65534 -## Configure Container Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod -## @param containerSecurityContext.enabled Enabled Sealed Secret containers' Security Context -## @param containerSecurityContext.readOnlyRootFilesystem Whether the Sealed Secret container has a read-only root filesystem -## @param containerSecurityContext.runAsNonRoot Indicates that the Sealed Secret container must run as a non-root user -## @param containerSecurityContext.runAsUser Set Sealed Secret containers' Security Context runAsUser -## @extra containerSecurityContext.capabilities Adds and removes POSIX capabilities from running containers (see `values.yaml`) -## @skip containerSecurityContext.capabilities.drop -## -containerSecurityContext: - enabled: true - readOnlyRootFilesystem: true - runAsNonRoot: true - runAsUser: 1001 - capabilities: - drop: - - ALL - -## @param podLabels [object] Extra labels for Sealed Secret pods -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ -## -podLabels: {} -## @param podAnnotations [object] Annotations for Sealed Secret pods -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ -## -podAnnotations: {} -## @param priorityClassName Sealed Secret pods' priorityClassName -## -priorityClassName: "" -## @param runtimeClassName Sealed Secret pods' runtimeClassName -## -runtimeClassName: "" -## @param affinity [object] Affinity for Sealed Secret pods assignment -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## -affinity: - enableAntiAffinity: true - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: node-role.kubernetes.io/worker - operator: In - values: - - worker -## @param nodeSelector [object] Node labels for Sealed Secret pods assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -## -nodeSelector: {} -## @param tolerations [array] Tolerations for Sealed Secret pods assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] -## @param additionalVolumes [object] Extra Volumes for the Sealed Secrets Controller Deployment -## ref: https://kubernetes.io/docs/concepts/storage/volumes/ -## -additionalVolumes: [] -## @param additionalVolumeMounts [object] Extra volumeMounts for the Sealed Secrets Controller container -## ref: https://kubernetes.io/docs/concepts/storage/volumes/ -## -additionalVolumeMounts: [] -## @param hostNetwork Sealed Secrets pods' hostNetwork -hostNetwork: false -## @param dnsPolicy Sealed Secrets pods' dnsPolicy -dnsPolicy: "" - -## @section Traffic Exposure Parameters - -## Sealed Secret service parameters -## -service: - ## @param service.type Sealed Secret service type - ## - type: ClusterIP - ## @param service.port Sealed Secret service HTTP port - ## - port: 8080 - ## @param service.nodePort Node port for HTTP - ## Specify the nodePort 
value for the LoadBalancer and NodePort service types - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## NOTE: choose port between <30000-32767> - ## - nodePort: "" - ## @param service.annotations [object] Additional custom annotations for Sealed Secret service - ## - annotations: {} -## Sealed Secret ingress parameters -## ref: http://kubernetes.io/docs/user-guide/ingress/ -## -ingress: - ## @param ingress.enabled Enable ingress record generation for Sealed Secret - ## - enabled: false - ## @param ingress.pathType Ingress path type - ## - pathType: ImplementationSpecific - ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set) - ## - apiVersion: "" - ## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress - ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. - ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ - ## - ingressClassName: "" - ## @param ingress.hostname Default host for the ingress record - ## - hostname: sealed-secrets.local - ## @param ingress.path Default path for the ingress record - ## - path: /v1/cert.pem - ## @param ingress.annotations [object] Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. - ## Use this parameter to set the required annotations for cert-manager, see - ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations - ## e.g: - ## annotations: - ## kubernetes.io/ingress.class: nginx - ## cert-manager.io/cluster-issuer: cluster-issuer-name - ## - annotations: {} - ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter - ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` - ## You can: - ## - Use the `ingress.secrets` parameter to create this TLS secret - ## - Relay on cert-manager to create it by setting the corresponding annotations - ## - Relay on Helm to create self-signed certificates by setting `ingress.selfSigned=true` - ## - tls: false - ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm - ## - selfSigned: false - ## @param ingress.extraHosts [array] An array with additional hostname(s) to be covered with the ingress record - ## e.g: - ## extraHosts: - ## - name: sealed-secrets.local - ## path: / - ## - extraHosts: [] - ## @param ingress.extraPaths [array] An array with additional arbitrary paths that may need to be added to the ingress under the main host - ## e.g: - ## extraPaths: - ## - path: /* - ## backend: - ## serviceName: ssl-redirect - ## servicePort: use-annotation - ## - extraPaths: [] - ## @param ingress.extraTls [array] TLS configuration for additional hostname(s) to be covered with this ingress record - ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls - ## e.g: - ## extraTls: - ## - hosts: - ## - sealed-secrets.local - ## secretName: sealed-secrets.local-tls - ## - extraTls: [] - ## @param ingress.secrets [array] Custom TLS certificates as secrets - ## NOTE: 'key' and 'certificate' are expected in PEM format - ## NOTE: 'name' should line up with a 'secretName' set further up - ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid 
certificates - ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days - ## It is also possible to create and manage the certificates outside of this helm chart - ## Please see README.md for more information - ## e.g: - ## secrets: - ## - name: sealed-secrets.local-tls - ## key: |- - ## -----BEGIN RSA PRIVATE KEY----- - ## ... - ## -----END RSA PRIVATE KEY----- - ## certificate: |- - ## -----BEGIN CERTIFICATE----- - ## ... - ## -----END CERTIFICATE----- - ## - secrets: [] -## Network policies -## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ -## -networkPolicy: - ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created - ## - enabled: false - ## NetworkPolicy Egress configuration - ## - egress: - ## @param networkPolicy.egress.enabled Specifies wheter a egress is set in the NetworkPolicy - ## - enabled: false - ## @param networkPolicy.egress.kubeapiCidr Specifies the kubeapiCidr, which is the only egress allowed. If not set, kubeapiCidr will be found using Helm lookup - ## - kubeapiCidr: "" - ## @param networkPolicy.egress.kubeapiPort Specifies the kubeapiPort, which is the only egress allowed. If not set, kubeapiPort will be found using Helm lookup - ## - kubeapiPort: "" - -## @section Other Parameters - -## ServiceAccount configuration -## -serviceAccount: - ## @param serviceAccount.annotations [object] Annotations for Sealed Secret service account - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - annotations: {} - ## @param serviceAccount.create Specifies whether a ServiceAccount should be created - ## - create: true - ## @param serviceAccount.labels Extra labels to be added to the ServiceAccount - ## - labels: {} - ## @param serviceAccount.name The name of the ServiceAccount to use. 
- ## If not set and create is true, a name is generated using the sealed-secrets.fullname template - ## - name: "" -## RBAC configuration -## -rbac: - ## @param rbac.create Specifies whether RBAC resources should be created - ## - create: true - ## @param rbac.clusterRole Specifies whether the Cluster Role resource should be created - ## - clusterRole: true - ## @param rbac.clusterRoleName Specifies the name for the Cluster Role resource - ## - clusterRoleName: "secrets-unsealer" - ## @param rbac.namespacedRoles Specifies whether the namespaced Roles should be created (in each of the specified additionalNamespaces) - ## - namespacedRoles: false - ## @param rbac.namespacedRolesName Specifies the name for the namesapced Role resource - ## - namespacedRolesName: "secrets-unsealer" - ## @param rbac.labels Extra labels to be added to RBAC resources - ## - labels: {} - ## @param rbac.pspEnabled PodSecurityPolicy - ## - pspEnabled: false - -## @section Metrics parameters - -metrics: - ## Prometheus Operator ServiceMonitor configuration - ## - serviceMonitor: - ## @param metrics.serviceMonitor.enabled Specify if a ServiceMonitor will be deployed for Prometheus Operator - ## - enabled: false - ## @param metrics.serviceMonitor.namespace Namespace where Prometheus Operator is running in - ## - namespace: "" - ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor - ## - labels: {} - ## @param metrics.serviceMonitor.annotations Extra annotations for the ServiceMonitor - ## - annotations: {} - ## @param metrics.serviceMonitor.interval How frequently to scrape metrics - ## e.g: - ## interval: 10s - ## - interval: "" - ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended - ## e.g: - ## scrapeTimeout: 10s - ## - scrapeTimeout: "" - ## @param metrics.serviceMonitor.honorLabels Specify if ServiceMonitor endPoints will honor labels - ## - honorLabels: true - ## @param metrics.serviceMonitor.metricRelabelings [array] Specify additional relabeling of metrics - ## - metricRelabelings: [] - ## @param metrics.serviceMonitor.relabelings [array] Specify general relabeling - ## - relabelings: [] - ## Grafana dashboards configuration - ## - dashboards: - ## @param metrics.dashboards.create Specifies whether a ConfigMap with a Grafana dashboard configuration should be created - ## ref https://github.com/helm/charts/tree/master/stable/grafana#configuration - ## - create: false - ## @param metrics.dashboards.labels Extra labels to be added to the Grafana dashboard ConfigMap - ## - labels: {} - ## @param metrics.dashboards.annotations Annotations to be added to the Grafana dashboard ConfigMap - ## - annotations: {} - ## @param metrics.dashboards.namespace Namespace where Grafana dashboard ConfigMap is deployed - ## - namespace: "" - - ## Sealed Secret Metrics service parameters - ## - service: - ## @param metrics.service.type Sealed Secret Metrics service type - ## - type: ClusterIP - ## @param metrics.service.port Sealed Secret service Metrics HTTP port - ## - port: 8081 - ## @param metrics.service.nodePort Node port for HTTP - ## Specify the nodePort value for the LoadBalancer and NodePort service types - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## NOTE: choose port between <30000-32767> - ## - nodePort: "" - ## @param metrics.service.annotations [object] Additional custom annotations for Sealed Secret Metrics service - ## - annotations: {} - -## @section PodDisruptionBudget Parameters - -pdb: - ## @param pdb.create 
Specifies whether a PodDisruptionBudget should be created - ## - create: false - ## @param pdb.minAvailable The minimum number of pods (non number to omit) - ## - minAvailable: 1 - ## @param pdb.maxUnavailable The maximum number of unavailable pods (non number to omit) - ## - maxUnavailable: "" diff --git a/base-kustomize/skyline/base/hpa-skyline-apiserver.yaml b/base-kustomize/skyline/base/hpa-skyline-apiserver.yaml index a0463cde9..afac254a6 100644 --- a/base-kustomize/skyline/base/hpa-skyline-apiserver.yaml +++ b/base-kustomize/skyline/base/hpa-skyline-apiserver.yaml @@ -16,4 +16,4 @@ spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment - name: skyline-apiserver + name: skyline diff --git a/base-kustomize/skyline/base/kustomization.yaml b/base-kustomize/skyline/base/kustomization.yaml index 4fe23fcd1..bae20526c 100644 --- a/base-kustomize/skyline/base/kustomization.yaml +++ b/base-kustomize/skyline/base/kustomization.yaml @@ -7,4 +7,3 @@ resources: - deployment-apiserver.yaml - hpa-skyline-apiserver.yaml - pdb-apiserver.yaml - - skyline-routes.yaml diff --git a/base-kustomize/skyline/base/skyline-routes.yaml b/base-kustomize/skyline/base/skyline-routes.yaml deleted file mode 100644 index cba783ddc..000000000 --- a/base-kustomize/skyline/base/skyline-routes.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: skyline-api - namespace: openstack - labels: - application: gateway-api - service: HTTPRoute - route: skyline -spec: - parentRefs: - - name: flex-gateway - sectionName: cluster-tls - namespace: nginx-gateway - hostnames: - - "skyline.cluster.local" - rules: - - backendRefs: - - name: skyline-apiserver - port: 9999 diff --git a/base-kustomize/topolvm/base/kustomization.yaml b/base-kustomize/topolvm/base/kustomization.yaml new file mode 100644 index 000000000..10663bac8 --- /dev/null +++ b/base-kustomize/topolvm/base/kustomization.yaml @@ -0,0 +1,4 @@ +sortOptions: + order: fifo +resources: + - all.yaml diff --git a/base-kustomize/topolvm/general/kustomization.yaml b/base-kustomize/topolvm/general/kustomization.yaml deleted file mode 100644 index 15bfc5e1f..000000000 --- a/base-kustomize/topolvm/general/kustomization.yaml +++ /dev/null @@ -1,52 +0,0 @@ -sortOptions: - order: fifo -resources: - - ns-topolvm.yaml - -helmCharts: - - name: topolvm - releaseName: topolvm - valuesInline: - controller: - replicaCount: 1 - nodeSelector: - node-role.kubernetes.io/control-plane: "" - scheduler: - nodeSelector: - node-role.kubernetes.io/control-plane: "" - cert-manager: - enabled: false - storageClasses: - - name: general # Defines name of storage class. - storageClass: - fsType: xfs # Supported filesystems are: ext4, xfs, and btrfs. - # reclaimPolicy - reclaimPolicy: # Delete - # Additional annotations - annotations: {} - # Default storage class for dynamic volume provisioning - # ref: https://kubernetes.io/docs/concepts/storage/dynamic-provisioning - isDefaultClass: true - # volumeBindingMode can be either WaitForFirstConsumer or Immediate. WaitForFirstConsumer is recommended because TopoLVM cannot schedule pods wisely if volumeBindingMode is Immediate. - volumeBindingMode: Immediate - # enables CSI drivers to expand volumes. This feature is available for Kubernetes 1.16 and later releases. - allowVolumeExpansion: true - additionalParameters: - topolvm.io/device-class: "general" - # mount options - mountOptions: [] - # lvmd service - lvmd: - # lvmd.managed -- If true, set up lvmd service with DaemonSet. 
- managed: true - # lvmd.socketName -- Specify socketName. - socketName: /run/topolvm/lvmd.sock - # lvmd.deviceClasses -- Specify the device-class settings. - deviceClasses: - - name: general - volume-group: vg-general - default: true - spare-gb: 10 - includeCRDs: true - namespace: topolvm-system - repo: https://topolvm.github.io/topolvm diff --git a/base-kustomize/topolvm/general/ns-topolvm.yaml b/base-kustomize/topolvm/general/ns-topolvm.yaml deleted file mode 100644 index bf0b97fa7..000000000 --- a/base-kustomize/topolvm/general/ns-topolvm.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - kubernetes.io/metadata.name: topolvm-system - name: topolvm-system - name: topolvm-system diff --git a/bin/chart-install-meta.yaml b/bin/chart-install-meta.yaml index ec966216f..61552eba6 100644 --- a/bin/chart-install-meta.yaml +++ b/bin/chart-install-meta.yaml @@ -135,3 +135,12 @@ prometheus-postgres-exporter: version: 6.0.0 valuesFiles: - values.yaml + +prometheus-snmp-exporter: + name: prometheus-snmp-exporter + repoName: prometheus-community + repo: https://prometheus-community.github.io/helm-charts + releaseName: prometheus-snmp-exporter + namespace: prometheus + valuesFiles: + - values.yaml diff --git a/bin/install-argocd.sh b/bin/install-argocd.sh new file mode 100644 index 000000000..034a2692b --- /dev/null +++ b/bin/install-argocd.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# Default parameter value +TARGET=${1:-base} + +# Directory to check for YAML files +CONFIG_DIR="/etc/genestack/helm-configs/argocd" + +# Helm command setup +HELM_CMD="helm upgrade --install argocd oci://registry-1.docker.io/bitnamicharts/argo-cd \ + --namespace=argocd \ + --timeout 120m \ + --post-renderer /etc/genestack/kustomize/kustomize.sh \ + --post-renderer-args argocd/${TARGET} \ + -f /opt/genestack/base-helm-configs/argocd/helm-argocd-overrides.yaml" + +# Check if YAML files exist in the specified directory +if compgen -G "${CONFIG_DIR}/*.yaml" > /dev/null; then + # Add all YAML files from the directory to the helm command + for yaml_file in "${CONFIG_DIR}"/*.yaml; do + HELM_CMD+=" -f ${yaml_file}" + done +fi + +# Run the helm command +echo "Executing Helm command:" +echo "${HELM_CMD}" +eval "${HELM_CMD}" diff --git a/bin/install-barbican.sh b/bin/install-barbican.sh index 49040975c..503145ac5 100755 --- a/bin/install-barbican.sh +++ b/bin/install-barbican.sh @@ -3,9 +3,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" CONFIG_DIR="/etc/genestack/helm-configs/barbican" -pushd /opt/genestack/submodules/openstack-helm || exit 1 - -HELM_CMD="helm upgrade --install barbican ./barbican \ +HELM_CMD="helm upgrade --install barbican openstack-helm/barbican --version 2024.2.208+13651f45-628a320c \ --namespace=openstack \ --timeout 120m" @@ -33,9 +31,9 @@ HELM_CMD+=" --set conf.barbican.keystone_authtoken.memcache_secret_key=\"$(kubec HELM_CMD+=" --post-renderer /etc/genestack/kustomize/kustomize.sh" HELM_CMD+=" --post-renderer-args barbican/overlay $*" +helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm +helm repo update + echo "Executing Helm command:" echo "${HELM_CMD}" - eval "${HELM_CMD}" - -popd || exit 1 diff --git a/bin/install-ceilometer.sh b/bin/install-ceilometer.sh index 531b34ac5..36aef0eda 100755 --- a/bin/install-ceilometer.sh +++ b/bin/install-ceilometer.sh @@ -4,9 +4,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/ceilometer" 
BASE_OVERRIDES="/opt/genestack/base-helm-configs/ceilometer/ceilometer-helm-overrides.yaml" -pushd /opt/genestack/submodules/openstack-helm || exit 1 - -HELM_CMD="helm upgrade --install ceilometer ./ceilometer \ +HELM_CMD="helm upgrade --install ceilometer openstack-helm/ceilometer --version 2024.2.115+13651f45-628a320c \ --namespace=openstack \ --timeout 10m" @@ -34,18 +32,21 @@ HELM_CMD+=" --set conf.ceilometer.oslo_messaging.transport_url=\"rabbit://ceilom HELM_CMD+=" --set conf.ceilometer.notification.messaging_urls.values=\"{\ rabbit://ceilometer:\$(kubectl --namespace openstack get secret ceilometer-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/ceilometer,\ -rabbit://cinder:\$(kubectl --namespace openstack get secret cinder-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/cinder,\ -rabbit://glance:\$(kubectl --namespace openstack get secret glance-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/glance,\ -rabbit://heat:\$(kubectl --namespace openstack get secret heat-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/heat,\ rabbit://keystone:\$(kubectl --namespace openstack get secret keystone-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/keystone,\ +rabbit://glance:\$(kubectl --namespace openstack get secret glance-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/glance,\ +rabbit://nova:\$(kubectl --namespace openstack get secret nova-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/nova,\ rabbit://neutron:\$(kubectl --namespace openstack get secret neutron-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/neutron,\ -rabbit://nova:\$(kubectl --namespace openstack get secret nova-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/nova}\"" +rabbit://cinder:\$(kubectl --namespace openstack get secret cinder-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/cinder,\ +rabbit://heat:\$(kubectl --namespace openstack get secret heat-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/heat,\ +rabbit://octavia:\$(kubectl --namespace openstack get secret octavia-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/octavia,\ +rabbit://magnum:\$(kubectl --namespace openstack get secret magnum-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)@rabbitmq.openstack.svc.cluster.local:5672/magnum}\"" HELM_CMD+=" --post-renderer /etc/genestack/kustomize/kustomize.sh" HELM_CMD+=" --post-renderer-args ceilometer/overlay $*" +helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm +helm repo update + echo "Executing Helm command:" echo "${HELM_CMD}" eval "${HELM_CMD}" - -popd || exit 1 diff --git a/bin/install-cinder.sh b/bin/install-cinder.sh index 75da36a32..cf286b73e 100755 --- a/bin/install-cinder.sh +++ b/bin/install-cinder.sh @@ -4,9 +4,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/cinder" 
BASE_OVERRIDES="/opt/genestack/base-helm-configs/cinder/cinder-helm-overrides.yaml" -pushd /opt/genestack/submodules/openstack-helm || exit 1 - -HELM_CMD="helm upgrade --install cinder ./cinder \ +HELM_CMD="helm upgrade --install cinder openstack-helm/cinder --version 2024.2.409+13651f45-628a320c \ --namespace=openstack \ --timeout 120m" @@ -33,8 +31,9 @@ HELM_CMD+=" --set endpoints.oslo_messaging.auth.cinder.password=\"\$(kubectl --n HELM_CMD+=" --post-renderer /etc/genestack/kustomize/kustomize.sh" HELM_CMD+=" --post-renderer-args cinder/overlay $*" +helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm +helm repo update + echo "Executing Helm command:" echo "${HELM_CMD}" eval "${HELM_CMD}" - -popd || exit 1 diff --git a/bin/install-envoy-gateway.sh b/bin/install-envoy-gateway.sh new file mode 100755 index 000000000..f3102550a --- /dev/null +++ b/bin/install-envoy-gateway.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# shellcheck disable=SC2124,SC2145,SC2294 + +GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" +SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/envoyproxy-gateway" +BASE_OVERRIDES="/opt/genestack/base-helm-configs/envoyproxy-gateway/envoy-gateway-helm-overrides.yaml" +ENVOY_VERSION="v1.3.0" +HELM_CMD="helm upgrade --install envoyproxy-gateway oci://docker.io/envoyproxy/gateway-helm \ + --version ${ENVOY_VERSION} \ + --namespace envoyproxy-gateway-system \ + --create-namespace" + +HELM_CMD+=" -f ${BASE_OVERRIDES}" + +for dir in "$GLOBAL_OVERRIDES_DIR" "$SERVICE_CONFIG_DIR"; do + if compgen -G "${dir}/*.yaml" > /dev/null; then + for yaml_file in "${dir}"/*.yaml; do + # Avoid re-adding the base override file if present in the service directory + if [ "${yaml_file}" != "${BASE_OVERRIDES}" ]; then + HELM_CMD+=" -f ${yaml_file}" + fi + done + fi +done + +HELM_CMD+=" $@" + +echo "Executing Helm command:" +echo "${HELM_CMD}" +eval "${HELM_CMD}" + +# Install egctl +if [ ! 
-f "/usr/local/bin/egctl" ]; then + sudo mkdir -p /opt/egctl-install + pushd /opt/egctl-install || exit 1 + sudo wget "https://github.com/envoyproxy/gateway/releases/download/${ENVOY_VERSION}/egctl_${ENVOY_VERSION}_linux_amd64.tar.gz" -O egctl.tar.gz + sudo tar -xvf egctl.tar.gz + sudo install -o root -g root -m 0755 bin/linux/amd64/egctl /usr/local/bin/egctl + /usr/local/bin/egctl completion bash > /tmp/egctl.bash + sudo mv /tmp/egctl.bash /etc/bash_completion.d/egctl + popd || exit 1 +fi diff --git a/bin/install-glance.sh b/bin/install-glance.sh index 7886e3926..18a34d9ce 100755 --- a/bin/install-glance.sh +++ b/bin/install-glance.sh @@ -4,9 +4,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/glance" BASE_OVERRIDES="/opt/genestack/base-helm-configs/glance/glance-helm-overrides.yaml" -pushd /opt/genestack/submodules/openstack-helm || exit 1 - -HELM_CMD="helm upgrade --install glance ./glance \ +HELM_CMD="helm upgrade --install glance openstack-helm/glance --version 2024.2.396+13651f45-628a320c \ --namespace=openstack \ --timeout 120m" @@ -33,8 +31,9 @@ HELM_CMD+=" --set endpoints.oslo_messaging.auth.glance.password=\"\$(kubectl --n HELM_CMD+=" --post-renderer /etc/genestack/kustomize/kustomize.sh" HELM_CMD+=" --post-renderer-args glance/overlay $*" +helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm +helm repo update + echo "Executing Helm command:" echo "${HELM_CMD}" eval "${HELM_CMD}" - -popd || exit 1 diff --git a/bin/install-gnocchi.sh b/bin/install-gnocchi.sh index 8f5a3035f..ad40af45b 100755 --- a/bin/install-gnocchi.sh +++ b/bin/install-gnocchi.sh @@ -4,9 +4,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/gnocchi" BASE_OVERRIDES="/opt/genestack/base-helm-configs/gnocchi/gnocchi-helm-overrides.yaml" -pushd /opt/genestack/submodules/openstack-helm-infra || exit 1 - -HELM_CMD="helm upgrade --install gnocchi ./gnocchi \ +HELM_CMD="helm upgrade --install gnocchi openstack-helm-infra/gnocchi --version 2024.2.50+628a320c \ --namespace=openstack \ --timeout 10m" @@ -33,8 +31,9 @@ HELM_CMD+=" --set endpoints.oslo_db_postgresql.auth.gnocchi.password=\"\$(kubect HELM_CMD+=" --post-renderer /etc/genestack/kustomize/kustomize.sh" HELM_CMD+=" --post-renderer-args gnocchi/overlay $*" +helm repo add openstack-helm-infra https://tarballs.opendev.org/openstack/openstack-helm-infra +helm repo update + echo "Executing Helm command:" echo "${HELM_CMD}" eval "${HELM_CMD}" - -popd || exit 1 diff --git a/bin/install-heat.sh b/bin/install-heat.sh index 826da2f84..8ca2eee50 100755 --- a/bin/install-heat.sh +++ b/bin/install-heat.sh @@ -4,9 +4,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/heat" BASE_OVERRIDES="/opt/genestack/base-helm-configs/heat/heat-helm-overrides.yaml" -pushd /opt/genestack/submodules/openstack-helm || exit 1 - -HELM_CMD="helm upgrade --install heat ./heat \ +HELM_CMD="helm upgrade --install heat openstack-helm/heat --version 2024.2.294+13651f45-628a320c \ --namespace=openstack \ --timeout 120m" @@ -35,8 +33,9 @@ HELM_CMD+=" --set endpoints.oslo_messaging.auth.heat.password=\"\$(kubectl --nam HELM_CMD+=" --post-renderer /etc/genestack/kustomize/kustomize.sh" HELM_CMD+=" --post-renderer-args heat/overlay $*" +helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm +helm repo update + echo "Executing Helm command:" 
echo "${HELM_CMD}" eval "${HELM_CMD}" - -popd || exit 1 diff --git a/bin/install-horizon.sh b/bin/install-horizon.sh index 5e2ad6cc9..ceba991ef 100755 --- a/bin/install-horizon.sh +++ b/bin/install-horizon.sh @@ -4,9 +4,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/horizon" BASE_OVERRIDES="/opt/genestack/base-helm-configs/horizon/horizon-helm-overrides.yaml" -pushd /opt/genestack/submodules/openstack-helm || exit 1 - -HELM_CMD="helm upgrade --install horizon ./horizon \ +HELM_CMD="helm upgrade --install horizon openstack-helm/horizon --version 2024.2.264+13651f45-628a320c \ --namespace=openstack \ --timeout 120m" @@ -32,8 +30,9 @@ HELM_CMD+=" --set endpoints.oslo_db.auth.horizon.password=\"\$(kubectl --namespa HELM_CMD+=" --post-renderer /etc/genestack/kustomize/kustomize.sh" HELM_CMD+=" --post-renderer-args horizon/overlay $*" +helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm +helm repo update + echo "Executing Helm command:" echo "${HELM_CMD}" eval "${HELM_CMD}" - -popd || exit 1 diff --git a/bin/install-keystone.sh b/bin/install-keystone.sh index 99ee6f7d2..84d3d99c3 100755 --- a/bin/install-keystone.sh +++ b/bin/install-keystone.sh @@ -4,9 +4,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/keystone" BASE_OVERRIDES="/opt/genestack/base-helm-configs/keystone/keystone-helm-overrides.yaml" -pushd /opt/genestack/submodules/openstack-helm || exit 1 - -HELM_CMD="helm upgrade --install keystone ./keystone \ +HELM_CMD="helm upgrade --install keystone openstack-helm/keystone --version 2024.2.386+13651f45-628a320c \ --namespace=openstack \ --timeout 120m" @@ -34,8 +32,9 @@ HELM_CMD+=" --set endpoints.oslo_messaging.auth.keystone.password=\"\$(kubectl - HELM_CMD+=" --post-renderer /etc/genestack/kustomize/kustomize.sh" HELM_CMD+=" --post-renderer-args keystone/overlay $*" +helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm +helm repo update + echo "Executing Helm command:" echo "${HELM_CMD}" eval "${HELM_CMD}" - -popd || exit 1 diff --git a/bin/install-kube-ovn.sh b/bin/install-kube-ovn.sh index 4071dc432..f7fe1e867 100755 --- a/bin/install-kube-ovn.sh +++ b/bin/install-kube-ovn.sh @@ -4,7 +4,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/kube-ovn" BASE_OVERRIDES="/opt/genestack/base-helm-configs/kube-ovn/kube-ovn-helm-overrides.yaml" -KUBE_OVN_VERSION="v1.12.30" +KUBE_OVN_VERSION="v1.12.31" MASTER_NODES=$(kubectl get nodes -l kube-ovn/role=master -o json | jq -r '[.items[].status.addresses[] | select(.type == "InternalIP") | .address] | join(",")' | sed 's/,/\\,/g') MASTER_NODE_COUNT=$(kubectl get nodes -l kube-ovn/role=master -o json | jq -r '.items[].status.addresses[] | select(.type=="InternalIP") | .address' | wc -l) diff --git a/bin/install-libvirt.sh b/bin/install-libvirt.sh index 572c6b62f..ca4d6dc59 100755 --- a/bin/install-libvirt.sh +++ b/bin/install-libvirt.sh @@ -5,9 +5,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/libvirt" BASE_OVERRIDES="/opt/genestack/base-helm-configs/libvirt/libvirt-helm-overrides.yaml" -pushd /opt/genestack/submodules/openstack-helm-infra || exit 1 - -HELM_CMD="helm upgrade --install libvirt ./libvirt \ +HELM_CMD="helm upgrade --install libvirt openstack-helm-infra/libvirt --version 2024.2.92+628a320c \ 
--namespace=openstack \ --timeout 120m" @@ -26,8 +24,9 @@ done HELM_CMD+=" $@" +helm repo add openstack-helm-infra https://tarballs.opendev.org/openstack/openstack-helm-infra +helm repo update + echo "Executing Helm command:" echo "${HELM_CMD}" eval "${HELM_CMD}" - -popd || exit 1 diff --git a/bin/install-magnum.sh b/bin/install-magnum.sh index 4678b1204..89a5b7d7d 100755 --- a/bin/install-magnum.sh +++ b/bin/install-magnum.sh @@ -4,9 +4,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/magnum" BASE_OVERRIDES="/opt/genestack/base-helm-configs/magnum/magnum-helm-overrides.yaml" -pushd /opt/genestack/submodules/openstack-helm || exit 1 - -HELM_CMD="helm upgrade --install magnum ./magnum \ +HELM_CMD="helm upgrade --install magnum openstack-helm/magnum --version 2024.2.157+13651f45-628a320c \ --namespace=openstack \ --timeout 120m" @@ -35,8 +33,9 @@ HELM_CMD+=" --set conf.magnum.keystone_authtoken.memcache_secret_key=\"\$(kubect HELM_CMD+=" --post-renderer /etc/genestack/kustomize/kustomize.sh" HELM_CMD+=" --post-renderer-args magnum/overlay $*" +helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm +helm repo update + echo "Executing Helm command:" echo "${HELM_CMD}" eval "${HELM_CMD}" - -popd || exit 1 diff --git a/bin/install-neutron.sh b/bin/install-neutron.sh index 66a11d88a..40df28bd5 100755 --- a/bin/install-neutron.sh +++ b/bin/install-neutron.sh @@ -4,9 +4,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/neutron" BASE_OVERRIDES="/opt/genestack/base-helm-configs/neutron/neutron-helm-overrides.yaml" -pushd /opt/genestack/submodules/openstack-helm || exit 1 - -HELM_CMD="helm upgrade --install neutron ./neutron \ +HELM_CMD="helm upgrade --install neutron openstack-helm/neutron --version 2024.2.529+13651f45-628a320c \ --namespace=openstack \ --timeout 120m" @@ -46,8 +44,9 @@ HELM_CMD+=" --set conf.plugins.ml2_conf.ovn.ovn_sb_connection=\"tcp:\$(kubectl - HELM_CMD+=" --post-renderer /etc/genestack/kustomize/kustomize.sh" HELM_CMD+=" --post-renderer-args neutron/overlay $*" +helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm +helm repo update + echo "Executing Helm command:" echo "${HELM_CMD}" eval "${HELM_CMD}" - -popd || exit 1 diff --git a/bin/install-nginx-gateway.sh b/bin/install-nginx-gateway.sh new file mode 100755 index 000000000..a3fdd402b --- /dev/null +++ b/bin/install-nginx-gateway.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# shellcheck disable=SC2124,SC2145,SC2294 + +GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" +SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/nginx-gateway-fabric" +BASE_OVERRIDES="/opt/genestack/base-helm-configs/nginx-gateway-fabric/helm-overrides.yaml" +NGINX_VERSION="1.4.0" +HELM_CMD="helm upgrade --install nginx-gateway-fabric oci://ghcr.io/nginx/charts/nginx-gateway-fabric \ + --create-namespace \ + --namespace=nginx-gateway \ + --post-renderer /etc/genestack/kustomize/kustomize.sh \ + --post-renderer-args gateway/overlay \ + --version ${NGINX_VERSION}" + +HELM_CMD+=" -f ${BASE_OVERRIDES}" + +for dir in "$GLOBAL_OVERRIDES_DIR" "$SERVICE_CONFIG_DIR"; do + if compgen -G "${dir}/*.yaml" > /dev/null; then + for yaml_file in "${dir}"/*.yaml; do + # Avoid re-adding the base override file if present in the service directory + if [ "${yaml_file}" != "${BASE_OVERRIDES}" ]; then + HELM_CMD+=" -f ${yaml_file}" + fi + done + fi +done + +HELM_CMD+=" $@" + 
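+# Apply the Gateway API standard CRDs (pinned to the chart version) and the nginx-gateway namespace before running the Helm install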
+kubectl kustomize "https://github.com/nginxinc/nginx-gateway-fabric/config/crd/gateway-api/standard?ref=v${NGINX_VERSION}" | kubectl apply -f - + +kubectl apply -f /opt/genestack/manifests/nginx-gateway/nginx-gateway-namespace.yaml + +echo "Executing Helm command:" +echo "${HELM_CMD}" +eval "${HELM_CMD}" diff --git a/bin/install-nova.sh b/bin/install-nova.sh index 521ed5a81..16cda0001 100755 --- a/bin/install-nova.sh +++ b/bin/install-nova.sh @@ -4,9 +4,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/nova" BASE_OVERRIDES="/opt/genestack/base-helm-configs/nova/nova-helm-overrides.yaml" -pushd /opt/genestack/submodules/openstack-helm || exit 1 - -HELM_CMD="helm upgrade --install nova ./nova \ +HELM_CMD="helm upgrade --install nova openstack-helm/nova --version 2024.2.555+13651f45-628a320c \ --namespace=openstack \ --timeout 120m" @@ -49,8 +47,9 @@ HELM_CMD+=" --set network.ssh.private_key=\"\$(kubectl -n openstack get secret n HELM_CMD+=" --post-renderer /etc/genestack/kustomize/kustomize.sh" HELM_CMD+=" --post-renderer-args nova/overlay $*" +helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm +helm repo update + echo "Executing Helm command:" echo "${HELM_CMD}" eval "${HELM_CMD}" - -popd || exit 1 diff --git a/bin/install-octavia.sh b/bin/install-octavia.sh index 11c777108..c5fac40b8 100755 --- a/bin/install-octavia.sh +++ b/bin/install-octavia.sh @@ -4,9 +4,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/octavia" BASE_OVERRIDES="/opt/genestack/base-helm-configs/octavia/octavia-helm-overrides.yaml" -pushd /opt/genestack/submodules/openstack-helm || exit 1 - -HELM_CMD="helm upgrade --install octavia ./octavia \ +HELM_CMD="helm upgrade --install octavia openstack-helm/octavia --version 2024.2.30+13651f45-628a320c \ --namespace=openstack \ --timeout 120m" @@ -39,8 +37,9 @@ HELM_CMD+=" --set conf.octavia.ovn.ovn_sb_connection=\"tcp:\$(kubectl --namespac HELM_CMD+=" --post-renderer /etc/genestack/kustomize/kustomize.sh" HELM_CMD+=" --post-renderer-args octavia/overlay $*" +helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm +helm repo update + echo "Executing Helm command:" echo "${HELM_CMD}" eval "${HELM_CMD}" - -popd || exit 1 diff --git a/bin/install-placement.sh b/bin/install-placement.sh index 6e7b0348b..60dce947c 100755 --- a/bin/install-placement.sh +++ b/bin/install-placement.sh @@ -4,9 +4,7 @@ GLOBAL_OVERRIDES_DIR="/etc/genestack/helm-configs/global_overrides" SERVICE_CONFIG_DIR="/etc/genestack/helm-configs/placement" BASE_OVERRIDES="/opt/genestack/base-helm-configs/placement/placement-helm-overrides.yaml" -pushd /opt/genestack/submodules/openstack-helm || exit 1 - -HELM_CMD="helm upgrade --install placement ./placement \ +HELM_CMD="helm upgrade --install placement openstack-helm/placement --version 2024.2.62+13651f45-628a320c \ --namespace=openstack \ --timeout 120m" @@ -38,5 +36,3 @@ HELM_CMD+=" --post-renderer-args placement/overlay $*" echo "Executing Helm command:" echo "${HELM_CMD}" eval "${HELM_CMD}" - -popd || exit 1 diff --git a/bin/install-sealed-secrets.sh b/bin/install-sealed-secrets.sh new file mode 100644 index 000000000..12f71ba1d --- /dev/null +++ b/bin/install-sealed-secrets.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +# Default parameter value +TARGET=${1:-base} + +# Directory to check for YAML files +CONFIG_DIR="/etc/genestack/helm-configs/sealed-secrets" + +# Helm 
command setup +HELM_CMD="helm upgrade --install sealed-secrets oci://registry-1.docker.io/bitnamicharts/sealed-secrets \ + --namespace=sealed-secrets \ + --timeout 120m \ + --post-renderer /etc/genestack/kustomize/kustomize.sh \ + --post-renderer-args sealed-secrets/${TARGET} \ + -f /opt/genestack/base-helm-configs/sealed-secrets/helm-sealed-secrets-overrides.yaml" + +# Check if YAML files exist in the specified directory +if compgen -G "${CONFIG_DIR}/*.yaml" > /dev/null; then + # Add all YAML files from the directory to the helm command + for yaml_file in "${CONFIG_DIR}"/*.yaml; do + HELM_CMD+=" -f ${yaml_file}" + done +fi + +# Run the helm command +echo "Executing Helm command:" +echo "${HELM_CMD}" +eval "${HELM_CMD}" diff --git a/bin/install-template.sh b/bin/install-template.sh deleted file mode 100755 index 128d2ad43..000000000 --- a/bin/install-template.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -# shellcheck disable=SC2124,SC2145,SC2294 - -# Directory to check for YAML files -ALL_DIR="/etc/genestack/helm-configs/all" -CONFIG_DIR="/etc/genestack/helm-configs/libvirt" - -pushd /opt/genestack/submodules/openstack-helm || exit - -# Base helm upgrade command -HELM_CMD="helm upgrade --install libvirt ./libvirt \ - --namespace=openstack \ - --timeout 120m" - -# Add the base overrides file -HELM_CMD+=" -f /opt/genestack/base-helm-configs/libvirt/libvirt-helm-overrides.yaml" - -# Check if YAML files exist in the specified directory -if compgen -G "${ALL_DIR}/*.yaml" > /dev/null; then - # Append all YAML files from the directory to the helm command - for yaml_file in "${ALL_DIR}"/*.yaml; do - HELM_CMD+=" -f ${yaml_file}" - done -fi - -# Check if YAML files exist in the specified directory -if compgen -G "${CONFIG_DIR}/*.yaml" > /dev/null; then - # Append all YAML files from the directory to the helm command - for yaml_file in "${CONFIG_DIR}"/*.yaml; do - HELM_CMD+=" -f ${yaml_file}" - done -fi - -HELM_CMD+=" $@" - -# Run the helm command -echo "Executing Helm command:" -echo "${HELM_CMD}" -eval "${HELM_CMD}" - -popd || exit diff --git a/bin/install-topolvm.sh b/bin/install-topolvm.sh new file mode 100755 index 000000000..8d04094b6 --- /dev/null +++ b/bin/install-topolvm.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# Default parameter value +TARGET=${1:-base} + +# Directory to check for YAML files +CONFIG_DIR="/etc/genestack/helm-configs/topolvm" + +# Add the topolvm helm repository +helm repo add topolvm https://topolvm.github.io/topolvm +helm repo update + +# Helm command setup +HELM_CMD="helm upgrade --install topolvm topolvm/topolvm \ + --create-namespace --namespace=topolvm-system \ + --timeout 120m \ + --post-renderer /etc/genestack/kustomize/kustomize.sh \ + --post-renderer-args topolvm/${TARGET} \ + -f /opt/genestack/base-helm-configs/topolvm/helm-topolvm-overrides.yaml" + +# Check if YAML files exist in the specified directory +if compgen -G "${CONFIG_DIR}/*.yaml" > /dev/null; then + # Add all YAML files from the directory to the helm command + for yaml_file in "${CONFIG_DIR}"/*.yaml; do + HELM_CMD+=" -f ${yaml_file}" + done +fi + +# Run the helm command +echo "Executing Helm command:" +echo "${HELM_CMD}" +eval "${HELM_CMD}" diff --git a/bin/setup-envoy-gateway.sh b/bin/setup-envoy-gateway.sh new file mode 100755 index 000000000..872d461b7 --- /dev/null +++ b/bin/setup-envoy-gateway.sh @@ -0,0 +1,68 @@ +#!/bin/bash +# shellcheck disable=SC2045,SC2124,SC2145,SC2164,SC2236,SC2294 + +if [ -z "${ACME_EMAIL}" ]; then + read -rp "Enter a valid email address for use with ACME, press enter to skip: "
ACME_EMAIL +fi + +if [ -z "${GATEWAY_DOMAIN}" ]; then + echo "The domain name for the gateway is required, if you do not have a domain name press enter to use the default" + read -rp "Enter the domain name for the gateway [cluster.local]: " GATEWAY_DOMAIN + export GATEWAY_DOMAIN=${GATEWAY_DOMAIN:-cluster.local} +fi + +if [ -z "${GATEWAY_DOMAIN}" ]; then + echo "Gateway domain is required" + exit 1 +fi + +kubectl apply -k /etc/genestack/kustomize/envoyproxy-gateway/overlay + +echo "Waiting for the gateway to be programmed" +kubectl -n envoy-gateway wait --timeout=5m gateways.gateway.networking.k8s.io flex-gateway --for=condition=Programmed + +if [ ! -z "${ACME_EMAIL}" ]; then + cat < "/tmp/${route}" + sed -i 's/namespace: nginx-gateway/namespace: envoy-gateway/g' "/tmp/${route}" + sudo mv -v "/tmp/${route}" "/etc/genestack/gateway-api/routes/${route}" +done + +kubectl apply -f /etc/genestack/gateway-api/routes + +sudo mkdir -p /etc/genestack/gateway-api/listeners +for listener in $(ls -1 /opt/genestack/etc/gateway-api/listeners); do + sed "s/your.domain.tld/${GATEWAY_DOMAIN}/g" "/opt/genestack/etc/gateway-api/listeners/${listener}" > "/tmp/${listener}" + sudo mv -v "/tmp/${listener}" "/etc/genestack/gateway-api/listeners/${listener}" +done + +kubectl patch -n envoy-gateway gateway flex-gateway \ + --type='json' \ + --patch="$(jq -s 'flatten | .' /etc/genestack/gateway-api/listeners/*)" + +echo "Setup Complete" diff --git a/bin/setup-infrastructure.sh b/bin/setup-infrastructure.sh new file mode 100755 index 000000000..de237b1d2 --- /dev/null +++ b/bin/setup-infrastructure.sh @@ -0,0 +1,177 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2124,SC2145,SC2294,SC2086,SC2087,SC2155 +set -e + +set -o pipefail + +if [ -z "${ACME_EMAIL}" ]; then + read -rp "Enter a valid email address for use with ACME, press enter to skip: " ACME_EMAIL + export ACME_EMAIL="${ACME_EMAIL:-}" +fi + +if [ -z "${GATEWAY_DOMAIN}" ]; then + echo "The domain name for the gateway is required, if you do not have a domain name press enter to use the default" + read -rp "Enter the domain name for the gateway [cluster.local]: " GATEWAY_DOMAIN + export GATEWAY_DOMAIN="${GATEWAY_DOMAIN:-cluster.local}" +fi + +if [ "${HYPERCONVERGED:-false}" = "true" ]; then + kubectl label node --all openstack-control-plane=enabled \ + openstack-compute-node=enabled \ + openstack-network-node=enabled \ + openstack-storage-node=enabled \ + node-role.kubernetes.io/worker=worker +else + LABEL_FAIL=0 + for label in openstack-control-plane=enabled \ + openstack-compute-node=enabled \ + openstack-network-node=enabled \ + openstack-storage-node=enabled \ + node-role.kubernetes.io/worker=worker; do + if [ -z "$(kubectl get nodes -l "${label}" -o name)" ]; then + echo "[FAILURE] No nodes with the label ${label} found, please label the nodes you want to use for the OpenStack deployment" + LABEL_FAIL=1 + fi + done + if [ "${LABEL_FAIL}" -eq 1 ]; then + exit 1 + fi +fi + +kubectl label node -l beta.kubernetes.io/os=linux kubernetes.io/os=linux +kubectl label node -l node-role.kubernetes.io/control-plane kube-ovn/role=master +kubectl label node -l ovn.kubernetes.io/ovs_dp_type!=userspace ovn.kubernetes.io/ovs_dp_type=kernel +kubectl label node -l node-role.kubernetes.io/control-plane longhorn.io/storage-node=enabled + +if ! 
kubectl taint nodes -l node-role.kubernetes.io/control-plane node-role.kubernetes.io/control-plane:NoSchedule-; then + echo "Taint already removed" +fi + +if [ -z "${CONTAINER_INTERFACE}" ]; then + export CONTAINER_INTERFACE=$(ip -details -json link show | \ + jq -r '[.[] | if .linkinfo.info_kind // .link_type == "loopback" or + (.ifname | test("idrac+")) then empty else .ifname end ] | .[0]') + echo "[WARNING] The interface for the OVN network is required." + echo " The script will use the default route interface ${CONTAINER_INTERFACE}" +fi + +if [ -z "${CONTAINER_VLAN_INTERFACE}" ]; then + echo "[WARNING] The vlan interface for the OVN network is required." + echo " The script will use the default route interface ${CONTAINER_INTERFACE}" + export CONTAINER_VLAN_INTERFACE="${CONTAINER_INTERFACE}" +fi + +if [ -z "${COMPUTE_INTERFACE}" ]; then + export COMPUTE_INTERFACE=$(ip -details -json link show | \ + jq -r '[.[] | if .linkinfo.info_kind // .link_type == "loopback" or + (.ifname | test("idrac+")) then empty else .ifname end ] | .[-1]') + echo "[WARNING] The interface for the compute network is required." + echo " The script will use the last interface found ${COMPUTE_INTERFACE}" +fi + +if [ "${COMPUTE_INTERFACE}" = "${CONTAINER_INTERFACE}" ]; then + echo "[ERROR] The compute interface cannot be the same as the container interface" + exit 1 +fi + +kubectl annotate \ + nodes \ + -l openstack-compute-node=enabled -l openstack-network-node=enabled \ + ovn.openstack.org/int_bridge='br-int' +kubectl annotate \ + nodes \ + -l openstack-compute-node=enabled -l openstack-network-node=enabled \ + ovn.openstack.org/bridges='br-ex' +kubectl annotate \ + nodes \ + -l openstack-compute-node=enabled -l openstack-network-node=enabled \ + ovn.openstack.org/ports="br-ex:${COMPUTE_INTERFACE}" +kubectl annotate \ + nodes \ + -l openstack-compute-node=enabled -l openstack-network-node=enabled \ + ovn.openstack.org/mappings='physnet1:br-ex' +kubectl annotate \ + nodes \ + -l openstack-compute-node=enabled -l openstack-network-node=enabled \ + ovn.openstack.org/availability_zones='az1' +kubectl annotate \ + nodes \ + -l openstack-network-node=enabled \ + ovn.openstack.org/gateway='enabled' + +# Deploy kube-ovn +if [ ! -f /etc/genestack/helm-configs/kube-ovn/kube-ovn-helm-overrides.yaml ]; then +cat > /etc/genestack/helm-configs/kube-ovn/kube-ovn-helm-overrides.yaml < "/tmp/${route}" + sudo mv -v "/tmp/${route}" "/etc/genestack/gateway-api/routes/${route}" +done + +kubectl apply -f /etc/genestack/gateway-api/routes + +sudo mkdir -p /etc/genestack/gateway-api/listeners +for listener in $(ls -1 /opt/genestack/etc/gateway-api/listeners); do + sed "s/your.domain.tld/${GATEWAY_DOMAIN}/g" "/opt/genestack/etc/gateway-api/listeners/${listener}" > "/tmp/${listener}" + sudo mv -v "/tmp/${listener}" "/etc/genestack/gateway-api/listeners/${listener}" +done + +kubectl patch -n nginx-gateway gateway flex-gateway \ + --type='json' \ + --patch="$(jq -s 'flatten | .' /etc/genestack/gateway-api/listeners/*)" + +echo "Setup Complete" diff --git a/bin/setup-openstack-rc.sh b/bin/setup-openstack-rc.sh new file mode 100755 index 000000000..4f8065443 --- /dev/null +++ b/bin/setup-openstack-rc.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +set -e + +function installYq() { + export VERSION=v4.2.0 + export BINARY=yq_linux_amd64 + wget https://github.com/mikefarah/yq/releases/download/${VERSION}/${BINARY}.tar.gz -q -O - | tar xz && mv ${BINARY} /usr/local/bin/yq +} + +if ! yq --version 2> /dev/null; then + echo "yq is not installed. 
Attempting to install yq" + installYq +fi + +USER_NAME="$(whoami)" +USER_PATH="$(getent passwd ${USER_NAME} | awk -F':' '{print $6}')" +CONFIG_PATH="${USER_PATH}/.config/openstack" +CONFIG_FILE="${CONFIG_PATH}/genestack-clouds.yaml" + +mkdir -p "${CONFIG_PATH}" + +cat > "${CONFIG_FILE}" <Variable | Description |
Default
| +|----------|-------------|---------| +| `ACME_EMAIL` | Email address for Let's Encrypt. If an email address is defined and a real domain is used, the deployment will attempt to pull production certificates. | "" | +| `GATEWAY_DOMAIN` | Domain name used for routes within the gateway API. If a valid domain is used, it will be associated with the gateway routes. | "cluster.local" | +| `OS_CLOUD` | OpenStack cloud name. | "default" | +| `OS_FLAVOR` | OpenStack instance flavor, this will automatically select a flavor with < 24GiB of RAM. | "gp.X.8.16" | +| `OS_IMAGE` | OpenStack image name. | "Ubuntu 20.04" | +| `HYPERCONVERGED_DEV` | enable hyperconverged development mode. This will attempt to sync a local copy of Genestack to the development environment. | `false` | +| `LAB_NAME_PREFIX` | Prefix for the lab environment. Useful when building multiple labs in a single project | "hyperconverged" | -### Create a VirtualEnv +All of the variables can be defined on the command line using environment variables. -This is optional but always recommended. There are multiple tools for this, pick your poison. +!!! example "Deploying a Hyper-converged Lab Environment with Environment Variables" -### Install Ansible Dependencies + ``` shell + export ACME_EMAIL="user@domain.com" + export GATEWAY_DOMAIN="cluster.local" + export OS_CLOUD="default" + export OS_FLAVOR="gp.0.8.16" + export OS_IMAGE="Ubuntu 20.04" + export HYPERCONVERGED_DEV="false" + /opt/genestack/scripts/hyperconverged-lab.sh + ``` -!!! info +## Overview - Activate your venv if you're using one. +A simple reference architecture for a hyper-converged lab environment is shown below. This environment consists of three nodes +that are connected to a two networks. The networks are connected via a router that provides external connectivity. -``` -pip install ansible openstacksdk -``` - -### Configure openstack client - -The openstacksdk used by the ansible playbook needs a valid configuration to your environment to stand up the test resources. - -An example `clouds.yaml`: - -``` yaml -cache: - auth: true - expiration_time: 3600 -clouds: - dfw: - auth: - auth_url: https://$YOUR_KEYSTONE_HOST/v3 - project_name: $YOUR_PROJECT_ID - project_domain_name: $YOUR_PROJECT_DOMAIN - username: $YOUR_USER - password: $YOUR_PASS - user_domain_name: $YOUR_USER_DOMAIN - region_name: - - DFW3 - interface: public - identity_api_version: "3" -``` +``` mermaid +flowchart TB + %% Define clusters/subgraphs for clarity + subgraph Public_Network + PF["Floating IP
(203.0.113.x)"] + end -See the configuration guide [here](https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html) for more examples. + subgraph Router + TR["hyperconverged-router
(with external gateway)"] + end -## Create a Test Environment + subgraph Hyperconverged_Net + TN["hyperconverged-net
(192.168.100.x)"] + end -!!! abstract + subgraph Hyperconverged_Compute_Net + TCN["hyperconverged-compute-net
(192.168.102.x)"] + end - This is used to deploy new infra on an existing OpenStack cloud. If you're deploying on baremetal this document can be skipped. + %% Hyperconverged Nodes + subgraph Node_0 + HPC0["hyperconverged-0"] + end -If deploying in a lab environment on an OpenStack cloud, you can run the `infra-deploy.yaml` playbook which will create all of the resources needed to operate the test environment. + subgraph Node_1 + HPC1["hyperconverged-1"] + end -Before running the `infra-deploy.yaml` playbook, be sure you have the required ansible collections installed. + subgraph Node_2 + HPC2["hyperconverged-2"] + end -``` shell -ansible-galaxy collection install -r ansible-collection-requirements.yml -``` + %% Connections + PF --> TR + TR --> TN -Move to the ansible playbooks directory within Genestack. + TN -- mgmt port --> HPC0 + TN -- mgmt port --> HPC1 + TN -- mgmt port --> HPC2 -``` shell -cd ansible/playbooks + HPC0 -- compute port --> TCN + HPC1 -- compute port --> TCN + HPC2 -- compute port --> TCN ``` -Run the test infrastructure deployment. - -!!! tip +## Build Phases - Ensure `os_cloud_name` as well as other values within your `infra-deploy.yaml` match a valid cloud name in your openstack configuration as well as resource names within it. +The deployment script will perform the following steps: -!!! note - - Pay close attention to the values for both `kube_ovn_iface` and `kube_ovn_default_interface_name`, they will need to match the desired interface name(s) within your test hosts! - -``` shell -ansible-playbook -i localhost, infra-deploy.yaml -``` - -Here's an example where all of the cloud defaults have been overridden to use known options within an OpenStack Cloud environment. - -``` shell -ansible-playbook -i localhost, infra-deploy.yaml -e os_image_id=Ubuntu-22.04 \ - -e os_cloud_name=dfw \ - -e os_launcher_flavor=m1.small \ - -e os_node_flavor=m1.large -``` +- Create a new OpenStack router +- Create a new OpenStack networks +- Create a new OpenStack security groups +- Create a new OpenStack ports +- Create a new OpenStack keypair +- Create a new OpenStack instance +- Create a new OpenStack floating IP +- Execute the basic Genestack installation -The test infrastructure will create the following OpenStack resources. +## Post Deployment -* Neutron Network/Subnet - * Assign a floating IP -* Cinder Volumes -* Nova Servers +After the deployment is complete, the script will output the internal and external floating IP address information. -The result of the playbook will look something like this. - -![lab-skyline-diagram](assets/images/lab-diagram.png) - -* The first three nodes within the build playbook will be assumed as controllers -* The last three nodes will be assumed to be storage nodes with 3 volumes attached to them each -* All other nodes will be assumed to be compute nodes. - -### Running the deployment - -The lab deployment playbook will build an environment suitable for running Genestack, however, it does not by itself run the full deployment. Once your resources are online, you can login to the "launcher" node and begin running the deployment. To make things fairly simple, the working development directory will be sync'd to the launcher node, along with keys and your generated inventory. - -!!! tip - - If you're wanting to inspect the generated inventory, you can find it in your home directory. - -### SSH to lab - -If you have not set your .ssh config do not forget to put in your path for your openstack-keypair. 
Your Ip will be present after running the infra-deploy.yaml. - -``` shell -ssh -i /path/to/.ssh/openstack-keypair.key ubuntu@X.X.X.X - -``` +With this information, operators can log in to the Genestack instance and begin to explore the platform. ## Demo -[![asciicast](https://asciinema.org/a/629776.svg)](https://asciinema.org/a/629776) +[![asciicast](https://asciinema.org/a/706976.svg)](https://asciinema.org/a/706976) diff --git a/docs/grafana.md b/docs/grafana.md index 655619401..8624cbc0f 100644 --- a/docs/grafana.md +++ b/docs/grafana.md @@ -63,7 +63,7 @@ Before running the deployment script, you must set the `custom_host` value `graf ### Listeners and Routes -Listeners and Routes should have been configureed when you installed the Gateway API. If so some reason they were not created, please following the install guide here: [Gateway API](infrastructure-gateway-api-custom.md) +Listeners and Routes should have been configured when you installed the Gateway API. If for some reason they were not created, please follow the install guide here: [Gateway API](infrastructure-gateway-api.md) ### Deployment diff --git a/docs/infrastructure-argocd.md b/docs/infrastructure-argocd.md new file mode 100644 index 000000000..e46e7255a --- /dev/null +++ b/docs/infrastructure-argocd.md @@ -0,0 +1,16 @@ +# Deploy ArgoCD + +## Install ArgoCD + +!!! example "Run the argocd deployment Script `bin/install-argocd.sh`" + + ``` shell + --8<-- "bin/install-argocd.sh" + ``` + + +## Verify readiness with the following command. + +``` shell +kubectl --namespace argocd get horizontalpodautoscaler.autoscaling argocd -w +``` diff --git a/docs/infrastructure-envoy-gateway-api.md b/docs/infrastructure-envoy-gateway-api.md new file mode 100644 index 000000000..415ff5b5f --- /dev/null +++ b/docs/infrastructure-envoy-gateway-api.md @@ -0,0 +1,64 @@ +--- +hide: + - footer +--- + +# Envoy Gateway API + +The [Envoy Gateway](https://gateway.envoyproxy.io/) is an open-source project that provides an implementation +of the Gateway API using Envoyproxy as the data plane. The Gateway API is a set of APIs that allow users to configure +API gateways using a declarative configuration model. + +## Installation + +Run the helm command to install Envoy Gateway. + +??? example "Run the Envoy Gateway deployment Script `/opt/genestack/bin/install-envoy-gateway.sh`" + + ``` shell + --8<-- "bin/install-envoy-gateway.sh" + ``` + +The install script will deploy Envoy Gateway to the `envoy-gateway-system` namespace via Helm. + +## Setup + +??? example "Run the Envoy Gateway setup Script `/opt/genestack/bin/setup-envoy-gateway.sh`" + + ``` shell + --8<-- "bin/setup-envoy-gateway.sh" + ``` + +The setup script will ask the following questions: + +* Enter a valid email address for use with ACME, press enter to skip + +* Enter the domain name for the gateway + +These values will be used to generate a certificate for the gateway and set the routes used within the flex-gateway, +typically for OpenStack. This script can also be fully automated by providing the required values as arguments. + +!!! example "Run the Envoy Gateway setup Script with arguments" + + ``` shell + ACME_EMAIL="username@your.domain.tld" GATEWAY_DOMAIN="your.domain.tld" /opt/genestack/bin/setup-envoy-gateway.sh + ``` + +## Validation + +At this stage, Envoy Gateway should be operational. To validate the configuration, run the following commands.
+ +``` shell +kubectl -n openstack get httproute +``` + +``` shell +kubectl -n envoy-gateway get gateways.gateway.networking.k8s.io flex-gateway +``` + +## Troubleshooting + +If you encounter any issues, check the logs of the `envoy-gateway` deployment. + +``` shell +kubectl logs -n envoy-gateway-system deployment/envoy-gateway +``` diff --git a/docs/infrastructure-gateway-api.md b/docs/infrastructure-gateway-api.md index 9c6bfdb6e..a47d03284 100644 --- a/docs/infrastructure-gateway-api.md +++ b/docs/infrastructure-gateway-api.md @@ -1,3 +1,8 @@ +--- +hide: + - footer +--- + # Gateway API Gateway API is L4 and L7 layer routing project in Kubernetes. It represents next generation of k8s Ingress, LB and Service Mesh APIs. @@ -15,308 +20,10 @@ For more information on the project see: [Gateway API SIG.](https://gateway-api. External --> External_VIP_Address --> MetalLB_VIP_Address --> Gateway_Service ``` -## Move from Ingress to Gateway APIs - -Since Gateway APIs are successor to Ingress Controllers there needs to be a one time migration from Ingress to GW API resources. - -!!! tip "Learn more about migrating to the Gateway API: [Ingress Migration](https://gateway-api.sigs.k8s.io/guides/migrating-from-ingress/#migrating-from-ingress)" - -## Resource Models in Gateway API - -There are 3 main resource models in gateway apis: - -1. GatewayClass - Mostly managed by a controller. -2. Gateway - An instance of traffic handling infra like a LB. -3. Routes - Defines HTTP-specific rules for mapping traffic from a Gateway listener to a representation of backend network endpoints. - -!!! warning "k8s Gateway API is NOT the same as API Gateways" - -While both sound the same, API Gateway is a more of a general concept that defines a set of resources that exposes capabilities of a backend service but -also provide other functionalities like traffic management, rate limiting, authentication and more. It is geared towards commercial API management and monetisation. - -From the gateway api sig: - -!!! note - - Most Gateway API implementations are API Gateways to some extent, but not all API Gateways are Gateway API implementations. - -## Controller Selection - -There are various implementations of the Gateway API. In this document, we will cover two of them: - -=== "NGINX Gateway Fabric _(Recommended)_" - - [NGINX Gateway Fabric](https://github.com/nginxinc/nginx-gateway-fabric) is an open-source project that provides an implementation of the Gateway - API using nginx as the data plane. - - ### Create the Namespace - - ``` shell - kubectl apply -f /opt/genestack/manifests/nginx-gateway/nginx-gateway-namespace.yaml - ``` - - ### Install the Gateway API Resource from Kubernetes - - === "Stable _(Recommended)_" - - ``` shell - kubectl kustomize "https://github.com/nginxinc/nginx-gateway-fabric/config/crd/gateway-api/standard?ref=v1.4.0" | kubectl apply -f - - ``` - - === "Experimental" - - The experimental version of the Gateway API is available in the `v1.6.1` checkout. Use with caution. - - ``` shell - kubectl kustomize "https://github.com/nginx/nginx-gateway-fabric/config/crd/gateway-api/experimental?ref=v1.6.1" | kubectl apply -f - - ``` - - ### Install the NGINX Gateway Fabric controller - - !!! tip - - If attempting to perform an **upgrade** of an existing Gateway API deployment, note that the Helm install does not automatically upgrade the CRDs for - this resource. 
To upgrade them, refer to the process outlined by the - [Nginx upgrade documentation](https://docs.nginx.com/nginx-gateway-fabric/installation/installing-ngf/helm/#upgrade-nginx-gateway-fabric-crds). You - can safely ignore this note for new installations. - - === "Stable _(Recommended)_" - - ``` shell - pushd /opt/genestack/submodules/nginx-gateway-fabric/charts || exit 1 - helm upgrade --install nginx-gateway-fabric ./nginx-gateway-fabric \ - --namespace=nginx-gateway \ - --create-namespace \ - -f /opt/genestack/base-helm-configs/nginx-gateway-fabric/helm-overrides.yaml \ - -f /etc/genestack/helm-configs/nginx-gateway-fabric/helm-overrides.yaml \ - --post-renderer /etc/genestack/kustomize/kustomize.sh \ - --post-renderer-args gateway/overlay - popd || exit 1 - ``` - - === "Experimental" - - The experimental version of the Gateway API is available in the `v1.6.1` checkout. Use with caution. - - Update the submodule with the experimental version of the Gateway API. - - Edit the file `/etc/genestack/helm-configs/nginx-gateway-fabric/helm-overrides.yaml`. - - ``` yaml - nginxGateway: - replicaCount: 3 - gwAPIExperimentalFeatures: - enable: true - service: - ## The externalTrafficPolicy of the service. The value Local preserves the client source IP. - externalTrafficPolicy: Cluster - ## The annotations of the NGINX Gateway Fabric service. - annotations: - "metallb.universe.tf/address-pool": "gateway-api-external" - "metallb.universe.tf/allow-shared-ip": "openstack-external-svc" - ``` - - Run the helm command to install the experimental version of the Gateway API. - - ``` shell - helm upgrade --install nginx-gateway-fabric oci://ghcr.io/nginx/charts/nginx-gateway-fabric \ - --create-namespace \ - --namespace=nginx-gateway \ - -f /etc/genestack/helm-configs/nginx-gateway-fabric/helm-overrides.yaml \ - --post-renderer /etc/genestack/kustomize/kustomize.sh \ - --post-renderer-args gateway/overlay \ - --version 1.6.1 - ``` - - Once deployed ensure a system rollout has been completed for Cert Manager. - - ``` shell - kubectl rollout restart deployment cert-manager --namespace cert-manager - ``` - - ### Create the shared gateway resource - - ``` shell - kubectl kustomize /etc/genestack/kustomize/gateway/nginx-gateway-fabric | kubectl apply -f - - ``` - -=== "Envoyproxy" - - [Envoyproxy](https://gateway.envoyproxy.io/) is an open-source project that provides an implementation of the Gateway API using Envoyproxy as the data plane. - - ### Installation - - Update the `/etc/genestack/kustomize/envoyproxy-gateway/base/values.yaml` file according to your requirements. - - Apply the configuration using the following command: - - ``` shell - kubectl kustomize --enable-helm /etc/genestack/kustomize/envoyproxy-gateway/overlay | kubectl apply -f - - ``` - - ### After installation - - You need to create Gateway and HTTPRoute resources based on your requirements - - !!! example "exposing an application using Gateway API (Envoyproxy)" - - In this example, we will demonstrate how to expose an application through a gateway. Apply the Kustomize configuration which will create `Gateway` resource: - - ``` shell - kubectl kustomize /etc/genestack/kustomize/gateway/envoyproxy | kubectl apply -f - - ``` - - Once gateway is created, user can expose an application by creating `HTTPRoute` resource. - - ??? abstract "Sample `HTTPRoute` resource" - - ``` yaml - --8<-- "etc/gateway-api/gateway-envoy-http-routes.yaml" - ``` - - !!! 
example "Example modifying and apply the routes" - - ``` shell - mkdir -p /etc/genestack/gateway-api - sed 's/your.domain.tld//g' /opt/genestack/etc/gateway-api/gateway-envoy-http-routes.yaml > /etc/genestack/gateway-api/gateway-envoy-http-routes.yaml - kubectl apply -f /etc/genestack/gateway-api/gateway-envoy-http-routes.yaml - ``` - ----- - -## Deploy with Let's Encrypt Certificates - -By default, certificates are issued by an instance of the selfsigned-cluster-issuer. This section focuses on replacing that with a -Let's Encrypt issuer to ensure valid certificates are deployed in our cluster. - -[![asciicast](https://asciinema.org/a/h7npXnDjkSpn3uQtuQwWG9zju.svg)](https://asciinema.org/a/h7npXnDjkSpn3uQtuQwWG9zju) - -### Apply the Let's Encrypt Cluster Issuer - -Before we can have Cert Manager start coordinating Let's Encrypt certificate -requests for us, we need to add an ACME issuer with a valid, monitored -email (for expiration reminders and other important ACME related information). - -``` yaml -read -p "Enter a valid email address for use with ACME: " ACME_EMAIL; \ -cat </g' /opt/genestack/etc/gateway-api/listeners/$listener > /etc/genestack/gateway-api/listeners/$listener - done - ``` - -``` shell -kubectl patch -n nginx-gateway gateway flex-gateway \ - --type='json' \ - --patch="$(jq -s 'flatten | .' /etc/genestack/gateway-api/listeners/*)" -``` - -## Apply Related Gateway routes - -Another example with most of the OpenStack services is located at `/opt/genestack/etc/gateway-api/routes/http-wildcard-listener.yaml`. Similarly, you must modify -and apply them as shown below, or apply your own. - -??? abstract "Example routes file" - - ``` yaml - --8<-- "etc/gateway-api/routes/http-wildcard-listener.yaml" - ``` - -All routes can be found at `/etc/genestack/gateway-api/routes`. - -!!! example "Example modifying all available Gateway routes with `your.domain.tld`" - - ``` shell - mkdir -p /etc/genestack/gateway-api/routes - for route in $(ls -1 /opt/genestack/etc/gateway-api/routes); do - sed 's/your.domain.tld//g' /opt/genestack/etc/gateway-api/routes/$route > /etc/genestack/gateway-api/routes/$route - done - ``` - -``` shell -kubectl apply -f /etc/genestack/gateway-api/routes -``` - -## Patch Gateway with Let's Encrypt Cluster Issuer - -??? abstract "Example patch to enable LetsEncrypt `/etc/genestack/gateway-api/gateway-letsencrypt.yaml`" - - ``` yaml - --8<-- "etc/gateway-api/gateway-letsencrypt.yaml" - ``` - -``` shell -kubectl patch --namespace nginx-gateway \ - --type merge \ - --patch-file /etc/genestack/gateway-api/gateway-letsencrypt.yaml \ - gateway flex-gateway -``` - -## Example Implementation with Prometheus UI (NGINX Gateway Fabric) - -In this example we will look at how Prometheus UI is exposed through the gateway. For other services the gateway kustomization file for the service. - -First, create the shared gateway and then the httproute resource for prometheus. - -??? abstract "Example patch to enable Prometheus `/etc/genestack/gateway-api/gateway-prometheus.yaml`" - - ``` yaml - --8<-- "etc/gateway-api/gateway-prometheus.yaml" - ``` - -!!! 
example "Example modifying Prometheus' Gateway deployment" - - ``` shell - mkdir -p /etc/genestack/gateway-api - sed 's/your.domain.tld//g' /opt/genestack/etc/gateway-api/gateway-prometheus.yaml > /etc/genestack/gateway-api/gateway-prometheus.yaml - ``` - -``` shell -kubectl apply -f /etc/genestack/gateway-api/gateway-prometheus.yaml -``` - -At this point, flex-gateway has a listener pointed to the port 80 matching *.your.domain.tld hostname. The HTTPRoute resource configures routes -for this gateway. Here, we match all path and simply pass any request from the matching hostname to kube-prometheus-stack-prometheus backend service. +The k8s Gateway API is NOT the same an API Gateway. While both sound the same, API Gateway is a more of a general +concept that defines a set of resources that exposes capabilities of a backend service but also provide other +functionalities like traffic management, rate limiting, authentication and more. It is geared towards commercial +API management and monetisation. ## Cross Namespace Routing @@ -325,3 +32,21 @@ Namespace boundaries. This allows user access control to be applied differently control to different parts of the cluster-wide routing configuration. More information on cross namespace routing can be found [here](https://gateway-api.sigs.k8s.io/guides/multiple-ns/). + +## Resource Models in Gateway API + +| Type | Description | +| ---- | ----------- | +| [GatewayClass](https://gateway-api.sigs.k8s.io/api-types/gatewayclass/) | Represents a class of Gateway instances. | +| [Gateway](https://gateway-api.sigs.k8s.io/api-types/gateway/) | Represents a single Gateway instance. | +| [HTTPRoute](https://gateway-api.sigs.k8s.io/api-types/httproute/) | Represents a set of HTTP-specific rules for mapping traffic to a backend. | +| [Listener](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.Listener) | Represents a network endpoint that can accept incoming traffic. | + +## Choosing a Gateway API Implementation + +Within Genestack, multiple options are available for use as Gateway API implementations. The following table provides a comparison of the available options. + +| Backend Options | Status |
Overview
| +| --------------- | ------ | --------------------------------------- | +| [Envoy](infrastructure-envoy-gateway-api.md) | **Recommended** | Feature rich, large community, recommended for Production environments. | +| [NGINX](infrastructure-nginx-gateway-api.md) | | Stable codebase, simple implementation | diff --git a/docs/gateway-api-ca-issuer.md b/docs/infrastructure-nginx-gateway-api-ca-issuer.md similarity index 98% rename from docs/gateway-api-ca-issuer.md rename to docs/infrastructure-nginx-gateway-api-ca-issuer.md index badf5e6b1..f5e95d5fd 100644 --- a/docs/gateway-api-ca-issuer.md +++ b/docs/infrastructure-nginx-gateway-api-ca-issuer.md @@ -1,4 +1,4 @@ -# Creating a CA issuer for Gateway API +# NGINX Creating a CA issuer for Gateway API By default in Genestack the selfSigned issuer is used to issue certificates to Gateway API listeners. This is a fairly simple issuer to create and requires a very simple yaml manifest. Although the main purpose of the selfSigned issuer to create a local PKI i.e bootstrap a local self-signed CA which can then be used to issue certificates as required. This is helpful for test environments. The selfSigned issuer itself doesn't represent a certificate authority by rather indicates that the certificates will sign themselves. diff --git a/docs/infrastructure-gateway-api-custom.md b/docs/infrastructure-nginx-gateway-api-custom.md similarity index 100% rename from docs/infrastructure-gateway-api-custom.md rename to docs/infrastructure-nginx-gateway-api-custom.md diff --git a/docs/infrastructure-nginx-gateway-api.md b/docs/infrastructure-nginx-gateway-api.md new file mode 100644 index 000000000..e107a316d --- /dev/null +++ b/docs/infrastructure-nginx-gateway-api.md @@ -0,0 +1,57 @@ +--- +hide: + - footer +--- + +# NGINX Gateway API + +The [NGINX Gateway Fabric](https://github.com/nginxinc/nginx-gateway-fabric) is an open-source project that provides an +implementation of the Gateway API using NGINX as the data plane. + +## Installation + +Run the helm command to install NGINX Gateway. + +??? example "Run the NGINX Gateway deployment Script `/opt/genestack/bin/install-nginx-gateway.sh`" + + ``` shell + --8<-- "bin/install-nginx-gateway.sh" + ``` + +The install script will deploy NGINX Gateway to the `nginx-gateway` namespace via Helm. + +## Setup + +??? example "Run the NGINX Gateway setup Script `/opt/genestack/bin/setup-nginx-gateway.sh`" + + ``` shell + --8<-- "bin/setup-nginx-gateway.sh" + ``` + +The setup script will ask the following questions: + +* Enter a valid email address for use with ACME, press enter to skip" +* Enter the domain name for the gateway" + +These values will be used to generate a certificate for the gateway and set the routes used within the flex-gateway, +typically for OpenStack. This script can also be fully automated by providing the required values as arguments. + +!!! example "Run the NGINX Gateway setup Script with arguments" + + ``` shell + ACME_EMAIL="username@your.domain.tld" GATEWAY_DOMAIN="your.domain.tld" /opt/genestack/bin/setup-nginx-gateway.sh + ``` + +## Validation + +At this point, flex-gateway has a listener pointed to the port 80 matching *.your.domain.tld hostname. The +HTTPRoute resource configures routes for this gateway. Here, we match all path and simply pass any request +from the matching hostname to kube-prometheus-stack-prometheus backend service. 
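A rough sketch of what such an HTTPRoute can look like is shown below; the route name, hostname, and backend port are illustrative assumptions and may differ from the manifests shipped with Genestack.

``` shell
# Illustrative sketch only: the hostname and backend port (9090) are assumptions.
kubectl apply -f - <<EOF
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: custom-prometheus-gateway-route
  namespace: prometheus
spec:
  parentRefs:
    - name: flex-gateway
      namespace: nginx-gateway
  hostnames:
    - "prometheus.your.domain.tld"
  rules:
    - backendRefs:
        - name: kube-prometheus-stack-prometheus
          port: 9090
EOF
```

The commands below can then be used to confirm that the route and gateway have been accepted.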
+ +``` shell +kubectl -n openstack get httproute +``` + +``` shell +kubectl -n nginx-gateway get gateways.gateway.networking.k8s.io flex-gateway +``` diff --git a/docs/infrastructure-ovn-db-backup.md b/docs/infrastructure-ovn-db-backup.md index 86866755b..06cdbbc67 100644 --- a/docs/infrastructure-ovn-db-backup.md +++ b/docs/infrastructure-ovn-db-backup.md @@ -84,7 +84,7 @@ spec: - "/usr/bin/sleep" args: - "infinity" - image: docker.io/kubeovn/kube-ovn:v1.11.5 + image: docker.io/kubeovn/kube-ovn:v1.12.30 volumeMounts: - mountPath: /etc/ovn name: host-config-ovn diff --git a/docs/infrastructure-rabbitmq.md b/docs/infrastructure-rabbitmq.md index ab3530534..25c29e209 100644 --- a/docs/infrastructure-rabbitmq.md +++ b/docs/infrastructure-rabbitmq.md @@ -31,3 +31,44 @@ kubectl apply -k /etc/genestack/kustomize/rabbitmq-cluster/overlay ``` shell kubectl --namespace openstack get rabbitmqclusters.rabbitmq.com -w ``` + +## RabbitMQ Operator Monitoring + +RabbitMQ Operator provides ServiceMonitor and PodMonitor CRDs to expose scrape endpoints for rabbitmq +cluster and operator. + +!!! warning + + Make sure Prometheus Operator is deployed prior to running these commands. It will error out if the + rquired CRDs are not already installed. + +Check if the required CRDs are installed + +``` shell +kubectl get customresourcedefinitions.apiextensions.k8s.io servicemonitors.monitoring.coreos.com +``` + +if the CRDs are present you can run the following + +```shell +kubectl apply --filename https://raw.githubusercontent.com/rabbitmq/cluster-operator/main/observability/prometheus/monitors/rabbitmq-servicemonitor.yml + +kubectl apply --filename https://raw.githubusercontent.com/rabbitmq/cluster-operator/main/observability/prometheus/monitors/rabbitmq-cluster-operator-podmonitor.yml +``` + +then, + +```shell +for file in $(curl -s https://api.github.com/repos/rabbitmq/cluster-operator/contents/observability/prometheus/rules/rabbitmq | jq -r '.[].download_url'); do kubectl apply -n prometheus -f $file; done + +for file in $(curl -s https://api.github.com/repos/rabbitmq/cluster-operator/contents/observability/prometheus/rules/rabbitmq-per-object | jq -r '.[].download_url'); do kubectl apply -n prometheus -f $file; done +``` + +In order for these to work we need to also make sure that they match the `ruleSelector` from Prometheus deploy. +For genestack deploys run + +```shell +kubectl get prometheusrule -n prometheus -o name | xargs -I {} kubectl label -n prometheus {} release=kube-prometheus-stack --overwrite +``` +This will get all the rules in prometheus namespace and apply `release=kube-prometheus-stack` label. At this point the alerts will be configured +in prometheus. diff --git a/docs/infrastructure-sealed-secrets.md b/docs/infrastructure-sealed-secrets.md new file mode 100644 index 000000000..493548568 --- /dev/null +++ b/docs/infrastructure-sealed-secrets.md @@ -0,0 +1,16 @@ +# Deploy Sealed Secrets + +## Install sealed secrets + +!!! example "Run the deployment Script `bin/install-sealed-secrets.sh`" + + ``` shell + --8<-- "bin/install-sealed-secrets.sh" + ``` + + +## Verify readiness with the following command. 
+ +``` shell +kubectl --namespace sealed-secrets get horizontalpodautoscaler.autoscaling sealed-secrets -w +``` diff --git a/docs/k8s-kubespray.md b/docs/k8s-kubespray.md index f43ce22cc..0b5d88023 100644 --- a/docs/k8s-kubespray.md +++ b/docs/k8s-kubespray.md @@ -114,7 +114,7 @@ ansible-playbook host-setup.yml ``` shell cd /opt/genestack/submodules/kubespray - ansible-playbook cluster.yml + ansible-playbook cluster.yml --become ``` !!! tip diff --git a/docs/monitoring-getting-started.md b/docs/monitoring-getting-started.md index 03006b4e2..44ddfa4b8 100644 --- a/docs/monitoring-getting-started.md +++ b/docs/monitoring-getting-started.md @@ -43,10 +43,14 @@ Configure the alert manager to send the specified alerts to slack as an example, ### Update alerting rules -Within the genestack repo we can update our alerting rules via the alerting_rules.yaml to fit our needs +Within the genestack repo we can update our custom alerting rules via the alerting_rules.yaml to fit our needs View alerting_rules.yaml in: ``` shell less /etc/genestack/helm-configs/prometheus/alerting_rules.yaml ``` + +However, many opreators comes with ServiceMonitor and PodMonitor services. These services expose, scrape endpoints +out of the box. These operators will also provide alerting rules curated for the specific service. See specific +service install for any monitoring rules. Example: [RabbitMQ Operator Monitoring](infrastructure-rabbitmq.md#rabbitmq-operator-monitoring) diff --git a/docs/monitoring-info.md b/docs/monitoring-info.md index b732d667a..c2b440e96 100644 --- a/docs/monitoring-info.md +++ b/docs/monitoring-info.md @@ -110,7 +110,7 @@ Once we've ran the apply command we will have installed ServiceMonitors for Kube You can view more information about OVN monitoring in the [OVN Monitoring Introduction Docs](ovn-monitoring-introduction.md). * ### Nginx Gateway Monitoring: -Genestack makes use of the [Nginx Gateway Fabric](https://github.com/nginxinc/nginx-gateway-fabric/tree/main/charts/nginx-gateway-fabric) for its implementation of [Kubernetes Gateway API](https://gateway-api.sigs.k8s.io/). Genestack deploys this as part of its infrastructure, view the [Nginx Gateway Deployment Doc](infrastructure-gateway-api.md) for more information. +Genestack makes use of the Gateway API for its implementation of [Kubernetes Gateway API](https://gateway-api.sigs.k8s.io/). Genestack deploys this as part of its infrastructure, view the [Gateway Deployment Doc](infrastructure-gateway-api.md) for more information. Nginx Gateway does expose important metrics for us to gather but it does not do so via a service. Instead we must make use another Prometheus CRD the [PodMonitor](https://prometheus-operator.dev/docs/getting-started/design/#podmonitor). The install is similar to the above OVN monitoring as you can see in the [Nginx Gateway Exporter Deployment Doc](prometheus-nginx-gateway.md). The primary difference is the need to target and match on a pod that's exposing the metrics rather than a service. You can view more information about the metrics exposed by the Nginx Gateway by viewing the [Nginx Gateway Fabric Docs](https://docs.nginx.com/nginx-gateway-fabric/how-to/monitoring/prometheus/). @@ -151,6 +151,13 @@ The [Prometheus Push Gateway](https://github.com/prometheus/pushgateway) is used It's not capable of turning Prometheus into a push-based monitoring system and should only be used when there is no other way to collect the desired metrics. 
Currently, in Genestack the push gateway is only being used to gather stats from the OVN-Backup CronJob as noted in the [Pushgateway Deployment Doc](prometheus-pushgateway.md). +* ### SNMP Exporter: +The [Prometheus SNMP Exporter](https://github.com/prometheus/snmp_exporter) is +used for gathering SNMP metrics. A default Genestack installation does not make +use of it, so you do not need to install it unless you plan to do additional +configuration beyond Genestack defaults and specifically plan to monitor some +SNMP-enabled devices. + * ### Textfile Collector: It's possible to gather node/host metrics that aren't exposed by any of the above exporters by utilizing the [Node Exporter Textfile Collector](https://github.com/prometheus/node_exporter?tab=readme-ov-file#textfile-collector). Currently, in Genestack the textfile-collector is used to collect kernel-taint stats. To view more information about the textfile-collector and how to deploy your own custom exporter view the [Custom Metrics Deployment Doc](prometheus-custom-node-metrics.md). diff --git a/docs/multi-region-support.md b/docs/multi-region-support.md index d2a2b0939..4c0084894 100644 --- a/docs/multi-region-support.md +++ b/docs/multi-region-support.md @@ -121,35 +121,7 @@ We're going to simply add another `-f` flag below that one to include our overri So, our helm command that we'll run against sjc will now look like: ``` shell -cd /opt/genestack/submodules/openstack-helm - -helm upgrade --install nova ./nova \ - --namespace=openstack \ - --timeout 120m \ - -f /etc/genestack/helm-configs/nova/nova-helm-overrides.yaml \ - -f /etc/genestack/helm-configs/nova/region1-nova-helm-overrides.yaml \ - --set conf.nova.neutron.metadata_proxy_shared_secret="$(kubectl --namespace openstack get secret metadata-shared-secret -o jsonpath='{.data.password}' | base64 -d)" \ - --set endpoints.identity.auth.admin.password="$(kubectl --namespace openstack get secret keystone-admin -o jsonpath='{.data.password}' | base64 -d)" \ - --set endpoints.identity.auth.nova.password="$(kubectl --namespace openstack get secret nova-admin -o jsonpath='{.data.password}' | base64 -d)" \ - --set endpoints.identity.auth.neutron.password="$(kubectl --namespace openstack get secret neutron-admin -o jsonpath='{.data.password}' | base64 -d)" \ - --set endpoints.identity.auth.ironic.password="$(kubectl --namespace openstack get secret ironic-admin -o jsonpath='{.data.password}' | base64 -d)" \ - --set endpoints.identity.auth.placement.password="$(kubectl --namespace openstack get secret placement-admin -o jsonpath='{.data.password}' | base64 -d)" \ - --set endpoints.identity.auth.cinder.password="$(kubectl --namespace openstack get secret cinder-admin -o jsonpath='{.data.password}' | base64 -d)" \ - --set endpoints.oslo_db.auth.admin.password="$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d)" \ - --set endpoints.oslo_db.auth.nova.password="$(kubectl --namespace openstack get secret nova-db-password -o jsonpath='{.data.password}' | base64 -d)" \ - --set endpoints.oslo_db_api.auth.admin.password="$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d)" \ - --set endpoints.oslo_db_api.auth.nova.password="$(kubectl --namespace openstack get secret nova-db-password -o jsonpath='{.data.password}' | base64 -d)" \ - --set endpoints.oslo_db_cell0.auth.admin.password="$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d)" \ - --set 
endpoints.oslo_db_cell0.auth.nova.password="$(kubectl --namespace openstack get secret nova-db-password -o jsonpath='{.data.password}' | base64 -d)" \ - --set conf.nova.database.slave_connection="mysql+pymysql://nova:$(kubectl --namespace openstack get secret nova-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-cluster-secondary.openstack.svc.cluster.local:3306/nova" \ - --set conf.nova.api_database.slave_connection="mysql+pymysql://nova:$(kubectl --namespace openstack get secret nova-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-cluster-secondary.openstack.svc.cluster.local:3306/nova_api" \ - --set conf.nova.cell0_database.slave_connection="mysql+pymysql://nova:$(kubectl --namespace openstack get secret nova-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-cluster-secondary.openstack.svc.cluster.local:3306/nova_cell0" \ - --set endpoints.oslo_messaging.auth.admin.password="$(kubectl --namespace openstack get secret rabbitmq-default-user -o jsonpath='{.data.password}' | base64 -d)" \ - --set endpoints.oslo_messaging.auth.nova.password="$(kubectl --namespace openstack get secret nova-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)" \ - --set network.ssh.public_key="$(kubectl -n openstack get secret nova-ssh-keypair -o jsonpath='{.data.public_key}' | base64 -d)"$'\n' \ - --set network.ssh.private_key="$(kubectl -n openstack get secret nova-ssh-keypair -o jsonpath='{.data.private_key}' | base64 -d)"$'\n' \ - --post-renderer /etc/genestack/kustomize/kustomize.sh \ - --post-renderer-args nova/overlay +/opt/genestack/bin/install-nova.sh -f /etc/genestack/helm-configs/nova/region1-nova-helm-overrides.yaml ``` Like mentioned above the only difference here is the additional flag to include our custom override and that's it, we can now version custom changes while maintaining upstream parity across many regions and/or staging envorinments! diff --git a/docs/octavia-flavor-and-flavorprofile-guide.md b/docs/octavia-flavor-and-flavorprofile-guide.md index 019ef2842..cef939189 100644 --- a/docs/octavia-flavor-and-flavorprofile-guide.md +++ b/docs/octavia-flavor-and-flavorprofile-guide.md @@ -74,6 +74,14 @@ You can extend the flavor profile with additional provider capabilities as neede $ openstack loadbalancer flavorprofile set --flavor-data '{"loadbalancer_topology": "ACTIVE_STANDBY", "amp_image_tag": "amphora-image-v2", "sriov_vip": false}' 5f4d2c7c-e294-4a9c-b97a-54a2b97a17a5 ``` +!!! note "Loadbalancer Topologies" + + The `loadbalancer_topology` field in the flavor data specifies the number of Amphora instances per + load balancer. The possible values are: + + - `SINGLE`: One Amphora per load balancer. + - `ACTIVE_STANDBY`: Two Amphora per load balancer. + ## Flavors To create a flavor using the previously defined flavor profile, run the following command: diff --git a/docs/openstack-barbican.md b/docs/openstack-barbican.md index 9dcd6dc26..fa81b5358 100644 --- a/docs/openstack-barbican.md +++ b/docs/openstack-barbican.md @@ -26,6 +26,25 @@ OpenStack Barbican is the dedicated security service within the OpenStack ecosys --from-literal=password="$(< /dev/urandom tr -dc _A-Za-z0-9 | head -c${1:-32};echo;)" ``` +## Setup Barbican Overrides + +When deploying barbican, it is important to provide the necessary configuration values to ensure that the service is properly +configured and integrated with other OpenStack services. 
The `/etc/genestack/helm-configs/barbican/barbican-helm-overrides.yaml` +file contains the necessary configuration values for Barbican, including database connection details, RabbitMQ credentials, and other +service-specific settings. By providing these values, you can customize the deployment of Barbican to meet your specific requirements +and ensure that the service operates correctly within your OpenStack environment. + +!!! tip "Set the `host_href` value" + + The `host_href` value should be set to the public endpoint of the Barbican service. This value is used by other OpenStack services and public consumers to communicate with Barbican and should be accessible from all OpenStack services. + + ``` yaml + conf: + barbican: + DEFAULT: + host_href: "https://barbican.your.domain.tld" + ``` + ## Run the package deployment !!! example "Run the Barbican deployment Script `bin/install-barbican.sh`" diff --git a/docs/openstack-glance-images.md b/docs/openstack-glance-images.md index a26ad830a..90c64cf9d 100644 --- a/docs/openstack-glance-images.md +++ b/docs/openstack-glance-images.md @@ -1,4 +1,4 @@ -# Download Images +# Glance Images Overview The following page highlights how to retrieve various images and upload them into Glance. diff --git a/docs/openstack-glance-swift-store.md b/docs/openstack-glance-swift-store.md new file mode 100644 index 000000000..5da1a82fe --- /dev/null +++ b/docs/openstack-glance-swift-store.md @@ -0,0 +1,84 @@ +# Connecting Glance to External Swift + +When operating a cloud environment, it is often necessary to store images in a separate storage system. This can be useful for a number of reasons, such as: + +* To provide a scalable storage solution for images +* To provide a storage solution that is separate from the compute nodes +* To provide a storage solution that is separate from the control plane +* Offsite backups for instances and instance snapshots +* Disaster recovery for instances and instance snapshots + +In this guide, we will show you how to connect Glance to an external Swift storage system. This will allow you to store images in Swift, while still using Glance to manage the images. + +## Prerequisites + +Before you begin, you will need the following: + +* A running OpenStack environment +* A running Swift environment +* A running Glance environment +* The IP address of the Swift server +* The port number of the Swift server +* The username and password for the Swift server + +## Information Needed + +The following information is needed to configure Glance to use Swift as an external storage system. + +| Property | Value | Notes | +| -------- | ----- | ----- | +| KEYSTONE_AUTH_URL | STRING | Keystone V3 or later authentication endpoint where Swift is available within the service catalog | +| SUPER_SECRETE_KEY | STRING | Authentication password or key | +| CLOUD_DOMAIN_NAME | STRING | The domain name associated with the cloud account | +| CLOUD_PROJECT_NAME | STRING | The name of the project where objects will be stored | +| CLOUD_USERNAME | STRING | The username of that will be accessing the cloud project | + +!!! note "For Rackspace OpenStack Flex Users" + + If you're using Rackspace OpenStack Flex, you can use the following options for the swift object storage. + + * `KEYSTONE_AUTH_URL` will be defined as "https://keystone.api.${REGION}.rackspacecloud.com/v3" + * Replace `${REGION}` with the region where the Swift object storage is located, See [Rackspace Cloud Regions](api-status.md) for more information on available regions. 
+ * `CLOUD_DOMAIN_NAME` will be defined as "rackspace_cloud_domain" + +### Step 1: Configure Glance to use Swift + +Update the Helm overrides at `/etc/genestack/helm-configs/glance/glance-helm-overrides.yaml` with the following configuration to connect Glance to Swift. + +``` yaml +--- +conf: + glance: + DEFAULT: + enabled_backends: swift:swift + glance_store: + default_backend: swift + default_store: swift + swift_store: | + [ref1] + auth_address = $KEYSTONE_AUTH_URL + auth_version = 3 + key = $SUPER_SECRETE_KEY + project_domain_id = + project_domain_name = $CLOUD_DOMAIN_NAME + swift_buffer_on_upload = true + swift_store_container = glance + swift_store_create_container_on_put = true + swift_store_endpoint_type = publicURL + swift_store_multi_tenant = false + swift_store_region = SJC3 + swift_upload_buffer_dir = /var/lib/glance/images + user = $CLOUD_PROJECT_NAME:$CLOUD_USERNAME + user_domain_id = + user_domain_name = $CLOUD_DOMAIN_NAME +``` + +### Step 2: Apply the Configuration + +Apply the configuration to the Glance Helm chart. + +``` bash +/opt/genestack/bin/install-glance.sh +``` + +Once the configuration has been applied, Glance will be configured to use Swift as an external storage system. You can now store images in Swift using Glance. diff --git a/docs/openstack-glance.md b/docs/openstack-glance.md index b1e6ea586..6d32bc5e1 100644 --- a/docs/openstack-glance.md +++ b/docs/openstack-glance.md @@ -53,6 +53,12 @@ OpenStack Glance is the image service within the OpenStack ecosystem, responsibl kubectl --namespace openstack exec -ti openstack-admin-client -- openstack image list ``` +!!! genestack "External Image Store" + + If glance will be deployed with an external swift storage backend, review the + [OpenStack Glance Swift Store](openstack-glance-swift-store.md) operator documentation + for additional steps and setup. + ## Demo [![asciicast](https://asciinema.org/a/629806.svg)](https://asciinema.org/a/629806) diff --git a/docs/openstack-helm-make.md b/docs/openstack-helm-make.md deleted file mode 100644 index 8c4d3cde8..000000000 --- a/docs/openstack-helm-make.md +++ /dev/null @@ -1,18 +0,0 @@ -# OpenStack Helm - -Before running a deploy the helm charts need to be built. If the charts for your deployment have already been built, -then there's no need to do it a second time. - -## Install Helm - -While `helm` should already be installed with the **host-setup** playbook, you will need to install helm manually on nodes. There are lots of ways to install helm, check the upstream [docs](https://helm.sh/docs/intro/install/) to learn more about installing helm. - -## Run `make` for our helm components - -``` shell -cd /opt/genestack/submodules/openstack-helm && -make all - -cd /opt/genestack/submodules/openstack-helm-infra && -make all -``` diff --git a/docs/prometheus-monitoring-overview.md b/docs/prometheus-monitoring-overview.md index 119e0756c..d46c3b259 100644 --- a/docs/prometheus-monitoring-overview.md +++ b/docs/prometheus-monitoring-overview.md @@ -19,6 +19,7 @@ Prometheus makes use of various metric exporters used to collect monitoring data * Memcached Exporter(Memcached metrics) * Openstack Exporter(Metrics from various Openstack products) * Pushgateway (metrics from short-lived jobs) +* SNMP exporter (for monitoring with SNMP)
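As a minimal sketch of how an SNMP exporter is typically queried once it has been deployed and configured, the request below asks the exporter to scrape a device; the target address and module name are placeholder assumptions, not Genestack defaults.

``` shell
# Placeholder values: 192.0.2.1 is a documentation address and if_mib is an example module.
# The upstream snmp_exporter listens on port 9116 by default.
curl "http://localhost:9116/snmp?target=192.0.2.1&module=if_mib"
```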
![Prometheus Monitoring Diagram](assets/images/prometheus-monitoring.png){ style="filter:drop-shadow(#3c3c3c 0.5rem 0.5rem 10px);" } diff --git a/docs/prometheus-snmp-exporter.md b/docs/prometheus-snmp-exporter.md new file mode 100644 index 000000000..346ba2024 --- /dev/null +++ b/docs/prometheus-snmp-exporter.md @@ -0,0 +1,20 @@ +# Prometheus SNMP Exporter + +You will not generally need the Prometheus SNMP Exporter unless you have +specific SNMP monitoring needs and take additional steps to configure the +Prometheus SNMP Exporter. The default Genestack configuration doesn't make +immediate use of it without site-specific customization, such as writing an +applicable snmp.conf + +Use the Prometheus SNMP exporter for getting metrics from monitoring with SNMP +into Prometheus. + +#### Install the Prometheus SNMP Exporter Helm Chart + + +``` shell +bin/install-chart.sh prometheus-snmp-exporter +``` + +!!! success + If the installation is successful, you should see the prometheus-snmp-exporter pod running in the prometheus namespace. diff --git a/docs/rackspace-infrastructure-gateway-api.md b/docs/rackspace-infrastructure-nginx-gateway-api.md similarity index 100% rename from docs/rackspace-infrastructure-gateway-api.md rename to docs/rackspace-infrastructure-nginx-gateway-api.md diff --git a/docs/release-notes.md b/docs/release-notes.md new file mode 100644 index 000000000..41380d6d7 --- /dev/null +++ b/docs/release-notes.md @@ -0,0 +1,12 @@ +# Release Notes + +All release notes are generated using [reno](https://docs.openstack.org/reno/latest/). + +To manaually generate your release notes and see this file populated, run the following commands + +``` shell +pip install -r doc-requirements.txt -r dev-requirements.txt +apt update && apt install -y pandoc +reno report -o /tmp/reno.rst +pandoc /tmp/reno.rst -f rst -t markdown -o docs/release-notes.md +``` diff --git a/docs/sdlc.md b/docs/sdlc.md new file mode 100644 index 000000000..48b6c77a4 --- /dev/null +++ b/docs/sdlc.md @@ -0,0 +1,37 @@ +# Genestack SDLC + +![SDLC](assets/images/sdlc.png){align=left : style="max-width:200px"} +Software Development Life Cycle (SDLC) is the process used to ensure high quality solutions are delivered in a predictable, repeatable fashion. The SDLC aims to produce outcomes that meet or exceed customer expectations while reaching completion within time and cost estimates. + +
The process is broken down into 6 distinct phases: +
__Scope__, __Implement__, __Document__, __Test__, __Deployment__, and __Maintenance__. + +### Scope + +The scope phase is where new work is identified, based on the stated objective, and the level of effort is determined. The plan portion of scope is where the work is assigned to various sprints to ensure timely completion. This step is vital to ensure we are meeting stated goals for the business and the community. + +### Implement + +In the implement phase, development teams use the requirements gathered in the scope phase to create code or processes that meet the deliverable. + +### Document + +Documentation must reflect the current state of the codebase for any deployed application, service, or process. If functionality has been added, removed, or changed, the documentation is updated to reflect the change. + +TL;DR: changed something, added something, removed something -- document it. + +### Test + +The test phase is used to ensure that the deliverable is free from defects and meets the specified requirements. This is accomplished via a three-step, phased approach: +
1. GitHub pre-commit checks +
2. Unit testing against development environment +
3. Functional checks using [Rally](https://opendev.org/openstack/rally) + against our Development and Staging environments + +### Deployment + +In the deployment phase, the development team deploys deliverables using a multi-environment deployment process. This ensures deliverables are tested through a staging environment for functionality and reliability before reaching production. + +### Maintenance + +In the maintenance phase, the team focuses on monitoring the various environments, fixing bugs, and addressing any issues brought forth by customers or stakeholders. diff --git a/docs/storage-ceph-rook-external.md b/docs/storage-ceph-rook-external.md index 8e304a821..2dc1e3228 100644 --- a/docs/storage-ceph-rook-external.md +++ b/docs/storage-ceph-rook-external.md @@ -34,7 +34,7 @@ ceph orch apply mds myfs label:mds ``` shell ./cephadm shell -yum install wget -y ; wget https://raw.githubusercontent.com/rook/rook/release-1.12/deploy/examples/create-external-cluster-resources.py +yum install wget -y ; wget https://raw.githubusercontent.com/rook/rook/release-1.16/deploy/examples/create-external-cluster-resources.py python3 create-external-cluster-resources.py --rbd-data-pool-name general --cephfs-filesystem-name general-multi-attach --namespace rook-ceph-external --format bash ``` @@ -74,7 +74,8 @@ kubectl apply -k /etc/genestack/kustomize/rook-operator/ /opt/genestack/scripts/import-external-cluster.sh helm repo add rook-release https://charts.rook.io/release kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.13.7 -helm install --create-namespace --namespace rook-ceph-external rook-ceph-cluster --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f /opt/genestack/submodules/rook/deploy/charts/rook-ceph-cluster/values-external.yaml +wget https://raw.githubusercontent.com/rook/rook/refs/tags/v1.16.5/deploy/charts/rook-ceph-cluster/values-external.yaml -O /etc/genestack/helm-configs/rook-values-external.yaml +helm install --create-namespace --namespace rook-ceph-external rook-ceph-cluster --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f /etc/genestack/helm-configs/rook-values-external.yaml kubectl patch storageclass general -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' ``` diff --git a/docs/storage-ceph-rook-internal.md b/docs/storage-ceph-rook-internal.md index 159b8f965..e70cb25e0 100644 --- a/docs/storage-ceph-rook-internal.md +++ b/docs/storage-ceph-rook-internal.md @@ -17,7 +17,7 @@ kubectl apply -k /etc/genestack/kustomize/rook-operator/ example of how one can pin the operator version if so desired. ``` shell - kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.13.7 + kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.16.5 ``` ### Label the Storage Nodes diff --git a/docs/storage-external-block.md b/docs/storage-external-block.md index 0ae757ae8..372af81dd 100644 --- a/docs/storage-external-block.md +++ b/docs/storage-external-block.md @@ -12,7 +12,7 @@ For some Topo/Ceph/NFS are not great fits, Genestack allows for external block d Follow Documentation on getting a storage class presented to k8s, name it "general" and mark that storage class as default, in this example storage is provided by democratic csi driver over iscsi. 
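If the storage class is not already the cluster default, it can typically be marked as such with an annotation patch; the example below assumes the class is named `general`.

``` shell
# Mark the existing "general" StorageClass as the default for the cluster.
kubectl patch storageclass general -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
```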
``` shell -(genestack) root@genestack-controller1:/opt/genestack/submodules/openstack-helm# kubectl get sc +(genestack) root@genestack-controller1:# kubectl get sc NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE general (default) org.democratic-csi.iscsi Delete Immediate true 3h15m ``` @@ -37,7 +37,7 @@ kubectl apply -k /etc/genestack/kustomize/rook-cluster-external-pvc/ Monitor cluster state, once cluster HEALTH_OK proceed to the next step ``` shell -(genestack) root@genestack-controller1:/opt/genestack/submodules/openstack-helm# kubectl --namespace rook-ceph get cephclusters.ceph.rook.io +(genestack) root@genestack-controller1:# kubectl --namespace rook-ceph get cephclusters.ceph.rook.io NAME DATADIRHOSTPATH MONCOUNT AGE PHASE MESSAGE HEALTH EXTERNAL FSID rook-ceph /var/lib/rook 3 129m Ready Cluster created successfully HEALTH_OK 9a6657cd-f3ab-4d70-b276-a05e2ca03e1b ``` @@ -51,7 +51,7 @@ kubectl apply -k /etc/genestack/kustomize/rook-defaults-external-pvc/ You should now have two storage class providers configured for Genestack ``` shell -(genestack) root@genestack-controller1:/opt/genestack/submodules/openstack-helm# kubectl get sc -A +(genestack) root@genestack-controller1:# kubectl get sc -A NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE general (default) org.democratic-csi.iscsi Delete Immediate true 3h25m general-multi-attach rook-ceph.cephfs.csi.ceph.com Delete Immediate true 85m diff --git a/docs/storage-topolvm.md b/docs/storage-topolvm.md index 571f5fcae..6772de4c1 100644 --- a/docs/storage-topolvm.md +++ b/docs/storage-topolvm.md @@ -11,7 +11,7 @@ The following steps are one way to set it up, however, consult the [documentatio ## Create the target volume group on your hosts -TopoLVM requires access to a volume group on the physical host to work, which means we need to set up a volume group on our hosts. By default, TopoLVM will use the controllers as storage hosts. The genestack Kustomize solution sets the general storage volume group to `vg-general`. This value can be changed within Kustomize found at `kustomize/topolvm/general/kustomization.yaml`. +TopoLVM requires access to a volume group on the physical host to work, which means we need to set up a volume group on our hosts. By default, TopoLVM will use the controllers as storage hosts. The genestack Helm solution sets the general storage volume group to `vg-general`. This value can be changed within Helm overrides file found at `/opt/genestack/base-helm-configs/topolvm/helm-topolvm-overrides.yaml`. !!! example "Simple example showing how to create the needed volume group" @@ -25,6 +25,8 @@ Once the volume group is on your storage nodes, the node is ready for use. ### Deploy the TopoLVM Provisioner -``` shell -kubectl kustomize --enable-helm /etc/genestack/kustomize/topolvm/general | kubectl apply -f - -``` +!!! 
example "Run the topolvm deployment Script bin/install-topolvm.sh" + + ``` shell + --8<-- "bin/install-topolvm.sh" + ``` diff --git a/etc/gateway-api/gateway-envoy-http-routes.yaml b/etc/gateway-api/gateway-envoy-http-routes.yaml deleted file mode 100644 index 93831c64a..000000000 --- a/etc/gateway-api/gateway-envoy-http-routes.yaml +++ /dev/null @@ -1,17 +0,0 @@ ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: -name: test_application -namespace: test_app -spec: -parentRefs: -- name: flex-gateway - sectionName: http - namespace: envoy-gateway-system -hostnames: -- "test_application.sjc.your.domain.tld" -rules: - - backendRefs: - - name: test_application - port: 8774 diff --git a/base-kustomize/gateway/nginx-gateway-fabric/alertmanager-routes.yaml b/etc/gateway-api/routes/custom-alertmanager-routes.yaml similarity index 88% rename from base-kustomize/gateway/nginx-gateway-fabric/alertmanager-routes.yaml rename to etc/gateway-api/routes/custom-alertmanager-routes.yaml index d1d2c877f..7c61de56f 100644 --- a/base-kustomize/gateway/nginx-gateway-fabric/alertmanager-routes.yaml +++ b/etc/gateway-api/routes/custom-alertmanager-routes.yaml @@ -1,7 +1,7 @@ apiVersion: gateway.networking.k8s.io/v1 kind: HTTPRoute metadata: - name: alertmanger-gateway-route + name: custom-alertmanger-gateway-route namespace: prometheus spec: parentRefs: diff --git a/etc/gateway-api/routes/custom-grafana-routes.yaml b/etc/gateway-api/routes/custom-grafana-routes.yaml index 5cf98baa3..c82b9ed54 100644 --- a/etc/gateway-api/routes/custom-grafana-routes.yaml +++ b/etc/gateway-api/routes/custom-grafana-routes.yaml @@ -6,6 +6,10 @@ metadata: spec: hostnames: - grafana.your.domain.tld + - "grafana.cluster.local" + - "grafana" + - "grafana.grafana" + - "grafana.grafana.svc.cluster.local" parentRefs: - group: gateway.networking.k8s.io kind: Gateway diff --git a/etc/gateway-api/gateway-prometheus.yaml b/etc/gateway-api/routes/custom-prometheus-gateway-route.yaml similarity index 89% rename from etc/gateway-api/gateway-prometheus.yaml rename to etc/gateway-api/routes/custom-prometheus-gateway-route.yaml index 4fe78b965..173feae85 100644 --- a/etc/gateway-api/gateway-prometheus.yaml +++ b/etc/gateway-api/routes/custom-prometheus-gateway-route.yaml @@ -2,7 +2,7 @@ apiVersion: gateway.networking.k8s.io/v1 kind: HTTPRoute metadata: - name: prometheus-gateway-route + name: custom-prometheus-gateway-route namespace: prometheus spec: parentRefs: diff --git a/manifests/metallb/metallb-openstack-service-lb.yml b/manifests/metallb/metallb-openstack-service-lb.yml index 956a2d455..8b0cc6a3d 100644 --- a/manifests/metallb/metallb-openstack-service-lb.yml +++ b/manifests/metallb/metallb-openstack-service-lb.yml @@ -17,12 +17,8 @@ metadata: spec: ipAddressPools: - gateway-api-external - # nodeSelectors: # Optional block to limit nodes for a given advertisement - # - matchLabels: - # kubernetes.io/hostname: controller01.your.domain.tld - # - matchLabels: - # kubernetes.io/hostname: controller02.your.domain.tld - # - matchLabels: - # kubernetes.io/hostname: controller03.your.domain.tld + nodeSelectors: # Optional block to limit nodes for a given advertisement + - matchLabels: + node-role.kubernetes.io/worker: worker # interfaces: # Optional block to limit ifaces used to advertise VIPs # - br-mgmt diff --git a/mkdocs.yml b/mkdocs.yml index ad5413a6e..1d5c000a1 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -133,8 +133,10 @@ nav: - Architecture: genestack-architecture.md - Components: genestack-components.md - Swift 
Object Storage: openstack-object-storage-swift.md + - Release Notes: release-notes.md - Design Guide: - Introduction: openstack-cloud-design-intro.md + - SDLC: sdlc.md - Cloud Design: - Cloud Topology: openstack-cloud-design-topology.md - Regions: openstack-cloud-design-regions.md @@ -166,7 +168,6 @@ nav: - Kube-OVN: k8s-cni-kube-ovn.md - Retrieve kube config: k8s-config.md - Prometheus: prometheus.md - - OpenStack Preparations: openstack-helm-make.md - Storage: - storage-overview.md - Ceph Internal: storage-ceph-rook-internal.md @@ -179,7 +180,10 @@ nav: - infrastructure-overview.md - Namespace: infrastructure-namespace.md - MetalLB: infrastructure-metallb.md - - Gateway API: infrastructure-gateway-api.md + - Gateway API: + - Gateway API Overview: infrastructure-gateway-api.md + - Envoy Gateway: infrastructure-envoy-gateway-api.md + - NGINX Gateway: infrastructure-nginx-gateway-api.md - MariaDB: - infrastructure-mariadb.md - RabbitMQ: @@ -189,6 +193,7 @@ nav: - OVN: infrastructure-ovn-setup.md - FluentBit: infrastructure-fluentbit.md - Loki: infrastructure-loki.md + - Sealed Secrets: infrastructure-sealed-secrets.md - OpenStack: - openstack-overview.md - OpenStack Services: @@ -231,6 +236,7 @@ nav: - Openstack Exporter: prometheus-openstack-metrics-exporter.md - Blackbox Exporter: prometheus-blackbox-exporter.md - Pushgateway: prometheus-pushgateway.md + - SNMP Exporter: prometheus-snmp-exporter.md - Custom Node Metrics: prometheus-custom-node-metrics.md - Alert Manager Examples: - alertmanager-slack.md @@ -265,9 +271,10 @@ nav: - MariaDB: - Operations: infrastructure-mariadb-ops.md - Gateway API: - - Custom Routes: infrastructure-gateway-api-custom.md - - Rackspace Example Gateway Overview: rackspace-infrastructure-gateway-api.md - - Creating self-signed CA issuer for Gateway API: gateway-api-ca-issuer.md + - NGINX Gateway: + - Custom Routes: infrastructure-nginx-gateway-api-custom.md + - Rackspace Example Gateway Overview: rackspace-infrastructure-nginx-gateway-api.md + - Creating self-signed CA issuer for Gateway API: infrastructure-nginx-gateway-api-ca-issuer.md - Observability: - Observability Overview: observability-info.md - Monitoring Overview: monitoring-info.md @@ -290,14 +297,15 @@ nav: - Quota Management: openstack-quota-managment.md - Images: - Glance Images Creation: openstack-glance-images.md + - Glance External Swift Image Store: openstack-glance-swift-store.md - Identity: - Keystone Federation to Rackspace: openstack-keystone-federation.md - Keystone Readonly Users: openstack-keystone-readonly.md - Networking: - Creating Networks: openstack-neutron-networks.md - - Magnum: + - Containers: - Creating kubernetes clusters: magnum-kubernetes-cluster-setup-guide.md - - Octavia: + - Loadbalancers: - Creating Flavor Profiles and Flavors: octavia-flavor-and-flavorprofile-guide.md - Creating Cloud Load Balancers: octavia-loadbalancer-setup-guide.md - Object Storage: diff --git a/releasenotes/config.yaml b/releasenotes/config.yaml new file mode 100644 index 000000000..9c0006656 --- /dev/null +++ b/releasenotes/config.yaml @@ -0,0 +1,3 @@ +--- +collapse_pre_releases: false +stop_at_branch_base: true diff --git a/releasenotes/notes/barbican-chart-d7ddbde0e3f6cfd6.yaml b/releasenotes/notes/barbican-chart-d7ddbde0e3f6cfd6.yaml new file mode 100644 index 000000000..20cb738ac --- /dev/null +++ b/releasenotes/notes/barbican-chart-d7ddbde0e3f6cfd6.yaml @@ -0,0 +1,17 @@ +--- +deprecations: + - | + The barbican chart will now use the online OSH helm repository. 
This change + will allow the barbican chart to be updated more frequently and will allow + the barbican chart to be used with the OpenStack-Helm project. Upgrading to + this chart may require changes to the deployment configuration. Simple + updates can be made by running the following command: + + .. code-block:: shell + + helm -n openstack uninstall barbican + kubectl -n openstack delete -f /etc/genestack/kustomize/barbican/base/barbican-rabbitmq-queue.yaml + /opt/genestack/bin/install-barbican.sh + + This operation should have no operational impact on running VMs but should be + performed during a maintenance window. diff --git a/releasenotes/notes/beta-to-stable-bcae689e8700c667.yaml b/releasenotes/notes/beta-to-stable-bcae689e8700c667.yaml new file mode 100644 index 000000000..92a26d389 --- /dev/null +++ b/releasenotes/notes/beta-to-stable-bcae689e8700c667.yaml @@ -0,0 +1,277 @@ +--- +prelude: > + Creating the foundation for the stable branch. +features: + - | + Kubernetes deployment and management with Kubespray. This release uses Kubernetes + 1.30.x and Kubespray 2.27.0. + + https://docs.rackspacecloud.com/k8s-kubespray/ + - | + Kube-OVN CNI plugin is supported in this release. Kube-OVN can be deployed to + provide networking services to the Kubernetes cluster. + + https://docs.rackspacecloud.com/k8s-cni-kube-ovn/ + - | + Prometheus monitoring with the Prometheus Operator. This release uses Prometheus + Operator to deploy and manage Prometheus, which is used for system monitoring and + alerting. + + https://docs.rackspacecloud.com/prometheus/ + - | + Kubernetes PVC Storage with Rook Ceph is supported in this release. Rook Ceph can + be deployed internal to the Kubernetes cluster to provide persistent storage options. + + https://docs.rackspacecloud.com/storage-ceph-rook-internal/ + - | + Kubernetes PVC Storage with Rook Ceph is supported in this release. Rook Ceph can + be deployed external to the Kubernetes cluster to provide persistent storage options. + + https://docs.rackspacecloud.com/storage-ceph-rook-external/ + - | + Kubernetes PVC Storage with NFS is supported in this release. NFS can be deployed + external to the Kubernetes cluster to provide persistent storage options. + + https://docs.rackspacecloud.com/storage-nfs-external/ + - | + Kubernetes PVC Storage with TopoLVM is supported in this release. TopoLVM can be + deployed internal to the Kubernetes cluster to provide persistent storage options. + + https://docs.rackspacecloud.com/storage-topolvm/ + - | + Kubernetes PVC Storage with Longhorn (recommended) is supported in this release. + Longhorn can be deployed internal to the Kubernetes cluster to provide persistent + storage options. + + https://docs.rackspacecloud.com/storage-longhorn/ + - | + MetalLB LoadBalancer is supported in this release. MetalLB can be deployed to + provide LoadBalancer services to the Kubernetes cluster. This is used by default + for VIP address functionality within platform service loadbalancers. + + https://docs.rackspacecloud.com/infrastructure-metallb/ + - | + NGINX Gateway API is supported in this release. NGINX Gateway API can be deployed + to provide Gateway services to the Kubernetes cluster. This is used by default for + all internal services and external ingress into the platform services. + + https://docs.rackspacecloud.com/infrastructure-gateway-api/ + - | + MariaDB Operator is supported in this release. MariaDB Operator can be deployed + to provide database services to the Kubernetes cluster. 
This is used by default + for OpenStack Services + + https://docs.rackspacecloud.com/databases-mariadb-operator/ + - | + Postgres Operator is supported in this release. The Zalando Postgres Operator can + be deployed to provide database services for applications. This is used by default + for OpenStack metering services. + + https://docs.rackspacecloud.com/infrastructure-postgresql/ + - | + RabbitMQ Operator is supported in this release. RabbitMQ Operator can be deployed + to provide message queue services to the Kubernetes cluster. This is used by default + for OpenStack Services. + + https://docs.rackspacecloud.com/infrastructure-rabbitmq/ + - | + Memcached is supported in this release. Memcached can be deployed to provide fast + caching services to the Kubernetes cluster. This is used by default for OpenStack + Services. + + https://docs.rackspacecloud.com/infrastructure-memcached/ + - | + Libvirt is supported in this release for virtualization. Libvirt can be deployed + to provide virtualization services to the Kubernetes cluster. This is used by default + for OpenStack Services. + + https://docs.rackspacecloud.com/infrastructure-libvirt/ + - | + OVN for OpenStack is supported in this release. OVN is deployed to provide + networking services to OpenStack Consumers and is default for OpenStack. + + https://docs.rackspacecloud.com/infrastructure-ovn-setup/ + - | + Log collection is supported in this release. Fluentbit can be deployed to provide + log collection services to the Kubernetes cluster. This is used by default for + all services. + + https://docs.rackspacecloud.com/infrastructure-fluentbit/ + - | + Log aggregation is supported in this release. Loki can be deployed to provide + log aggregation services to the Kubernetes cluster. This is used by default for + all services. + + https://docs.rackspacecloud.com/infrastructure-loki/ + - | + OpenStack Keystone is supported in this release. OpenStack Keystone can be deployed + to provide identity services for OpenStack and is used by default. + + https://docs.rackspacecloud.com/openstack-keystone/ + - | + OpenStack Glance is supported in this release. OpenStack Glance can be deployed + to provide image services for OpenStack and is used by default. + + https://docs.rackspacecloud.com/openstack-glance/ + - | + OpenStack Heat is supported in this release. OpenStack Heat can be deployed to + provide orchestration services for OpenStack and is used by default. + + https://docs.rackspacecloud.com/openstack-heat/ + - | + OpenStack Barbican is supported in this release. OpenStack Barbican can be deployed + to provide key management services for OpenStack and is used by default. + + https://docs.rackspacecloud.com/openstack-barbican/ + - | + OpenStack Cinder is supported in this release. OpenStack Cinder can be deployed + to provide block storage services for OpenStack and is used by default. + + https://docs.rackspacecloud.com/openstack-cinder/ + - | + OpenStack Placement is supported in this release. OpenStack Placement can be deployed + to provide resource management services for OpenStack and is used by default. + + https://docs.rackspacecloud.com/openstack-compute-kit-placement/ + - | + OpenStack Nova is supported in this release. OpenStack Nova can be deployed to + provide compute services for OpenStack and is used by default. + + https://docs.rackspacecloud.com/openstack-compute-kit-nova/ + - | + OpenStack Neutron is supported in this release. 
OpenStack Neutron can be deployed + to provide networking services for OpenStack and is used by default. + + https://docs.rackspacecloud.com/openstack-network-kit-neutron/ + - | + OpenStack Skyline is supported in this release. OpenStack Skyline can be deployed + to provide dashboard services for OpenStack and is used by default. + + https://docs.rackspacecloud.com/openstack-skyline/ + - | + OpenStack Octavia is supported in this release. OpenStack Octavia can be deployed + to provide load balancing services for OpenStack and is used by default. + + https://docs.rackspacecloud.com/openstack-octavia/ + - | + OpenStack Magnum is supported in this release. OpenStack Magnum can be deployed + to provide container orchestration services for OpenStack and is used by default. + + https://docs.rackspacecloud.com/openstack-magnum/ + - | + OpenStack Ceilometer is supported in this release. OpenStack Ceilometer can be + deployed to provide telemetry services for OpenStack and is used by default. + + https://docs.rackspacecloud.com/openstack-ceilometer/ + - | + Gnocchi is supported in this release. Gnocchi can be deployed to provide metric + services for OpenStack and is used by default within the metering stack. + + https://docs.rackspacecloud.com/openstack-gnocchi/ + - | + Grafana is supported in this release. Grafana can be deployed to provide metric + visualization services for OpenStack and is used by default within the metering stack. + + https://docs.rackspacecloud.com/grafana/ + - | + Service metric collection is supported in this release and is interconnected with + prometheus and grafana to provide metric visualization services throughout the cluster. + + Supported Exporters: + * Kube-OVN + * NGINX Gateway Fabris + * RabbitMQ + * Memcached + * MariaDB + * Postgres + * OpenStack + * Blackbox + * Pushgateway + + Dashboards are all pre-configured for all supported exporters and visualized via + Grafana. + - | + Alert Manager is supported in this release. Alert Manager can be deployed to + provide alerting services for the cluster and is used by default. + + https://docs.rackspacecloud.com/alertmanager-slack/ +issues: + - | + The OVN loadbalancers options are by default available within Genestack but is + currently "tech preview" and not recommended for production use. + - | + Skyline UI currently limits the loadbalancer types to Amphora. This is a known + issue and will be resolved in a future release. +upgrade: + - | + When upgrading from a pre-release to stable, the following changes will need + to be made to the ansible inventory or group_vars to support stable cert-manager + + .. code-block:: yaml + + cert_manager_controller_extra_args: + - "--enable-gateway-api" + + In previous builds the ``--enable-gateway-api`` was unset, but it is now a + required option. + - | + When upgrading from a pre-release to stable, the following changes will need + to be made to the ansible inventory or group_vars to support stable metallb + + .. code-block:: yaml + + metallb_enabled: false + + In previous builds the ``metallb_enabled`` was set to true, but it is now + managed by the MetalLB helm chart. + - | + When upgrading from a pre-release to stable, the following changes will need + to be made to the ansible inventory or group_vars to eliminate the CNI plugin + from the Kubespray Management. + + .. code-block:: yaml + + kube_network_plugin: none + + In previous builds the ``kube_network_plugin`` was set to kube-ovn, but it is + now managed by the Kube-OVN helm chart. 
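Before re-running Kubespray with ``kube_network_plugin: none``, it can be worth confirming that the CNI really is Helm-managed. A minimal check might look like the following sketch; the release name, namespace, and pod-name patterns are assumptions and may differ per deployment:

    .. code-block:: shell

        # Look for a Kube-OVN Helm release (release name and namespace are assumptions)
        helm list -A | grep -i kube-ovn

        # Confirm the Kube-OVN/OVN pods are running (pod-name patterns are assumptions)
        kubectl -n kube-system get pods | grep -E 'kube-ovn|ovn-central|ovs-ovn'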
+ - | + When upgrading from a pre-release to stable, the following changes will need + to be made to the ansible inventory or group_vars to eliminate the previous + assumption of the kubeadm patch files. + + .. code-block:: yaml + + kubeadm_patches: [] + + In previous builds the ``kubeadm_patches`` was set to a dictionary of patches + that would deploy files into the environment. This interface was changed + upstream and now must be a list of string type patches. Review the upstream + documentation[0] for more information. + + [0] https://github.com/kubernetes-sigs/kubespray/blob/v2.27.0/roles/kubernetes/kubeadm_common/defaults/main.yml + - | + When upgrading from a pre-release to stable, the following file no longer has + any effect on the environment and can be removed from the ansible group_vars. + + + /etc/genestack/inventory/group_vars/k8s_cluster/k8s-net-kube-ovn.yml + + This file can be eliminated. + - | + When upgrading from a pre-release to stable, review the Kube-OVN to Helm + migration documentation at https://docs.rackspacecloud.com/k8s-cni-kube-ovn-helm-conversion + as this step will be required before running Kubespray again. +deprecations: + - | + In early builds of Genestack Kube-OVN was deployed and managed by Kubespray; + however, this is no longer the case. The Kube-OVN helm chart is now used to + deploy and manage the Kube-OVN CNI plugin. + - | + In early builds of Genestack MetalLB was deployed and managed by Kubespray; + however, this is no longer the case. The MetalLB helm chart is now used to + deploy and manage the MetalLB LoadBalancer. + - | + In early builds of Genestack the cert-manager option ``ExperimentalGatewayAPISupport`` + was set to true, within the ansible group_vars. This option should be removed as it + no longer has any effect. diff --git a/releasenotes/notes/ceilometer-chart-a8655e866388369b.yaml b/releasenotes/notes/ceilometer-chart-a8655e866388369b.yaml new file mode 100644 index 000000000..6a67b449b --- /dev/null +++ b/releasenotes/notes/ceilometer-chart-a8655e866388369b.yaml @@ -0,0 +1,17 @@ +--- +deprecations: + - | + The ceilometer chart will now use the online OSH helm repository. This change + will allow the ceilometer chart to be updated more frequently and will allow + the ceilometer chart to be used with the OpenStack-Helm project. Upgrading to + this chart may require changes to the deployment configuration. Simple + updates can be made by running the following command: + + .. code-block:: shell + + helm -n openstack uninstall ceilometer + kubectl -n openstack delete -f /etc/genestack/kustomize/ceilometer/base/ceilometer-rabbitmq-queue.yaml + /opt/genestack/bin/install-ceilometer.sh + + This operation should have no operational impact but should be + performed during a maintenance window. diff --git a/releasenotes/notes/cinder-chart-3e9c76938ef9ebe8.yaml b/releasenotes/notes/cinder-chart-3e9c76938ef9ebe8.yaml new file mode 100644 index 000000000..6f5927f02 --- /dev/null +++ b/releasenotes/notes/cinder-chart-3e9c76938ef9ebe8.yaml @@ -0,0 +1,17 @@ +--- +deprecations: + - | + The cinder chart will now use the online OSH helm repository. This change + will allow the cinder chart to be updated more frequently and will allow + the cinder chart to be used with the OpenStack-Helm project. Upgrading to + this chart may require changes to the deployment configuration. Simple + updates can be made by running the following command: + + .. 
code-block:: shell + + helm -n openstack uninstall cinder + kubectl -n openstack delete -f /etc/genestack/kustomize/cinder/base/cinder-rabbitmq-queue.yaml + /opt/genestack/bin/install-cinder.sh + + This operation should have no operational impact on running VMs but should be + performed during a maintenance window. diff --git a/releasenotes/notes/envoy-cd4ecf26ec8d6033.yaml b/releasenotes/notes/envoy-cd4ecf26ec8d6033.yaml new file mode 100644 index 000000000..36f111241 --- /dev/null +++ b/releasenotes/notes/envoy-cd4ecf26ec8d6033.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Envoy Gateway with a full featured configuration in support of OpenStack. + Envoy implements the Gateway API with traffic policies, loadbalancers, and + listeners which are configured to support the OpenStack APIs. diff --git a/releasenotes/notes/glance-chart-d355eaa64eb4b89c.yaml b/releasenotes/notes/glance-chart-d355eaa64eb4b89c.yaml new file mode 100644 index 000000000..06a0473ea --- /dev/null +++ b/releasenotes/notes/glance-chart-d355eaa64eb4b89c.yaml @@ -0,0 +1,17 @@ +--- +deprecations: + - | + The glance chart will now use the online OSH helm repository. This change + will allow the glance chart to be updated more frequently and will allow + the glance chart to be used with the OpenStack-Helm project. Upgrading to + this chart may require changes to the deployment configuration. Simple + updates can be made by running the following command: + + .. code-block:: shell + + helm -n openstack uninstall glance + kubectl -n openstack delete -f /etc/genestack/kustomize/glance/base/glance-rabbitmq-queue.yaml + /opt/genestack/bin/install-glance.sh + + This operation should have no operational impact on running VMs but should be + performed during a maintenance window. diff --git a/releasenotes/notes/gnocchi-chart-822265ae2b8e6d8b.yaml b/releasenotes/notes/gnocchi-chart-822265ae2b8e6d8b.yaml new file mode 100644 index 000000000..bf8f3763c --- /dev/null +++ b/releasenotes/notes/gnocchi-chart-822265ae2b8e6d8b.yaml @@ -0,0 +1,14 @@ +--- +deprecations: + - | + The gnocchi chart will now use the online OSH helm repository. This change + will allow the gnocchi chart to be updated more frequently and will allow + the gnocchi chart to be used with the OpenStack-Helm project. Upgrading to + this chart may require changes to the deployment configuration. Simple + updates can be made by running the following command: + + .. code-block:: shell + + helm -n openstack uninstall gnocchi + kubectl -n openstack delete -f /etc/genestack/kustomize/gnocchi/base/gnocchi-rabbitmq-queue.yaml + /opt/genestack/bin/install-gnocchi.sh diff --git a/releasenotes/notes/heat-chart-e469c7543a21ad8e.yaml b/releasenotes/notes/heat-chart-e469c7543a21ad8e.yaml new file mode 100644 index 000000000..c6918e0a9 --- /dev/null +++ b/releasenotes/notes/heat-chart-e469c7543a21ad8e.yaml @@ -0,0 +1,17 @@ +--- +deprecations: + - | + The heat chart will now use the online OSH helm repository. This change + will allow the heat chart to be updated more frequently and will allow + the heat chart to be used with the OpenStack-Helm project. Upgrading to + this chart may require changes to the deployment configuration. Simple + updates can be made by running the following command: + + .. 
code-block:: shell + + helm -n openstack uninstall heat + kubectl -n openstack delete -f /etc/genestack/kustomize/heat/base/heat-rabbitmq-queue.yaml + /opt/genestack/bin/install-heat.sh + + This operation should have no operational impact on running VMs but should be + performed during a maintenance window. diff --git a/releasenotes/notes/horizon-chart-dab65a869cc84de3.yaml b/releasenotes/notes/horizon-chart-dab65a869cc84de3.yaml new file mode 100644 index 000000000..a877889a2 --- /dev/null +++ b/releasenotes/notes/horizon-chart-dab65a869cc84de3.yaml @@ -0,0 +1,16 @@ +--- +deprecations: + - | + The horizon chart will now use the online OSH helm repository. This change + will allow the horizon chart to be updated more frequently and will allow + the horizon chart to be used with the OpenStack-Helm project. Upgrading to + this chart may require changes to the deployment configuration. Simple + updates can be made by running the following command: + + .. code-block:: shell + + helm -n openstack uninstall horizon + /opt/genestack/bin/install-horizon.sh + + This operation should have no operational impact on running VMs but should be + performed during a maintenance window. diff --git a/releasenotes/notes/keystone-chart-90138b428d5871b7.yaml b/releasenotes/notes/keystone-chart-90138b428d5871b7.yaml new file mode 100644 index 000000000..06b613864 --- /dev/null +++ b/releasenotes/notes/keystone-chart-90138b428d5871b7.yaml @@ -0,0 +1,18 @@ + +--- +deprecations: + - | + The keystone chart will now use the online OSH helm repository. This change + will allow the keystone chart to be updated more frequently and will allow + the keystone chart to be used with the OpenStack-Helm project. Upgrading to + this chart may require changes to the deployment configuration. Simple + updates can be made by running the following command: + + .. code-block:: shell + + helm -n openstack uninstall keystone + kubectl -n openstack delete -f /etc/genestack/kustomize/keystone/base/keystone-rabbitmq-queue.yaml + /opt/genestack/bin/install-keystone.sh + + This operation should have no operational impact on running VMs but should be + performed during a maintenance window. diff --git a/releasenotes/notes/libvirt-chart-2f3d090799aff3e0.yaml b/releasenotes/notes/libvirt-chart-2f3d090799aff3e0.yaml new file mode 100644 index 000000000..a811e33d8 --- /dev/null +++ b/releasenotes/notes/libvirt-chart-2f3d090799aff3e0.yaml @@ -0,0 +1,16 @@ +--- +deprecations: + - | + The libvirt chart will now use the online OSH helm repository. This change + will allow the libvirt chart to be updated more frequently and will allow + the libvirt chart to be used with the OpenStack-Helm project. Upgrading to + this chart may require changes to the deployment configuration. Simple + updates can be made by running the following command: + + .. code-block:: shell + + helm -n openstack uninstall libvirt + /opt/genestack/bin/install-libvirt.sh + + This operation should have no operational impact on running VMs but should be + performed during a maintenance window. diff --git a/releasenotes/notes/magnum-chart-5bcf6c0c706a9ad5.yaml b/releasenotes/notes/magnum-chart-5bcf6c0c706a9ad5.yaml new file mode 100644 index 000000000..7bacd14a3 --- /dev/null +++ b/releasenotes/notes/magnum-chart-5bcf6c0c706a9ad5.yaml @@ -0,0 +1,17 @@ +--- +deprecations: + - | + The magnum chart will now use the online OSH helm repository. 
This change + will allow the magnum chart to be updated more frequently and will allow + the magnum chart to be used with the OpenStack-Helm project. Upgrading to + this chart may require changes to the deployment configuration. Simple + updates can be made by running the following command: + + .. code-block:: shell + + helm -n openstack uninstall magnum + kubectl -n openstack delete -f /etc/genestack/kustomize/magnum/base/magnum-rabbitmq-queue.yaml + /opt/genestack/bin/install-magnum.sh + + This operation should have no operational impact on running VMs but should be + performed during a maintenance window. diff --git a/releasenotes/notes/neutron-chart-037f6740362c5d3c.yaml b/releasenotes/notes/neutron-chart-037f6740362c5d3c.yaml new file mode 100644 index 000000000..b6f2c1365 --- /dev/null +++ b/releasenotes/notes/neutron-chart-037f6740362c5d3c.yaml @@ -0,0 +1,14 @@ +--- +deprecations: + - | + The neutron chart will now use the online OSH helm repository. This change + will allow the neutron chart to be updated more frequently and will allow + the neutron chart to be used with the OpenStack-Helm project. Upgrading to + this chart may require changes to the deployment configuration. Simple + updates can be made by running the following command: + + .. code-block:: shell + + helm -n openstack uninstall neutron + kubectl -n openstack delete -f /etc/genestack/kustomize/neutron/base/neutron-rabbitmq-queue.yaml + /opt/genestack/bin/install-neutron.sh diff --git a/releasenotes/notes/nova-chart-f3c262b4d46c5178.yaml b/releasenotes/notes/nova-chart-f3c262b4d46c5178.yaml new file mode 100644 index 000000000..d447031c1 --- /dev/null +++ b/releasenotes/notes/nova-chart-f3c262b4d46c5178.yaml @@ -0,0 +1,17 @@ +--- +deprecations: + - | + The nova chart will now use the online OSH helm repository. This change + will allow the nova chart to be updated more frequently and will allow + the nova chart to be used with the OpenStack-Helm project. Upgrading to + this chart may require changes to the deployment configuration. Simple + updates can be made by running the following command: + + .. code-block:: shell + + helm -n openstack uninstall nova + kubectl -n openstack delete -f /etc/genestack/kustomize/nova/base/nova-rabbitmq-queue.yaml + /opt/genestack/bin/install-nova.sh + + This operation should have no operational impact on running VMs but should be + performed during a maintenance window. diff --git a/releasenotes/notes/octavia-chart-4471a89ed3631d45.yaml b/releasenotes/notes/octavia-chart-4471a89ed3631d45.yaml new file mode 100644 index 000000000..d1f311997 --- /dev/null +++ b/releasenotes/notes/octavia-chart-4471a89ed3631d45.yaml @@ -0,0 +1,25 @@ +--- +deprecations: + - | + The octavia chart will now use the online OSH helm repository. This change + will allow the octavia chart to be updated more frequently and will allow + the octavia chart to be used with the OpenStack-Helm project. Upgrading to + this chart may require changes to the deployment configuration. Simple + updates can be made by running the following command: + + .. code-block:: shell + + helm -n openstack uninstall octavia + kubectl -n openstack delete -f /etc/genestack/kustomize/octavia/base/octavia-rabbitmq-queue.yaml + /opt/genestack/bin/install-octavia.sh + + Depending on the state of the Octavia deployment, it may be necessary to + rerun the ansible-playbook for the octavia deployment.
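If a rerun is needed, the invocation would look roughly like the sketch below; the playbook path is illustrative only and the linked Octavia documentation remains the authoritative reference:

    .. code-block:: shell

        # Illustrative path only -- take the real playbook name and flags from the
        # Octavia documentation linked below.
        ansible-playbook -i /etc/genestack/inventory/inventory.yaml \
            /opt/genestack/ansible/playbooks/octavia-preconf-main.yaml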
Note that this playbook + will drop a marker file ``/tmp/octavia_hm_controller_ip_port_list`` which may + need to be cleaned up before rerunning the playbook. + + https://docs.rackspacecloud.com/openstack-octavia/#run-the-playbook + + That said, if the deployment was healthy before, the cleanup steps should not + be needed. This operation should have no operational impact on running VMs but + should be performed during a maintenance window. diff --git a/releasenotes/notes/placement-chart-2b02ca15631a0af1.yaml b/releasenotes/notes/placement-chart-2b02ca15631a0af1.yaml new file mode 100644 index 000000000..5b7130100 --- /dev/null +++ b/releasenotes/notes/placement-chart-2b02ca15631a0af1.yaml @@ -0,0 +1,16 @@ +--- +deprecations: + - | + The placement chart will now use the online OSH helm repository. This change + will allow the placement chart to be updated more frequently and will allow + the placement chart to be used with the OpenStack-Helm project. Upgrading to + this chart may require changes to the deployment configuration. Simple + updates can be made by running the following command: + + .. code-block:: shell + + helm -n openstack uninstall placement + /opt/genestack/bin/install-placement.sh + + This operation should have no operational impact on running VMs but should be + performed during a maintenance window. diff --git a/requirements.txt b/requirements.txt index d76e3abe9..a0df29883 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ ansible>9.0,<10.0 ansible-core<2.17.0 cryptography==43.0.1 -jinja2==3.1.4 +jinja2==3.1.5 jmespath==1.0.1 jsonschema<=4.23.0 MarkupSafe==2.1.3 diff --git a/scripts/hyperconverged-lab-uninstall.sh b/scripts/hyperconverged-lab-uninstall.sh new file mode 100755 index 000000000..1fadac967 --- /dev/null +++ b/scripts/hyperconverged-lab-uninstall.sh @@ -0,0 +1,97 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2124,SC2145,SC2294,SC2086,SC2087,SC2155 + +set -o pipefail +set -e +SECONDS=0 + +if [ -z "${OS_CLOUD}" ]; then + read -rp "Enter name of the cloud configuration used for this build [default]: " OS_CLOUD + export OS_CLOUD="${OS_CLOUD:-default}" +fi + +export LAB_NAME_PREFIX="${LAB_NAME_PREFIX:-hyperconverged}" + +function serverDelete() { + if ! openstack server delete "${1}" 2> /dev/null; then + echo "Failed to delete server ${1}" + fi +} + +function portDelete() { + if ! openstack port delete "${1}" 2> /dev/null; then + echo "Failed to delete port ${1}" + fi +} + +function securityGroupDelete() { + if ! openstack security group delete "${1}" 2> /dev/null; then + echo "Failed to delete security group ${1}" + fi +} + +function networkDelete() { + if ! openstack network delete "${1}" 2> /dev/null; then + echo "Failed to delete network ${1}" + fi +} + +function subnetDelete() { + if ! openstack subnet delete "${1}" 2> /dev/null; then + echo "Failed to delete subnet ${1}" + fi +} + +for i in $(openstack floating ip list --router ${LAB_NAME_PREFIX}-router -f value -c "Floating IP Address"); do + if ! openstack floating ip unset "${i}" 2> /dev/null; then + echo "Failed to unset floating ip ${i}" + fi + if ! openstack floating ip delete "${i}" 2> /dev/null; then + echo "Failed to delete floating ip ${i}" + fi +done + +serverDelete ${LAB_NAME_PREFIX}-2 +serverDelete ${LAB_NAME_PREFIX}-1 +serverDelete ${LAB_NAME_PREFIX}-0 + +if ! 
openstack keypair delete ${LAB_NAME_PREFIX}-key 2> /dev/null; then + echo "Failed to delete keypair ${LAB_NAME_PREFIX}-key" +fi + +portDelete ${LAB_NAME_PREFIX}-2-compute-port +portDelete ${LAB_NAME_PREFIX}-1-compute-port +portDelete ${LAB_NAME_PREFIX}-0-compute-port +for i in {100..109}; do + portDelete "${LAB_NAME_PREFIX}-0-compute-float-${i}-port" +done +portDelete ${LAB_NAME_PREFIX}-2-mgmt-port +portDelete ${LAB_NAME_PREFIX}-1-mgmt-port +portDelete ${LAB_NAME_PREFIX}-0-mgmt-port +portDelete metallb-vip-0-port + +securityGroupDelete ${LAB_NAME_PREFIX}-jump-secgroup +securityGroupDelete ${LAB_NAME_PREFIX}-http-secgroup +securityGroupDelete ${LAB_NAME_PREFIX}-secgroup + +if ! openstack router remove subnet ${LAB_NAME_PREFIX}-router ${LAB_NAME_PREFIX}-subnet 2> /dev/null; then + echo "Failed to remove ${LAB_NAME_PREFIX}-subnet from router ${LAB_NAME_PREFIX}-router" +fi +if ! openstack router remove subnet ${LAB_NAME_PREFIX}-router ${LAB_NAME_PREFIX}-compute-subnet 2> /dev/null; then + echo "Failed to remove ${LAB_NAME_PREFIX}-compute-subnet from router ${LAB_NAME_PREFIX}-router" +fi +if ! openstack router remove gateway ${LAB_NAME_PREFIX}-router PUBLICNET 2> /dev/null; then + echo "Failed to remove gateway from router ${LAB_NAME_PREFIX}-router" +fi +if ! openstack router delete ${LAB_NAME_PREFIX}-router 2> /dev/null; then + echo "Failed to delete router ${LAB_NAME_PREFIX}-router" +fi + +subnetDelete ${LAB_NAME_PREFIX}-compute-subnet +subnetDelete ${LAB_NAME_PREFIX}-subnet + +networkDelete ${LAB_NAME_PREFIX}-compute-net +networkDelete ${LAB_NAME_PREFIX}-net + +echo "Cleanup complete" +echo "The lab uninstall took ${SECONDS} seconds to complete." diff --git a/scripts/hyperconverged-lab.sh b/scripts/hyperconverged-lab.sh new file mode 100755 index 000000000..655e20f89 --- /dev/null +++ b/scripts/hyperconverged-lab.sh @@ -0,0 +1,701 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2124,SC2145,SC2294,SC2086,SC2087,SC2155 + +set -o pipefail +set -e +SECONDS=0 +if [ -z "${ACME_EMAIL}" ]; then + read -rp "Enter a valid email address for use with ACME, press enter to skip: " ACME_EMAIL + export ACME_EMAIL="${ACME_EMAIL:-}" +fi + +if [ -z "${GATEWAY_DOMAIN}" ]; then + echo "The domain name for the gateway is required, if you do not have a domain name press enter to use the default" + read -rp "Enter the domain name for the gateway [cluster.local]: " GATEWAY_DOMAIN + export GATEWAY_DOMAIN="${GATEWAY_DOMAIN:-cluster.local}" +fi + +if [ -z "${OS_CLOUD}" ]; then + read -rp "Enter name of the cloud configuration used for this build [default]: " OS_CLOUD + export OS_CLOUD="${OS_CLOUD:-default}" +fi + +if [ -z "${OS_FLAVOR}" ]; then + # List compatible flavors + FLAVORS=$(openstack flavor list --min-ram 16000 --min-disk 100 --sort-column Name -c Name -c RAM -c Disk -c VCPUs -f json) + DEFAULT_OS_FLAVOR=$(echo "${FLAVORS}" | jq -r '[.[] | select( all(.RAM; . < 24576) )] | .[0].Name') + echo "The following flavors are available for use with this build" + echo "${FLAVORS}" | jq -r '["Name", "RAM", "Disk", "VCPUs"], (.[] | [.Name, .RAM, .Disk, .VCPUs]) | @tsv' | column -t + read -rp "Enter name of the flavor to use for the instances [${DEFAULT_OS_FLAVOR}]: " OS_FLAVOR + export OS_FLAVOR=${OS_FLAVOR:-${DEFAULT_OS_FLAVOR}} +fi + +# Set the default image and ssh username +export OS_IMAGE="${OS_IMAGE:-Ubuntu 24.04}" +if [ -z "${SSH_USERNAME}" ]; then + if ! IMAGE_DEFAULT_PROPERTY=$(openstack image show "${OS_IMAGE}" -f json -c properties); then + read -rp "Image not found. 
Enter the image name: " OS_IMAGE + IMAGE_DEFAULT_PROPERTY=$(openstack image show "${OS_IMAGE}" -f json -c properties) + fi + if [ "${IMAGE_DEFAULT_PROPERTY}" ]; then + if SSH_USERNAME=$(echo "${IMAGE_DEFAULT_PROPERTY}" | jq -r '.properties.default_user'); then + echo "Discovered the default username for the image ${OS_IMAGE} as ${SSH_USERNAME}" + fi + fi + if [ -z "${SSH_USERNAME}" ] || [ "${SSH_USERNAME}" = "null" ]; then + echo "The image ${OS_IMAGE} does not have a default user property, please enter the default username" + read -rp "Enter the default username for the image: " SSH_USERNAME + fi +fi + +export LAB_NAME_PREFIX="${LAB_NAME_PREFIX:-hyperconverged}" + +if ! openstack router show ${LAB_NAME_PREFIX}-router 2> /dev/null; then + openstack router create ${LAB_NAME_PREFIX}-router --external-gateway PUBLICNET +fi + +if ! openstack network show ${LAB_NAME_PREFIX}-net 2> /dev/null; then + openstack network create ${LAB_NAME_PREFIX}-net +fi + +if ! TENANT_SUB_NETWORK_ID=$(openstack subnet show ${LAB_NAME_PREFIX}-subnet -f json 2> /dev/null | jq -r '.id'); then + echo "Creating the ${LAB_NAME_PREFIX}-subnet" + TENANT_SUB_NETWORK_ID=$( + openstack subnet create ${LAB_NAME_PREFIX}-subnet \ + --network ${LAB_NAME_PREFIX}-net \ + --subnet-range 192.168.100.0/24 \ + --dns-nameserver 1.1.1.1 \ + --dns-nameserver 1.0.0.1 \ + -f json | jq -r '.id' + ) +fi + +if ! openstack router show ${LAB_NAME_PREFIX}-router -f json 2> /dev/null | jq -r '.interfaces_info.[].subnet_id' | grep -q ${TENANT_SUB_NETWORK_ID}; then + openstack router add subnet ${LAB_NAME_PREFIX}-router ${LAB_NAME_PREFIX}-subnet +fi + +if ! openstack network show ${LAB_NAME_PREFIX}-compute-net 2> /dev/null; then + openstack network create ${LAB_NAME_PREFIX}-compute-net \ + --disable-port-security +fi + +if ! TENANT_COMPUTE_SUB_NETWORK_ID=$(openstack subnet show ${LAB_NAME_PREFIX}-compute-subnet -f json 2> /dev/null | jq -r '.id'); then + echo "Creating the ${LAB_NAME_PREFIX}-compute-subnet" + TENANT_COMPUTE_SUB_NETWORK_ID=$( + openstack subnet create ${LAB_NAME_PREFIX}-compute-subnet \ + --network ${LAB_NAME_PREFIX}-compute-net \ + --subnet-range 192.168.102.0/24 \ + --no-dhcp -f json | jq -r '.id' + ) +fi + +if ! openstack router show ${LAB_NAME_PREFIX}-router -f json | jq -r '.interfaces_info.[].subnet_id' | grep -q ${TENANT_COMPUTE_SUB_NETWORK_ID} 2> /dev/null; then + openstack router add subnet ${LAB_NAME_PREFIX}-router ${LAB_NAME_PREFIX}-compute-subnet +fi + +if ! openstack security group show ${LAB_NAME_PREFIX}-http-secgroup 2> /dev/null; then + openstack security group create ${LAB_NAME_PREFIX}-http-secgroup +fi + +if ! openstack security group show ${LAB_NAME_PREFIX}-http-secgroup -f json 2> /dev/null | jq -r '.rules.[].port_range_max' | grep -q 443; then + openstack security group rule create ${LAB_NAME_PREFIX}-http-secgroup \ + --protocol tcp \ + --ingress \ + --remote-ip 0.0.0.0/0 \ + --dst-port 443 \ + --description "https" +fi +if ! openstack security group show ${LAB_NAME_PREFIX}-http-secgroup -f json 2> /dev/null | jq -r '.rules.[].port_range_max' | grep -q 80; then + openstack security group rule create ${LAB_NAME_PREFIX}-http-secgroup \ + --protocol tcp \ + --ingress \ + --remote-ip 0.0.0.0/0 \ + --dst-port 80 \ + --description "http" +fi + +if ! openstack security group show ${LAB_NAME_PREFIX}-secgroup 2> /dev/null; then + openstack security group create ${LAB_NAME_PREFIX}-secgroup +fi + +if ! 
openstack security group show ${LAB_NAME_PREFIX}-secgroup -f json 2> /dev/null | jq -r '.rules.[].description' | grep -q "all internal traffic"; then + openstack security group rule create ${LAB_NAME_PREFIX}-secgroup \ + --protocol any \ + --ingress \ + --remote-ip 192.168.100.0/24 \ + --description "all internal traffic" +fi + +if ! openstack security group show ${LAB_NAME_PREFIX}-jump-secgroup 2> /dev/null; then + openstack security group create ${LAB_NAME_PREFIX}-jump-secgroup +fi + +if ! openstack security group show ${LAB_NAME_PREFIX}-jump-secgroup -f json 2> /dev/null | jq -r '.rules.[].port_range_max' | grep -q 22; then + openstack security group rule create ${LAB_NAME_PREFIX}-jump-secgroup \ + --protocol tcp \ + --ingress \ + --remote-ip 0.0.0.0/0 \ + --dst-port 22 \ + --description "ssh" +fi +if ! openstack security group show ${LAB_NAME_PREFIX}-jump-secgroup -f json 2> /dev/null | jq -r '.rules.[].protocol' | grep -q icmp; then + openstack security group rule create ${LAB_NAME_PREFIX}-jump-secgroup \ + --protocol icmp \ + --ingress \ + --remote-ip 0.0.0.0/0 \ + --description "ping" +fi + +if ! METAL_LB_IP=$(openstack port show metallb-vip-0-port -f json 2> /dev/null | jq -r '.fixed_ips[0].ip_address'); then + echo "Creating the MetalLB VIP port" + METAL_LB_IP=$(openstack port create --security-group ${LAB_NAME_PREFIX}-http-secgroup --network ${LAB_NAME_PREFIX}-net metallb-vip-0-port -f json | jq -r '.fixed_ips[0].ip_address') +fi + +METAL_LB_PORT_ID=$(openstack port show metallb-vip-0-port -f value -c id) + +if ! METAL_LB_VIP=$(openstack floating ip list --port ${METAL_LB_PORT_ID} -f json 2> /dev/null | jq -r '.[]."Floating IP Address"'); then + echo "Creating the MetalLB VIP floating IP" + METAL_LB_VIP=$(openstack floating ip create PUBLICNET --port ${METAL_LB_PORT_ID} -f json | jq -r '.floating_ip_address') +elif [ -z "${METAL_LB_VIP}" ]; then + METAL_LB_VIP=$(openstack floating ip create PUBLICNET --port ${METAL_LB_PORT_ID} -f json | jq -r '.floating_ip_address') +fi + +if ! WORKER_0_PORT=$(openstack port show ${LAB_NAME_PREFIX}-0-mgmt-port -f value -c id 2> /dev/null); then + export WORKER_0_PORT=$( + openstack port create --allowed-address ip-address=${METAL_LB_IP} \ + --security-group ${LAB_NAME_PREFIX}-secgroup \ + --security-group ${LAB_NAME_PREFIX}-jump-secgroup \ + --security-group ${LAB_NAME_PREFIX}-http-secgroup \ + --network ${LAB_NAME_PREFIX}-net \ + -f value \ + -c id \ + ${LAB_NAME_PREFIX}-0-mgmt-port + ) +fi + +if ! WORKER_1_PORT=$(openstack port show ${LAB_NAME_PREFIX}-1-mgmt-port -f value -c id 2> /dev/null); then + export WORKER_1_PORT=$( + openstack port create --allowed-address ip-address=${METAL_LB_IP} \ + --security-group ${LAB_NAME_PREFIX}-secgroup \ + --security-group ${LAB_NAME_PREFIX}-http-secgroup \ + --network ${LAB_NAME_PREFIX}-net \ + -f value \ + -c id \ + ${LAB_NAME_PREFIX}-1-mgmt-port + ) +fi + +if ! WORKER_2_PORT=$(openstack port show ${LAB_NAME_PREFIX}-2-mgmt-port -f value -c id 2> /dev/null); then + export WORKER_2_PORT=$( + openstack port create --allowed-address ip-address=${METAL_LB_IP} \ + --security-group ${LAB_NAME_PREFIX}-secgroup \ + --security-group ${LAB_NAME_PREFIX}-http-secgroup \ + --network ${LAB_NAME_PREFIX}-net \ + -f value \ + -c id \ + ${LAB_NAME_PREFIX}-2-mgmt-port + ) +fi + +if ! 
JUMP_HOST_VIP=$(openstack floating ip list --port ${WORKER_0_PORT} -f json 2> /dev/null | jq -r '.[]."Floating IP Address"'); then + JUMP_HOST_VIP=$(openstack floating ip create PUBLICNET --port ${WORKER_0_PORT} -f json | jq -r '.floating_ip_address') +elif [ -z "${JUMP_HOST_VIP}" ]; then + JUMP_HOST_VIP=$(openstack floating ip create PUBLICNET --port ${WORKER_0_PORT} -f json | jq -r '.floating_ip_address') +fi + +echo "Creating pre-defined compute ports for the flat test network" +for i in {100..109}; do + if ! openstack port show ${LAB_NAME_PREFIX}-0-compute-float-${i}-port 2> /dev/null; then + openstack port create --network ${LAB_NAME_PREFIX}-compute-net \ + --disable-port-security \ + --fixed-ip ip-address="192.168.102.${i}" \ + ${LAB_NAME_PREFIX}-0-compute-float-${i}-port + fi +done + +if ! COMPUTE_0_PORT=$(openstack port show ${LAB_NAME_PREFIX}-0-compute-port -f value -c id 2> /dev/null); then + export COMPUTE_0_PORT=$( + openstack port create --network ${LAB_NAME_PREFIX}-compute-net \ + --no-fixed-ip \ + --disable-port-security \ + -f value \ + -c id \ + ${LAB_NAME_PREFIX}-0-compute-port + ) +fi + +if ! COMPUTE_1_PORT=$(openstack port show ${LAB_NAME_PREFIX}-1-compute-port -f value -c id 2> /dev/null); then + export COMPUTE_1_PORT=$( + openstack port create --network ${LAB_NAME_PREFIX}-compute-net \ + --no-fixed-ip \ + --disable-port-security \ + -f value \ + -c id \ + ${LAB_NAME_PREFIX}-1-compute-port + ) +fi + +if ! COMPUTE_2_PORT=$(openstack port show ${LAB_NAME_PREFIX}-2-compute-port -f value -c id 2> /dev/null); then + export COMPUTE_2_PORT=$( + openstack port create --network ${LAB_NAME_PREFIX}-compute-net \ + --no-fixed-ip \ + --disable-port-security \ + -f value \ + -c id \ + ${LAB_NAME_PREFIX}-2-compute-port + ) +fi + +if ! openstack keypair show ${LAB_NAME_PREFIX}-key 2> /dev/null; then + if [ ! -f ~/.ssh/${LAB_NAME_PREFIX}-key.pem ]; then + openstack keypair create ${LAB_NAME_PREFIX}-key > ~/.ssh/${LAB_NAME_PREFIX}-key.pem + chmod 600 ~/.ssh/${LAB_NAME_PREFIX}-key.pem + openstack keypair show ${LAB_NAME_PREFIX}-key --public-key > ~/.ssh/${LAB_NAME_PREFIX}-key.pub + else + if [ -f ~/.ssh/${LAB_NAME_PREFIX}-key.pub ]; then + openstack keypair create ${LAB_NAME_PREFIX}-key --public-key ~/.ssh/${LAB_NAME_PREFIX}-key.pub + fi + fi +fi + +ssh-add ~/.ssh/${LAB_NAME_PREFIX}-key.pem + +# Create the three lab instances +if ! openstack server show ${LAB_NAME_PREFIX}-0 2> /dev/null; then + openstack server create ${LAB_NAME_PREFIX}-0 \ + --port ${WORKER_0_PORT} \ + --port ${COMPUTE_0_PORT} \ + --image "${OS_IMAGE}" \ + --key-name ${LAB_NAME_PREFIX}-key \ + --flavor ${OS_FLAVOR} +fi + +if ! openstack server show ${LAB_NAME_PREFIX}-1 2> /dev/null; then + openstack server create ${LAB_NAME_PREFIX}-1 \ + --port ${WORKER_1_PORT} \ + --port ${COMPUTE_1_PORT} \ + --image "${OS_IMAGE}" \ + --key-name ${LAB_NAME_PREFIX}-key \ + --flavor ${OS_FLAVOR} +fi + +if ! openstack server show ${LAB_NAME_PREFIX}-2 2> /dev/null; then + openstack server create ${LAB_NAME_PREFIX}-2 \ + --port ${WORKER_2_PORT} \ + --port ${COMPUTE_2_PORT} \ + --image "${OS_IMAGE}" \ + --key-name ${LAB_NAME_PREFIX}-key \ + --flavor ${OS_FLAVOR} +fi + +echo "Waiting for the jump host to be ready" +COUNT=0 +while ! ssh -o ConnectTimeout=2 -o ConnectionAttempts=3 -o UserKnownHostsFile=/dev/null -q ${SSH_USERNAME}@${JUMP_HOST_VIP} exit; do + sleep 2 + echo "SSH is not ready, Trying again..." 
+ COUNT=$((COUNT+1)) + if [ $COUNT -gt 30 ]; then + echo "Failed to ssh into the jump host" + exit 1 + fi +done + +# Run bootstrap +if [ "${HYPERCONVERGED_DEV:-false}" = "true" ]; then + export SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + if [ ! -d "${SCRIPT_DIR}" ]; then + echo "HYPERCONVERGED_DEV is true, but we've failed to determine the base genestack directory" + exit 1 + fi + ssh -o ForwardAgent=yes -o UserKnownHostsFile=/dev/null -t ${SSH_USERNAME}@${JUMP_HOST_VIP} \ + "timeout 1m bash -c 'while ! sudo apt update; do sleep 2; done' && sudo apt install -y rsync git" + echo "Copying the development source code to the jump host" + rsync -az \ + -e "ssh -o ForwardAgent=yes -o UserKnownHostsFile=/dev/null" \ + --rsync-path="sudo rsync" \ + $(readlink -fn ${SCRIPT_DIR}/../) ${SSH_USERNAME}@${JUMP_HOST_VIP}:/opt/ +fi + +ssh -o ForwardAgent=yes -o UserKnownHostsFile=/dev/null -t ${SSH_USERNAME}@${JUMP_HOST_VIP} < /etc/genestack/manifests/metallb/metallb-openstack-service-lb.yml < /etc/genestack/inventory/inventory.yaml < /etc/genestack/helm-configs/barbican/barbican-helm-overrides.yaml < /etc/genestack/helm-configs/cinder/cinder-helm-overrides.yaml < /etc/genestack/helm-configs/glance/glance-helm-overrides.yaml < /etc/genestack/helm-configs/gnocchi/gnocchi-helm-overrides.yaml < /etc/genestack/helm-configs/heat/heat-helm-overrides.yaml < /etc/genestack/helm-configs/keystone/keystone-helm-overrides.yaml < /etc/genestack/helm-configs/neutron/neutron-helm-overrides.yaml < /etc/genestack/helm-configs/magnum/magnum-helm-overrides.yaml < /etc/genestack/helm-configs/nova/nova-helm-overrides.yaml < /etc/genestack/helm-configs/octavia/octavia-helm-overrides.yaml < /etc/genestack/helm-configs/placement/placement-helm-overrides.yaml < /opt/genestack/bin/install-prometheus.sh - - name: Build openstack-helm + - name: Init openstack-helm ansible.builtin.shell: | - cd /opt/genestack/submodules/openstack-helm - make all + helm repo add openstack-helm https://tarballs.opendev.org/openstack/openstack-helm + helm repo update - - name: Build openstack-helm-infra + - name: Init openstack-helm-infra ansible.builtin.shell: | - cd /opt/genestack/submodules/openstack-helm-infra - make all + helm repo add openstack-helm-infra https://tarballs.opendev.org/openstack/openstack-helm-infra + helm repo update - name: Update package lists and install open-iscsi and cryptsetup hosts: all @@ -244,10 +244,7 @@ - name: Install gateway controller shell: | - cd /opt/genestack/submodules/nginx-gateway-fabric/charts - helm upgrade --install nginx-gateway-fabric ./nginx-gateway-fabric \ - --namespace=nginx-gateway \ - -f /etc/genestack/helm-configs/nginx-gateway-fabric/helm-overrides.yaml + /opt/genestack/bin/install-envoy-gateway.sh && /opt/genestack/bin/setup-envoy-gateway.sh args: executable: /bin/bash
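After the playbook swaps NGINX Gateway Fabric for Envoy Gateway, a quick smoke test can confirm the chart repositories and Gateway API plumbing are in place. This is a sketch; the ``envoy-gateway-system`` namespace is an assumption taken from the upstream Envoy Gateway defaults and may differ in this deployment:

    .. code-block:: shell

        # Helm repositories added by the "Init openstack-helm" tasks
        helm repo list | grep -E 'openstack-helm(-infra)?'

        # Gateway API resources and the Envoy Gateway controller pods
        kubectl get gatewayclass
        kubectl -n envoy-gateway-system get pods   # namespace is an assumption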