19 changes: 11 additions & 8 deletions .github/actions/test/action.yml
@@ -33,16 +33,10 @@ runs:
- name: Run test suite
shell: bash
run: |
set -eo pipefail
set -e
source ./ci.env
source ./bin/activate "$AZIMUTH_CONFIG_ENVIRONMENT" "$AZIMUTH_ENVIRONMENT"
test_directory="$(ansible -m debug -a "var=$VAR_NAME" all | jq -r ".plays[0].tasks[0].hosts.localhost.$VAR_NAME")"
robot --loglevel debug --consolecolors on "$test_directory"
env:
ANSIBLE_LOAD_CALLBACK_PLUGINS: "true"
ANSIBLE_STDOUT_CALLBACK: json
MOZ_HEADLESS: "1"
VAR_NAME: generate_tests_suite_directory
./bin/run-tests

- name: Upload test report artifacts
uses: actions/upload-artifact@v3
@@ -54,6 +48,15 @@ runs:
report.html
if: ${{ always() }}

- name: Clean up test platforms
shell: bash
run: |
set -e
source ./ci.env
source ./bin/activate "$AZIMUTH_CONFIG_ENVIRONMENT" "$AZIMUTH_ENVIRONMENT"
./bin/run-tests --include delete
if: ${{ cancelled() }}

- name: Create debug bundle
shell: bash
run: |
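
The inline test logic removed above (resolving the generated suite directory via Ansible and invoking Robot Framework) now lives in `./bin/run-tests`, which both the test and clean-up steps call. A minimal sketch of such a wrapper (hypothetical, reconstructed from the removed lines, and assuming extra arguments such as `--include delete` are forwarded to Robot) could look like this:

```bash
#!/usr/bin/env bash
# Hypothetical run-tests wrapper, reconstructed from the logic removed from action.yml
set -eo pipefail

# Make Ansible emit JSON so the generated suite directory can be extracted with jq
export ANSIBLE_LOAD_CALLBACK_PLUGINS="true"
export ANSIBLE_STDOUT_CALLBACK="json"
export MOZ_HEADLESS="1"

# Resolve the directory containing the generated Robot Framework suites
VAR_NAME="generate_tests_suite_directory"
test_directory="$(ansible -m debug -a "var=$VAR_NAME" all | jq -r ".plays[0].tasks[0].hosts.localhost.$VAR_NAME")"

# Run the suites, forwarding any extra arguments (e.g. --include delete)
exec robot --loglevel debug --consolecolors on "$@" "$test_directory"
```
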
157 changes: 157 additions & 0 deletions .gitlab-ci-magnum.yml.sample
@@ -0,0 +1,157 @@
---

default:
image: ubuntu:jammy

variables:
# Because we are installing git-crypt as part of the job, we cannot reuse old
# checkouts where git-crypt is already initialised, as this results in an error
GIT_STRATEGY: clone
# Use the pipeline credentials for Terraform
# This assumes that we are using GitLab-managed Terraform state (recommended when available)
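# (Terraform's HTTP backend reads these from the TF_HTTP_USERNAME and TF_HTTP_PASSWORD environment variables)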
TF_HTTP_USERNAME: gitlab-ci-token
TF_HTTP_PASSWORD: $CI_JOB_TOKEN

stages:
# This stage owns the scheduled job that checks for upstream changes
- scheduled
# This stage owns the deploy job for the staging environment
- staging
# This stage owns the deploy job for the production environment
- production

#####
# This job checks to see if there is a new release that needs to be merged
#
# If there is, it will create a new branch containing the changes and a corresponding merge request
#
# It runs as a scheduled job, for which a suitable schedule must be defined, e.g. daily or weekly
#
# This job writes back to the repository and to the merge requests API
# To do this, it needs more power than is granted to the CI token
# So CI variables must be set that contain an access token and the corresponding username
# This can be a Project Access Token (paid feature, recommended if available) or a Personal Access Token (not ideal)
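# The variables referenced below are GITLAB_PAT_USERNAME, GITLAB_PAT_TOKEN and (for the merge request assignee) GITLAB_USER_ID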
#####
check_for_release:
stage: scheduled
rules:
- if: $CI_PIPELINE_SOURCE == "schedule" && $CI_COMMIT_BRANCH == "main"
variables:
GIT_STRATEGY: none
before_script:
- apt update -y
- apt install -y curl git jq
script:
# Configure git to use the available credentials
- git config --global credential.helper store
# Do our own clone to make sure we don't get unrelated history errors from detached heads
- git clone https://${GITLAB_PAT_USERNAME}:${GITLAB_PAT_TOKEN}@${CI_SERVER_HOST}/${CI_PROJECT_PATH}.git ${CI_PROJECT_NAME}
- cd ${CI_PROJECT_NAME}
# Tell git who we are for commits
- git config user.email "${CI_PROJECT_PATH_SLUG}-ci@${CI_SERVER_HOST}"
- git config user.name "${CI_PROJECT_NAME} CI"
# Create the merge branch
- ./bin/create-merge-branch
# Create a merge request for the branch
- |
if [ -f ".mergeenv" ]; then
source ".mergeenv"
BODY="{
\"id\": ${CI_PROJECT_ID},
\"title\": \"Upgrade config to upstream version ${RELEASE_TAG}\",
\"source_branch\": \"${BRANCH_NAME}\",
\"target_branch\": \"main\",
\"remove_source_branch\": true,
\"assignee_id\": \"${GITLAB_USER_ID}\"
}"
curl -kfsSL -X POST \
"${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/merge_requests" \
--header "Authorization: Bearer ${GITLAB_PAT_TOKEN}" \
--header "Content-Type: application/json" \
--data "${BODY}"
fi

#####
# This job deploys a staging/test version of the Magnum CAPI management cluster
#
# It runs automatically for every commit to main that changes one of the files
# that affects the environment.
# NOTE: If the target site doesn't have a separate staging cloud with its own Magnum
# deployment, it may still be worth including a management cluster staging env in
# this config repo that is a stripped-down (1 master, 1 worker) version of the prod
# env. Although this will not allow the interaction between Magnum and the CAPI
# management cluster to be tested, it will at least validate the deployment config
# before a production rollout is performed.
#####
deploy_staging:
stage: staging
rules:
# Prevent the job from running on any branch that is not main
- if: $CI_COMMIT_BRANCH != "main"
when: never
# Allow deployments to be manually triggered on main even when there are no changed files
- if: $CI_PIPELINE_SOURCE == "web"
# Run for commits to main that change particular files
- if: $CI_PIPELINE_SOURCE == "push"
changes:
# Files that affect the staging environment
- env
- env.secret
- requirements.yml
- environments/base/**/*
- environments/ha/**/*
- environments/capi-mgmt/**/*
# TODO: Change these to actual site environment names
- environments/site-base/**/*
- environments/site-staging/**/*
environment:
# TODO: Change this to site staging environment name
name: site-staging
variables:
ANSIBLE_FORCE_COLOR: "true"
before_script:
- source ./bin/ci-setup
script:
- ansible-playbook stackhpc.azimuth_ops.provision_capi_mgmt

#####
# This job deploys the Magnum CAPI management cluster to the production environment
#
# It runs for every commit to main that changes one of the files that affects
# the environment, but only if the staging deployment succeeded
#
# It also includes a manual gate that can be used as a confirmation that the
# relevant testing has taken place on staging
#####
deploy_production:
stage: production
rules:
# Prevent the job from running on any branch that is not main
- if: $CI_COMMIT_BRANCH != "main"
when: never
# Allow deployments to be manually triggered on main even when there are no changed files
- if: $CI_PIPELINE_SOURCE == "web"
when: manual
# Run for commits to main that change particular files
- if: $CI_PIPELINE_SOURCE == "push"
changes:
- env
- env.secret
- requirements.yml
- environments/base/**/*
- environments/ha/**/*
- environments/capi-mgmt/**/*
# TODO: Change these to actual site environment names
- environments/site-base/**/*
- environments/site-prod/**/*
when: manual
environment:
# TODO: Change this to site prod environment name
name: site-prod
variables:
ANSIBLE_FORCE_COLOR: "true"
before_script:
- source ./bin/ci-setup
script:
- ansible-playbook stackhpc.azimuth_ops.provision_capi_mgmt
6 changes: 4 additions & 2 deletions bin/tilt-images-apply
@@ -68,11 +68,13 @@ helm_get_values_proc = exec_cmd(
os.environ["TILT_RELEASE_NAME"],
"--namespace",
os.environ["TILT_RELEASE_NAMESPACE"],
"--output",
"json",
],
stdout = subprocess.PIPE
)
current_values = yaml.safe_load(helm_get_values_proc.stdout)
current_values.pop("USER-SUPPLIED VALUES")
current_values = json.loads(helm_get_values_proc.stdout) or {}
current_values.pop("USER-SUPPLIED VALUES", None)


# Build and run the Helm upgrade command
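
For context on the switch to `--output json` in `tilt-images-apply`: the default output of `helm get values` includes a `USER-SUPPLIED VALUES:` heading, and with JSON output a release that has no user-supplied values prints `null`, which is presumably why the code now falls back to `{}` and pops the key defensively. An illustrative comparison (release and namespace names are placeholders, output is indicative):

```console
$ helm get values my-release --namespace my-ns
USER-SUPPLIED VALUES:
replicaCount: 2

$ helm get values my-release --namespace my-ns --output json
{"replicaCount":2}
```
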
2 changes: 1 addition & 1 deletion docs/configuration/08-zenith.md
@@ -17,7 +17,7 @@ zenith_registrar_subdomain_token_signing_key: "<some secret key>"

!!! tip

This key should be a long, random string - at least 32 bytes (256 bits) is recommended.
This key must be a long, random string - at least 32 bytes (256 bits) is required.
A suitable key can be generated using `openssl rand -hex 32`.

!!! danger
9 changes: 8 additions & 1 deletion docs/configuration/16-local-customisations.md
@@ -37,8 +37,15 @@ compiled CSS file. For example, the following configuration tells Azimuth to use
[Zephyr theme from Bootswatch](https://bootswatch.com/zephyr/):

```yaml title="environments/my-site/inventory/group_vars/all/variables.yml"
azimuth_theme_bootstrap_css_url: https://docs.example.org/azimuth
azimuth_theme_bootstrap_css_url: https://bootswatch.com/5/zephyr/bootstrap.css
```
!!! tip

For the theming changes to take effect, you may need to do a hard refresh of the page, since browsers cache CSS aggressively.

Mac: <kbd>⇧ Shift</kbd> + <kbd>⌘ Command</kbd> + <kbd>R</kbd>
Windows: <kbd>Ctrl</kbd> + <kbd>⇧ Shift</kbd> + <kbd>R</kbd> / <kbd>Ctrl</kbd> + <kbd>F5</kbd>

### Injecting custom CSS

25 changes: 25 additions & 0 deletions docs/debugging/kubernetes.md
@@ -167,6 +167,31 @@ kubectl -n capo-system logs deploy/capo-controller-manager
kubectl -n capi-addon-system logs deploy/cluster-api-addon-provider
```

## Accessing tenant clusters

The kubeconfigs for all tenant clusters are stored as secrets. First, you need
to find the name and namespace of the cluster you want to debug. This can be
seen from the list of clusters:

```command title="On the K3s node, targeting the HA cluster if deployed"
$ kubectl get cluster -A
```

Then, you can retrieve and decode the kubeconfig with the following:

```command title="On the K3s node, targeting the HA cluster if deployed"
$ kubectl -n <namespace> get secret <clustername>-kubeconfig -o json | \
jq -r '.data.value' | \
base64 -d \
> kubeconfig-tenant.yaml
```

This can now be used by exporting the path to this file:

```command title="On the K3s node, targeting the HA cluster if deployed"
$ export KUBECONFIG=kubeconfig-tenant.yaml
```
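
To check that the extracted kubeconfig works, you can, for example, list the nodes of the tenant cluster (the file name is the one written in the previous step):

```command title="On the K3s node"
$ kubectl --kubeconfig ./kubeconfig-tenant.yaml get nodes
```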

## Zenith service issues

Zenith services are enabled on Kubernetes clusters using the
5 changes: 5 additions & 0 deletions environments/example/inventory/group_vars/all/secrets.yml
@@ -4,6 +4,9 @@
# It should be encrypted if stored in version control
# https://stackhpc.github.io/azimuth-config/repository/secrets/
#####
# Unless explicitly stated otherwise, each secret should be a long, random string - at least 32 bytes (256 bits) is recommended.
# A suitable key can be generated using the following command:
# openssl rand -hex 32

# https://stackhpc.github.io/azimuth-config/configuration/05-secret-key/
# The secret key for signing Azimuth cookies
@@ -15,12 +18,14 @@ keycloak_admin_password: "<secure password>"

# https://stackhpc.github.io/azimuth-config/configuration/08-zenith/
# The secret key for signing Zenith registrar tokens
# This MUST be a minimum of 32 characters
zenith_registrar_subdomain_token_signing_key: "<secure secret key>"

# https://stackhpc.github.io/azimuth-config/configuration/10-kubernetes-clusters/#harbor-registry
# The password for the Harbor admin account
harbor_admin_password: "<secure password>"
# The secret key for Harbor
# This MUST be exactly 16 alphanumeric characters
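# For example, a value produced by `openssl rand -hex 8` is 16 hexadecimal (alphanumeric) characters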
harbor_secret_key: "<secure secret key>"

# https://stackhpc.github.io/azimuth-config/configuration/14-monitoring/#accessing-web-interfaces
2 changes: 1 addition & 1 deletion requirements.yml
@@ -3,7 +3,7 @@
collections:
- name: https://github.com/stackhpc/ansible-collection-azimuth-ops.git
type: git
version: 0.7.2
version: 0.8.2
# For local development
# - type: dir
# source: ../ansible-collection-azimuth-ops