fix: velero linode plugin #9558

Workflow file for this run

name: Build and publish Docker
on:
  push:
    branches:
      - '**'
    tags-ignore:
      - '*'
env:
  CACHE_REGISTRY: ghcr.io
  CACHE_REPO: linode/apl-core
  REPO: otomi/core
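# Job flow: build-test-cache builds the image and pushes it to the GHCR cache repo,
# push-to-docker promotes that image to Docker Hub, release publishes the versioned
# image plus a GitHub release, chart-release packages and publishes the Helm chart,
# test-otomi-release installs the chart on a temporary Scaleway cluster, and
# notification reports the overall result to Slack.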
jobs:
  build-test-cache:
    if: (!contains(github.event.head_commit.message, 'ci skip') && !startsWith(github.ref, 'refs/tags/') && !github.event.act)
    runs-on: ubuntu-latest
    env:
      COMMIT_MSG: ${{ github.event.head_commit.message }}
      BOT_EMAIL: ${{ vars.BOT_EMAIL }}
      BOT_USERNAME: ${{ vars.BOT_USERNAME }}
    steps:
      - name: Set env
        run: |
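          # The trailing path component of GITHUB_REF becomes the image tag,
          # e.g. refs/heads/feature/foo -> "foo" and refs/heads/main -> "main".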
          tag=${GITHUB_REF##*/}
          echo "Creating tag: $tag"
          echo "TAG=$tag" >> $GITHUB_ENV
          git config --global user.email $BOT_EMAIL
          git config --global user.name $BOT_USERNAME
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Login to GitHub Packages
        uses: docker/login-action@v3
        with:
          registry: ${{ env.CACHE_REGISTRY }}
          username: ${{ env.BOT_USERNAME }}
          password: '${{ secrets.BOT_TOKEN }}'
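      # Build the image and push it to the GHCR cache repo, tagged with the branch-derived TAG.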
      - name: CI tests, image build and push tag for main or branch
        uses: docker/build-push-action@v5
        with:
          push: true
          tags: |
            ${{ env.CACHE_REGISTRY }}/${{ env.CACHE_REPO }}:${{ env.TAG }}
  push-to-docker:
    needs: build-test-cache
    if: always() && ((contains(needs.build-test-cache.result, 'success') && !contains(needs.integration.outputs.started, 'true')) || (contains(needs.integration.result, 'success'))) && !github.event.act
    runs-on: ubuntu-latest
    env:
      BOT_EMAIL: ${{ vars.BOT_EMAIL }}
      BOT_USERNAME: ${{ vars.BOT_USERNAME }}
    steps:
      - name: Push to docker hub
        run: |
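          # Pull the branch image from the GHCR cache and retag/push it to the otomi/core repo on Docker Hub.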
          set -u
          TAG=${GITHUB_REF##*/}
          docker login ghcr.io -u $BOT_USERNAME -p ${{ secrets.BOT_TOKEN }}
          image="$CACHE_REGISTRY/$CACHE_REPO:$TAG"
          docker pull $image
          docker tag $image $REPO:$TAG
          docker login -u otomi -p ${{ secrets.DOCKERHUB_OTOMI_TOKEN }}
          docker push $REPO:$TAG
      - name: Show me the logic
        run: |
          echo github.ref == ${{ github.ref }}
  release:
    needs: push-to-docker
    if: always() && (startsWith(github.ref, 'refs/heads/releases/') || startsWith(github.ref, 'refs/heads/main')) && startsWith(github.event.head_commit.message, 'chore(release)') && !github.event.act
    runs-on: ubuntu-latest
    env:
      COMMIT_MSG: ${{ github.event.head_commit.message }}
      BOT_EMAIL: ${{ vars.BOT_EMAIL }}
      BOT_USERNAME: ${{ vars.BOT_USERNAME }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Set env
        run: |
          git config --global user.email $BOT_EMAIL
          git config --global user.name $BOT_USERNAME
      - name: Create and push git tag
        id: git_tag
        run: |
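          # Promote the branch image: retag it as 'latest' and as the release version on Docker Hub
          # and GHCR, then create the matching git tag and prepare the changelog excerpt.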
          TAG=${GITHUB_REF##*/}
          docker login -u otomi -p ${{ secrets.DOCKERHUB_OTOMI_TOKEN }}
          docker pull $REPO:$TAG
          docker tag $REPO:$TAG $REPO:latest
          docker push $REPO:latest
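          # This job only runs for commits starting with 'chore(release)', so the second
          # whitespace-separated field of the commit message is the release version.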
release_tag="v$(echo $COMMIT_MSG | cut -d' ' -f2)"
echo tag=$release_tag >> $GITHUB_OUTPUT
echo "Releasing $REPO:$release_tag"
docker tag $REPO:$TAG $REPO:$release_tag
docker push $REPO:$release_tag
docker login -u $BOT_USERNAME -p '${{ secrets.BOT_TOKEN }}' ghcr.io
docker tag $REPO:$TAG $CACHE_REGISTRY/$CACHE_REPO:$release_tag
docker push $CACHE_REGISTRY/$CACHE_REPO:$release_tag
echo "machine github.com login ${{ env.BOT_USERNAME }} password ${{ secrets.BOT_TOKEN }}" > ~/.netrc
git tag -am "$COMMIT_MSG" $release_tag && git push --follow-tags
#Cut the CHANGELOG.md file up to the first occurence of the "### \[[0-9]*" (meaning three #, a space,a square bracket and any number after it)
sed -n '/### \[[0-9]*/q;p' CHANGELOG.md > NEW_CHANGELOG.md
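      # NEW_CHANGELOG.md (the portion of CHANGELOG.md above the first '### [' entry) is used as the release body below.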
      - name: Create GitHub release
        uses: ncipollo/release-action@v1.12.0
        env:
          token: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag: ${{ steps.git_tag.outputs.tag }}
          name: Release ${{ steps.git_tag.outputs.tag }}
          bodyFile: 'NEW_CHANGELOG.md'
          generateReleaseNotes: true
  chart-release:
    needs: release
    if: always() && contains(needs.release.result, 'success') && !github.event.act
    runs-on: ubuntu-latest
    container:
      image: otomi/tools:v1.4.20
      options: --user 0
    env:
      BOT_EMAIL: ${{ vars.BOT_EMAIL }}
      BOT_USERNAME: ${{ vars.BOT_USERNAME }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Prepare chart
        id: prepare_chart
        run: |
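          # Read the app version from package.json, stamp it into the chart, and bundle the
          # README and generated values schema before the chart is published in the next step.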
          # Install and update helm repo
          helm repo add apl https://linode.github.io/apl-core
          helm repo update
          # Retrieve the app version from package.json
          app_version=$(jq -r '.version' package.json)
          if [ -z "$app_version" ]; then
            echo "Error: Could not retrieve app version from package.json"
            exit 1
          fi
          # Extract major and minor from the app version
          new_app_major=$(echo "$app_version" | cut -d '.' -f 1)
          new_app_minor=$(echo "$app_version" | cut -d '.' -f 2)
          # Get existing helm charts in the registry
          helm_output=$(helm search repo otomi -l -o json)
          # Use jq to parse the output and find the latest chart version for the given $new_app_major.$new_app_minor app version
          existing_version=$(echo "$helm_output" | jq -r --arg major "$new_app_major" --arg minor "$new_app_minor" '
            map(select(.app_version | startswith("v\($major).\($minor)"))) |
            max_by(.version | split(".") | map(tonumber)) |
            .version'
          )
          # Update Chart.yaml with the new app version
          sed -i "s/0.0.0-chart-version/$app_version/g" chart/apl/Chart.yaml
          sed -i "s/APP_VERSION_PLACEHOLDER/v$app_version/g" chart/apl/Chart.yaml
          echo "Chart.yaml updated successfully with version $app_version"
          # Copy the readme from the repo into the chart and append tpl/chart-values.md
          cp README.md chart/apl/
          printf "\n\n" >>chart/apl/README.md
          cat tpl/chart-values.md >>chart/apl/README.md
          # Generate the values schema
          npx js-yaml values-schema.yaml > chart/apl/values.schema.json
          # Set the global git identity, which the chart-releaser step needs when a custom container image is used
          git config --global user.email ${{ env.BOT_EMAIL }}
          git config --global user.name ${{ env.BOT_USERNAME }}
      - name: Create and publish otomi chart release
        id: chart_release
        uses: helm/chart-releaser-action@v1.6.0
        with:
          charts_dir: chart
          skip_existing: true
          mark_as_latest: false
        env:
          CR_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
  test-otomi-release:
    name: Test Helm Chart Installation
    needs: [release, chart-release]
    runs-on: ubuntu-latest
    steps:
      - name: Use Scaleway CLI
        uses: scaleway/action-scw@v0
        with:
          save-config: true
          export-config: true
          version: v2.26.0
          access-key: ${{ secrets.SCW_ACCESS_KEY }}
          secret-key: ${{ secrets.SCW_SECRET_KEY }}
          default-project-id: ${{ secrets.SCW_DEFAULT_PROJECT_ID }}
          default-organization-id: ${{ secrets.SCW_DEFAULT_ORGANIZATION_ID }}
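      # save-config/export-config make the Scaleway credentials available to the scw commands in the following steps.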
      - name: Pulling the helm chart
        run: |
          # Install and update helm repo
          helm repo add apl https://linode.github.io/apl-core
          helm repo update
          # Get the latest version of otomi
          latest_version=$(helm search repo otomi -l | grep -m 1 otomi | awk '{print $2}')
          echo The latest version to be tested is: $latest_version
      - name: Creating the cluster
        run: |
          # Create cluster private network and get ID
          SCALEWAY_PRIVATE_NETWORK_ID=$(scw vpc private-network create project-id=${{ secrets.SCW_DEFAULT_PROJECT_ID }} name='otomi-test-release' region=nl-ams -ojson | jq -r .id)
          # Get k8s 1.27 patch version
          K8s_VERSION=$(scw k8s version list -o json | jq -ce '.[] | .name' -r | grep 1.27)
          # Create cluster
          scw k8s cluster create \
            name=otomi-test-release \
            pools.0.node-type=PRO2-M \
            private-network-id=$SCALEWAY_PRIVATE_NETWORK_ID \
            auto-upgrade.enable=false \
            cni=calico \
            pools.0.name=otomi-test-release \
            pools.0.size=3 \
            pools.0.max-size=3 \
            pools.0.autohealing=true \
            pools.0.autoscaling=true \
            pools.0.root-volume-size=50GB \
            version=$K8s_VERSION \
            region=nl-ams \
            project-id=${{ secrets.SCW_DEFAULT_PROJECT_ID }} \
            --wait
          echo "Cluster deployed successfully"
      - name: Installing new otomi release
        run: |
          # Get cluster ID and set env var for the cleanup step
          cluster_id=$(scw k8s cluster list region=nl-ams -o json | jq -r '.[] | select(.name == "otomi-test-release") | .id')
          echo "Cluster ID: $cluster_id"
          echo SCALEWAY_CLUSTER_ID=$cluster_id >> $GITHUB_ENV
          # Get kubeconfig
          scw k8s kubeconfig install $cluster_id region=nl-ams
          echo "Kubeconfig installed successfully"
          # Capture the current kube context for the helm values
          SCALEWAY_CLUSTER_CONTEXT=$(kubectl config current-context)
          # Install otomi
          helm install otomi otomi/otomi \
            --wait --wait-for-jobs --timeout 30m0s \
            --set cluster.provider=scaleway \
            --set cluster.name=otomi-test-release \
            --set cluster.k8sContext=$SCALEWAY_CLUSTER_CONTEXT
      - name: Gather k8s events on failure
        if: failure()
        run: |
          kubectl get events --sort-by='.lastTimestamp' -A
      - name: Gather k8s pods on failure
        if: failure()
        run: |
          kubectl get pods -A -o wide
      - name: Gather otomi logs on failure
        if: failure()
        run: |
          kubectl logs jobs/otomi --tail 150
      - name: Delete k8s cluster at Scaleway
        if: always()
        run: |
          scw k8s cluster delete ${{ env.SCALEWAY_CLUSTER_ID }} with-additional-resources=true region=nl-ams
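  # Send a Slack notification regardless of the outcome of the preceding jobs.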
  notification:
    needs: [build-test-cache, push-to-docker, release, chart-release]
    if: always()
    runs-on: ubuntu-latest
    steps:
      - name: Slack Notification
        uses: rtCamp/action-slack-notify@v2
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: github-ci
          SLACK_COLOR: ${{ job.status }}
          SLACK_ICON: https://github.com/redkubes.png?size=48
          SLACK_TITLE: CI run
          SLACK_USERNAME: RedKubesBot