Update dependency @types/node to v20 #984

Workflow file for this run

name: provision
on: [push]
env:
  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
  AWS_DEFAULT_REGION: 'eu-central-1'
  ROUTE53_DOMAIN_NAME: tekton-argocd.de
  TEKTON_CLI_VERSION: '0.29.1'
jobs:
  provision-eks-with-pulumi:
    runs-on: ubuntu-latest
    env:
      PULUMI_ACCESS_TOKEN: ${{ secrets.PULUMI_ACCESS_TOKEN }}
    # Create a GitHub environment referencing our EKS cluster endpoint
    environment:
      name: traefik-dashboard
      url: ${{ steps.traefik-expose.outputs.traefik_url }}
    # Using outputs to provide the Pulumi-created kubeconfig to subsequent jobs
    # see https://stackoverflow.com/a/61236803/4964553
    # & see https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idoutputs
    outputs:
      kubeconfig: ${{ steps.pulumi-up.outputs.kubeconfig }}
    steps:
      - name: Checkout
        uses: actions/checkout@master
      - name: Setup node env
        uses: actions/setup-node@v3.6.0
        with:
          node-version: '18'
      - name: Cache node_modules
        uses: actions/cache@v3
        with:
          path: ~/.npm
          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-
      - name: Install Pulumi dependencies before npm run generate to prevent it from breaking the build
        run: npm install
        working-directory: ./eks-deployment
      - name: Install Pulumi CLI
        uses: pulumi/action-install-pulumi-cli@v2.0.0
      - name: Provision AWS EKS cluster with Pulumi
        id: pulumi-up
        run: |
          pulumi stack select dev
          pulumi preview
          echo "Let's use --suppress-outputs here in order to prevent Pulumi from logging the kubeconfig into public GitHub Action logs"
          pulumi up --yes --suppress-outputs
          echo "Create ~/.kube dir only, if not already existent (see https://stackoverflow.com/a/793867/4964553)"
          mkdir -p ~/.kube
          echo "Create kubeconfig and supply it for depending Action jobs"
          pulumi stack output kubeconfig > ~/.kube/config
          echo 'Expose the kubeconfig as a step output - using the multiline $GITHUB_OUTPUT syntax instead of the deprecated ::set-output command'
          echo "kubeconfig<<EOF" >> "$GITHUB_OUTPUT"
          pulumi stack output kubeconfig >> "$GITHUB_OUTPUT"
          echo "EOF" >> "$GITHUB_OUTPUT"
        working-directory: ./eks-deployment
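      # Note: the kubeconfig step output above is what the job-level outputs.kubeconfig maps to - the
      # follow-up jobs read it via ${{ needs.provision-eks-with-pulumi.outputs.kubeconfig }} and write it
      # to ~/.kube/config before running kubectl.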
      - name: Try to connect to our EKS cluster using kubectl
        run: kubectl get nodes
      - name: Configure Amazon EBS CSI driver for working PersistentVolumes
        run: |
          echo "--- Install eksctl"
          curl --silent --location "https://github.com/weaveworks/eksctl/releases/latest/download/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp
          sudo mv /tmp/eksctl /usr/local/bin
          echo "--- Enable IAM OIDC provider"
          eksctl utils associate-iam-oidc-provider --region=$AWS_DEFAULT_REGION --cluster=$(pulumi stack output clusterName) --approve
          echo "--- Create Amazon EBS CSI driver IAM role"
          eksctl create iamserviceaccount \
            --name ebs-csi-controller-sa \
            --namespace kube-system \
            --cluster $(pulumi stack output clusterName) \
            --attach-policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy \
            --approve \
            --role-only \
            --role-name AmazonEKS_EBS_CSI_DriverRole
          echo "--- Add the Amazon EBS CSI add-on"
          eksctl create addon --name aws-ebs-csi-driver --cluster $(pulumi stack output clusterName) --service-account-role-arn arn:aws:iam::$(aws sts get-caller-identity --query Account --output text):role/AmazonEKS_EBS_CSI_DriverRole --force
        working-directory: ./eks-deployment
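      # A quick sanity check at this point could be `eksctl get addon --name aws-ebs-csi-driver --cluster $(pulumi stack output clusterName)`,
      # which should report the add-on as ACTIVE - left out of the pipeline here to keep the job lean.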
      - name: Install Traefik via Helm
        run: |
          echo "--- Install Traefik via Helm (which is already installed in GitHub Actions environment https://github.com/actions/virtual-environments)"
          helm dependency update traefik/install
          helm upgrade -i traefik traefik/install
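      # The Traefik chart is expected to create a Service of type LoadBalancer named 'traefik' in the default
      # namespace - `kubectl get service traefik -n default` should show the AWS ELB hostname that the next
      # step reads from .status.loadBalancer.ingress[0].hostname.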
      - name: Create or update Route53 hosted zone A record to point to ELB Traefik is configured to
        run: |
          echo "--- Obtaining the Route53 domain's hosted zone id"
          ROUTE53_DOMAIN_HOSTED_ZONE_ID="$(aws route53 list-hosted-zones-by-name | jq --arg name "$ROUTE53_DOMAIN_NAME." -r '.HostedZones | .[] | select(.Name=="\($name)") | .Id')"
          echo "--- Obtaining the ELB hosted zone id"
          echo "Therefore cutting the ELB name from the traefik k8s Service url using cut (see https://stackoverflow.com/a/29903172/4964553)"
          ELB_NAME="$(kubectl get service traefik -n default --output=jsonpath='{.status.loadBalancer.ingress[0].hostname}' | cut -d "-" -f 1)"
          echo "Extracting the hosted zone id using aws cli and jq (see https://stackoverflow.com/a/53230627/4964553)"
          ELB_HOSTED_ZONE_ID="$(aws elb describe-load-balancers | jq --arg name "$ELB_NAME" -r '.LoadBalancerDescriptions | .[] | select(.LoadBalancerName=="\($name)") | .CanonicalHostedZoneNameID')"
          echo "--- Obtaining the Elastic Load Balancer url as the A record's AliasTarget"
          ELB_URL="$(kubectl get service traefik -n default --output=jsonpath='{.status.loadBalancer.ingress[0].hostname}')"
          echo "--- Creating or updating ('UPSERT') Route53 hosted zone A record to point to ELB Traefik (see https://aws.amazon.com/premiumsupport/knowledge-center/simple-resource-record-route53-cli/)"
          echo "--- Creating Route53 hosted zone record (mind to wrap the variables in double quotes in order to get them evaluated, see https://stackoverflow.com/a/49228748/4964553)"
          aws route53 change-resource-record-sets \
            --hosted-zone-id $ROUTE53_DOMAIN_HOSTED_ZONE_ID \
            --change-batch '
            {
              "Comment": "Create or update Route53 hosted zone A record to point to ELB Traefik is configured to",
              "Changes": [{
                "Action": "UPSERT",
                "ResourceRecordSet": {
                  "Name": "*.'"$ROUTE53_DOMAIN_NAME"'",
                  "Type": "A",
                  "AliasTarget": {
                    "HostedZoneId": "'"$ELB_HOSTED_ZONE_ID"'",
                    "DNSName": "dualstack.'"$ELB_URL"'",
                    "EvaluateTargetHealth": true
                  }
                }
              }]
            }
            '
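      # To double-check the UPSERT, `aws route53 list-resource-record-sets --hosted-zone-id "$ROUTE53_DOMAIN_HOSTED_ZONE_ID"`
      # should list the wildcard A record with its alias target - just a debugging hint, not part of the pipeline.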
      - name: Expose Traefik url as GitHub environment
        id: traefik-expose
        run: |
          echo "--- Apply Traefik-ception IngressRule"
          kubectl apply -f traefik/traefik-dashboard.yml
          echo "--- Wait until Loadbalancer url is present (see https://stackoverflow.com/a/70108500/4964553)"
          until kubectl get service/traefik -n default --output=jsonpath='{.status.loadBalancer}' | grep "ingress"; do : ; done
          TRAEFIK_URL="http://traefik.$ROUTE53_DOMAIN_NAME"
          echo "All Services should be accessible through Traefik Ingress at $TRAEFIK_URL - creating GitHub Environment"
          echo "traefik_url=$TRAEFIK_URL" >> $GITHUB_OUTPUT

  install-and-run-argocd-on-eks:
    runs-on: ubuntu-latest
    needs: provision-eks-with-pulumi
    environment:
      name: argocd-dashboard
      url: ${{ steps.dashboard-expose.outputs.dashboard_host }}
    steps:
      - name: Checkout
        uses: actions/checkout@master
      # We must use single quotes (!!!) here to access the kubeconfig like this:
      # echo '${{ needs.provision-eks-with-pulumi.outputs.kubeconfig }}' > ~/.kube/config
      # Otherwise we'll run into errors like (see https://stackoverflow.com/a/15930393/4964553):
      # "error: error loading config file "/home/runner/.kube/config": yaml: did not find expected ',' or '}'"
      - name: Configure kubeconfig to use with kubectl from provisioning job
        run: |
          mkdir ~/.kube
          echo '${{ needs.provision-eks-with-pulumi.outputs.kubeconfig }}' > ~/.kube/config
          echo "--- Checking connectivity to cluster"
          kubectl get nodes
      - name: Install ArgoCD
        run: |
          echo "--- Create argo namespace and install it"
          kubectl create namespace argocd --dry-run=client -o yaml | kubectl apply -f -
          echo "--- Install & configure ArgoCD via Kustomize - see https://stackoverflow.com/a/71692892/4964553"
          kubectl apply -k argocd/install
      - name: Expose ArgoCD Dashboard as GitHub environment
        id: dashboard-expose
        run: |
          echo "--- Expose ArgoCD Dashboard via Traefik IngressRoute"
          kubectl apply -f traefik/argocd-dashboard.yml
          echo "--- Create GitHub environment var"
          DASHBOARD_HOST="https://argocd.$ROUTE53_DOMAIN_NAME"
          echo "The ArgoCD dashboard is accessible at $DASHBOARD_HOST - creating GitHub Environment"
          echo "dashboard_host=$DASHBOARD_HOST" >> $GITHUB_OUTPUT
      - name: Create GitHub Container Registry Secret to be able to pull from ghcr.io
        run: |
          echo "--- Create Secret to access GitHub Container Registry"
          kubectl create secret docker-registry github-container-registry \
            --docker-server=ghcr.io \
            --docker-username=${{ secrets.GHCR_USER }} \
            --docker-password=${{ secrets.GHCR_PASSWORD }} \
            --namespace default \
            --save-config --dry-run=client -o yaml | kubectl apply -f -
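      # The docker-registry Secret above only takes effect for workloads that reference it, e.g. via
      # imagePullSecrets in a Pod spec or on the default ServiceAccount - the apps deployed through ArgoCD
      # are assumed to reference 'github-container-registry' that way.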
      - name: Install ArgoCD CLI
        run: |
          echo "--- Since there's no brew ready to use anymore (https://github.com/actions/runner-images/issues/6283), we use the curl installation method here (see https://argo-cd.readthedocs.io/en/stable/cli_installation/#download-with-curl)"
          curl -sSL -o argocd-linux-amd64 https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-amd64
          sudo install -m 555 argocd-linux-amd64 /usr/local/bin/argocd
          rm argocd-linux-amd64
      - name: Install & configure argocd-task-create-sync-and-wait
        run: |
          echo "--- Wait until Secret argocd-initial-admin-secret got created for the following argocd login"
          until kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data}" | grep "password"; do : ; done
          echo "--- Wait until (hopefully) ArgoCD server deployment is ready to be logged in to"
          kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=argocd-server -n argocd
          echo "--- Login argocd CLI - now wrapped in until to prevent dial tcp: lookup 12345.eu-central-1.elb.amazonaws.com on 8.8.8.8:53: no such host (see https://stackoverflow.com/a/71030112/4964553)"
          until argocd login argocd.$ROUTE53_DOMAIN_NAME --username admin --password $(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d; echo) --insecure; do : ; done
          echo "--- Create ConfigMap to point argocd CLI to our argocd-server"
          kubectl create configmap argocd-env-configmap \
            --from-literal="ARGOCD_SERVER=argocd.$ROUTE53_DOMAIN_NAME" \
            --namespace default \
            --save-config --dry-run=client -o yaml | kubectl apply -f -
          echo "--- Create AppProject apps2deploy using manifest style incl. role create-sync with needed permissions"
          kubectl apply -f argocd/argocd-appproject-apps2deploy.yml
          echo "--- Show AppProject details incl. role permissions"
          kubectl get appproj -n argocd apps2deploy -o yaml
          echo "--- Create Secret for argocd CLI authentication to the argocd-server using AppProject role token"
          kubectl create secret generic argocd-env-secret \
            --from-literal=ARGOCD_AUTH_TOKEN=$(argocd proj role create-token apps2deploy create-sync --token-only) \
            --namespace default \
            --save-config --dry-run=client -o yaml | kubectl apply -f -
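      # argocd-env-configmap and argocd-env-secret match the naming convention the Tekton Hub argocd task
      # expects: it should pick up ARGOCD_SERVER and ARGOCD_AUTH_TOKEN from these objects, so PipelineRuns
      # can sync Applications without an interactive argocd login.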

  install-and-run-tekton-on-eks:
    runs-on: ubuntu-latest
    needs: [provision-eks-with-pulumi, install-and-run-argocd-on-eks]
    environment:
      name: tekton-dashboard
      url: ${{ steps.dashboard-expose.outputs.dashboard_host }}
    steps:
      - name: Checkout
        uses: actions/checkout@master
      # We must use single quotes (!!!) here to access the kubeconfig like this:
      # echo '${{ needs.provision-eks-with-pulumi.outputs.kubeconfig }}' > ~/.kube/config
      # Otherwise we'll run into errors like (see https://stackoverflow.com/a/15930393/4964553):
      # "error: error loading config file "/home/runner/.kube/config": yaml: did not find expected ',' or '}'"
      - name: Configure kubeconfig to use with kubectl from provisioning job
        run: |
          mkdir ~/.kube
          echo '${{ needs.provision-eks-with-pulumi.outputs.kubeconfig }}' > ~/.kube/config
          echo "--- Checking connectivity to cluster"
          kubectl get nodes
      - name: Install Tekton Pipelines, Dashboard, Triggers, Tasks
        run: |
          echo "--- Install Tekton Pipelines, Dashboard, Triggers via Kustomize"
          kubectl apply -k tekton/install
          echo "--- Install Tekton Hub & local Tasks via Kustomize"
          kubectl apply -k tekton/tasks
          echo "--- Wait for Tekton to become ready & show running Tekton pods"
          kubectl wait --for=condition=ready pod -l app=tekton-pipelines-controller --namespace tekton-pipelines --timeout=120s
          kubectl get pods --namespace tekton-pipelines
      - name: Expose Tekton Dashboard as GitHub environment
        id: dashboard-expose
        run: |
          echo "--- Expose Tekton Dashboard via Traefik IngressRoute"
          kubectl apply -f traefik/tekton-dashboard.yml
          echo "--- Create GitHub environment var"
          DASHBOARD_HOST="http://tekton.$ROUTE53_DOMAIN_NAME"
          echo "The Tekton dashboard is accessible at $DASHBOARD_HOST - creating GitHub Environment"
          echo "dashboard_host=$DASHBOARD_HOST" >> $GITHUB_OUTPUT
      - name: Install Tekton CLI using curl instead of homebrew to speed up the pipeline
        run: |
          curl -LO "https://github.com/tektoncd/cli/releases/download/v${TEKTON_CLI_VERSION}/tkn_${TEKTON_CLI_VERSION}_Linux_x86_64.tar.gz" \
            && tar xvzf "tkn_${TEKTON_CLI_VERSION}_Linux_x86_64.tar.gz" -C /usr/local/bin/ tkn
      - name: Create Tekton Task & run it with a TaskRun
        run: |
          echo "--- Start TaskRun"
          tkn task start hello --dry-run > taskRun-hello.yaml
          TASK_RUN_NAME=$(kubectl create -f taskRun-hello.yaml -o json | jq -r '.metadata.name')
          echo "--- Wait for TaskRun to succeed"
          kubectl wait --for=condition=SUCCEEDED=True --timeout=60s taskruns.tekton.dev/$TASK_RUN_NAME
          echo "--- Show TaskRun logs"
          tkn taskrun logs $TASK_RUN_NAME
      - name: Create Secrets for GitLab Container Registry & Repository access & apply buildpacks PVC, ServiceAccount & Pipeline
        run: |
          echo "--- Create gitlab.com api token secret for Tekton Pipeline status reports"
          kubectl create secret generic gitlab-api-secret \
            --from-literal=token=${{ secrets.GITLAB_API_TOKEN }} \
            --namespace default \
            --save-config --dry-run=client -o yaml | kubectl apply -f -
          echo "--- Create Secret to access GitLab Container Registry"
          kubectl create secret docker-registry gitlab-container-registry \
            --docker-server=registry.gitlab.com \
            --docker-username=${{ secrets.GITLAB_CR_USER }} \
            --docker-password=${{ secrets.GITLAB_CR_PASSWORD }} \
            --namespace default \
            --save-config --dry-run=client -o yaml | kubectl apply -f -
          echo "--- Create Secret for GitLab based configuration repository"
          sed "s#{{GITLAB_PUSH_TOKEN}}#${{ secrets.GITLAB_PUSH_TOKEN }}#g" tekton/misc/gitlab-push-secret.yml | kubectl apply -f -
          echo "--- Apply buildpacks PVC, ServiceAccount & Pipeline"
          kubectl apply -k tekton/misc
          kubectl apply -k tekton/pipelines
      - name: Run Tekton Buildpacks powered Pipeline by creating PipelineRun
        run: |
          echo "--- Housekeeping: delete old PipelineRuns to prevent too many Pods errors"
          tkn pipelinerun delete --all --keep 10 -n default --force
          echo "--- Trigger PipelineRun in Tekton / K8s"
          PIPELINE_RUN_NAME=$(kubectl create -f tekton/pipelines/pipeline-run.yml --output=jsonpath='{.metadata.name}')
          echo "--- Show Tekton PipelineRun logs"
          tkn pipelinerun logs $PIPELINE_RUN_NAME --follow
          echo "--- Check if Tekton PipelineRun Failed & exit GitHub Actions Step accordingly"
          kubectl get pipelineruns $PIPELINE_RUN_NAME --output=jsonpath='{.status.conditions[*].reason}' | grep Failed && exit 1 || exit 0
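      # The jsonpath/grep check reads the reason of the PipelineRun's Succeeded condition (roughly what
      # `tkn pipelinerun describe $PIPELINE_RUN_NAME` shows as STATUS) and only lets the reason 'Failed'
      # break the build.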
      - name: Create Tekton Triggers EventListener, Trigger Secret, ServiceAccount/RoleBinding/ClusterRoleBinding
        run: |
          kubectl apply -k tekton/triggers
      - name: Expose Tekton Triggers EventListener as Traefik IngressRoute & testdrive Trigger
        run: |
          echo "--- Apply Tekton EventListener Traefik IngressRoute"
          kubectl apply -f traefik/gitlab-listener.yml
          echo "--- Testdrive Trigger via curl"
          curl -v \
            -H 'X-GitLab-Token: 1234567' \
            -H 'X-Gitlab-Event: Push Hook' \
            -H 'Content-Type: application/json' \
            --data-binary "@tekton/triggers/gitlab-push-test-event.json" \
            http://gitlab-listener.$ROUTE53_DOMAIN_NAME
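      # If the GitLab interceptor accepts the token and event type, the EventListener is expected to answer with
      # HTTP 202 Accepted and a small JSON body containing an eventID, and a fresh PipelineRun should show up in
      # the default namespace - a handy smoke test that the whole Trigger chain is wired up.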