Skip to content

Commit

Permalink
Bump k8s version for test cluster and add rollback demo target (#115)
Browse files Browse the repository at this point in the history
* Bump k8s version for test cluster and add rollback demo target

* Wait only if env var set
  • Loading branch information
adriananeci committed Oct 24, 2023
1 parent 6223063 commit f50d7ec
Show file tree
Hide file tree
Showing 3 changed files with 33 additions and 9 deletions.
8 changes: 7 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

NAME ?= adobe/k8s-shredder
K8S_SHREDDER_VERSION ?= "dev"
KINDNODE_VERSION ?= "v1.25.11"
KINDNODE_VERSION ?= "v1.28.0"
COMMIT ?= $(shell git rev-parse --short HEAD)
TEST_CLUSTERNAME ?= "k8s-shredder-test-cluster"

Expand Down Expand Up @@ -78,13 +78,19 @@ e2e-tests: ## Run e2e tests for k8s-shredder deployed in a local kind cluster
@echo "Run e2e tests for k8s-shredder..."
@KUBECONFIG=${PWD}/kubeconfig go test internal/testing/e2e_test.go -v

# DEMO targets
# -----------
# These targets are commands, not files — declare them phony so a stray
# file named e.g. `demo.run` can never shadow them.
.PHONY: demo.prep demo.run demo.rollback

demo.prep: build ## Setup demo cluster
	@echo "Setup demo cluster..."
	./internal/testing/local_env_prep.sh "${K8S_SHREDDER_VERSION}" "${KINDNODE_VERSION}" "${TEST_CLUSTERNAME}"

demo.run: ## Run demo
	./internal/testing/cluster_upgrade.sh "${TEST_CLUSTERNAME}"

demo.rollback: ## Rollback demo
	./internal/testing/rollback_cluster_upgrade.sh "${TEST_CLUSTERNAME}"


ci: local-test e2e-tests clean ## Run CI

Expand Down
21 changes: 13 additions & 8 deletions internal/testing/cluster_upgrade.sh
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,16 @@ fi

kubectl label node "${K8S_CLUSTER_NAME}-worker" --kubeconfig=kubeconfig --overwrite shredder.ethos.adobe.net/parked-node-expires-on="${EXPIRES_ON}"

# For moving node back as active, useful during debug process
#export K8S_CLUSTER_NAME=k8s-shredder-test-cluster
#kubectl uncordon "${K8S_CLUSTER_NAME}-worker" --kubeconfig=kubeconfig
#kubectl label node "${K8S_CLUSTER_NAME}-worker" --kubeconfig=kubeconfig shredder.ethos.adobe.net/upgrade-status-
#kubectl label node "${K8S_CLUSTER_NAME}-worker" --kubeconfig=kubeconfig --overwrite shredder.ethos.adobe.net/parked-node-expires-on-
#kubectl delete -n ns-k8s-shredder-test $(kubectl get pods -n ns-k8s-shredder-test -oname) --force --wait=0 --timeout=0
#kubectl delete -n ns-team-k8s-shredder-test $(kubectl get pods -n ns-team-k8s-shredder-test -oname) --force --wait=0 --timeout=0
#kubectl get po -A --field-selector=spec.nodeName=k8s-shredder-test-cluster-worker
# Optionally block until the parked node has been fully drained.
# WAIT_FOR_PODS defaults to "false" so the script returns immediately unless
# the caller opts in (export WAIT_FOR_PODS=true).
if [[ ${WAIT_FOR_PODS:-false} == "true" ]]
then
    # kubectl prints "No resources found" on stderr when the selector matches
    # nothing, so we capture stderr into the variable (2>&1) while discarding
    # the normal pod listing on stdout (>/dev/null).
    while [[ $pod_status != "No resources found" ]]
    do
        echo "Info: Waiting for all pods to be evicted from the node..."
        sleep 10
        pod_status=$(kubectl get pods -A --kubeconfig=kubeconfig --field-selector metadata.namespace!=kube-system,metadata.namespace!=local-path-storage,spec.nodeName="${K8S_CLUSTER_NAME}-worker" 2>&1 >/dev/null)
    done

    # Simulate the end of the upgrade: drop the shredder labels so the node
    # is considered active again.
    kubectl label node "${K8S_CLUSTER_NAME}-worker" --kubeconfig=kubeconfig shredder.ethos.adobe.net/upgrade-status-
    kubectl label node "${K8S_CLUSTER_NAME}-worker" --kubeconfig=kubeconfig --overwrite shredder.ethos.adobe.net/parked-node-expires-on-
fi
13 changes: 13 additions & 0 deletions internal/testing/rollback_cluster_upgrade.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Roll back a simulated cluster upgrade: unpark the worker node and recycle
# the test pods so controllers reschedule them.
#
# Usage: rollback_cluster_upgrade.sh [cluster-name]
#        (defaults to k8s-shredder-test-cluster)
set -e

# Take the cluster name from the first argument, falling back to the default
# demo cluster. The previous version unconditionally overwrote the argument
# with a hard-coded `export K8S_CLUSTER_NAME=k8s-shredder-test-cluster`,
# silently ignoring the name passed by the Makefile's demo.rollback target.
K8S_CLUSTER_NAME=${1:-k8s-shredder-test-cluster}
export K8S_CLUSTER_NAME

# Move the node back to active duty and drop the shredder parking labels.
kubectl uncordon "${K8S_CLUSTER_NAME}-worker" --kubeconfig=kubeconfig
kubectl label node "${K8S_CLUSTER_NAME}-worker" --kubeconfig=kubeconfig shredder.ethos.adobe.net/upgrade-status-
kubectl label node "${K8S_CLUSTER_NAME}-worker" --kubeconfig=kubeconfig --overwrite shredder.ethos.adobe.net/parked-node-expires-on-

# Force-delete the test pods so they get rescheduled on the unparked node.
# NOTE(review): with `set -e` these lines abort the script if a namespace has
# no pods (empty substitution) — acceptable for the demo; verify if reused.
kubectl delete -n ns-k8s-shredder-test $(kubectl get pods -n ns-k8s-shredder-test -oname) --force --wait=0 --timeout=0
kubectl delete -n ns-team-k8s-shredder-test $(kubectl get pods -n ns-team-k8s-shredder-test -oname) --force --wait=0 --timeout=0

# Show what is currently scheduled on the worker node (was hard-coded to the
# default cluster name; now follows the argument).
kubectl get po -A --field-selector=spec.nodeName="${K8S_CLUSTER_NAME}-worker"

0 comments on commit f50d7ec

Please sign in to comment.