feat: enable locally building and loading kubectl-bats docker image into the cluster for helm tests (#310)

Signed-off-by: Lenin Mehedy <lenin.mehedy@swirldslabs.com>
leninmehedy authored Sep 7, 2023
1 parent b3a7429 commit 230b291
Showing 6 changed files with 192 additions and 57 deletions.
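The change wires the dev Makefile, a new docker.sh helper, and a CI values file together so that the chart's helm tests run against a locally built kubectl-bats image instead of a published one. A minimal sketch of the local flow this enables, using the targets introduced below:

    cd dev
    # builds docker.fst.local/kubectl-bats:local, loads it into the kind
    # cluster, then deploys the chart with the ci/ci-values.yaml overrides
    make ci-test SCRIPT_NAME=direct-install.sh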
6 changes: 3 additions & 3 deletions .github/workflows/zxc-compile-code.yaml
@@ -102,6 +102,7 @@ jobs:
uses: helm/kind-action@dda0770415bac9fc20092cacbc54aa298604d140 # v1.8.0
if: ${{ inputs.enable-unit-tests && !cancelled() }}
with:
+cluster_name: fst
version: v0.19.0
verbosity: 3
wait: 120s
@@ -122,7 +123,6 @@ jobs:
- name: Kubernetes Cluster Info
if: ${{ inputs.enable-unit-tests && !cancelled() }}
run: |
-kubectl config set-context --current --namespace=default
kubectl config get-contexts
kubectl get crd
@@ -147,13 +147,13 @@ jobs:
- name: Helm Chart Test (Direct Install)
working-directory: dev
if: ${{ inputs.enable-unit-tests && !cancelled() && !failure() }}
-run: make test SCRIPT_NAME=direct-install.sh
+run: make ci-test SCRIPT_NAME=direct-install.sh

# This step tests the Helm chart NMT mode of operation which uses the ubi8-init-dind image.
- name: Helm Chart Test (NMT Install)
working-directory: dev
if: ${{ inputs.enable-unit-tests && !cancelled() && !failure() }}
-run: make test SCRIPT_NAME=nmt-install.sh
+run: make ci-test SCRIPT_NAME=nmt-install.sh

- name: Compile
id: gradle-build
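With the cluster name pinned to fst, the CI workflow and the dev scripts now resolve the same kind cluster and kubectl context. A quick sanity check (assuming kind and kubectl are installed locally):

    kind get clusters               # should list: fst
    kubectl config current-context  # should print: kind-fst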
149 changes: 97 additions & 52 deletions dev/Makefile
@@ -12,21 +12,29 @@ SHELLOPTS:=$(if $(SHELLOPTS),$(SHELLOPTS):)pipefail:errexit
setup \
setup-cluster \
deploy-chart \
+destroy-chart \
+helm-test \
deploy-network \
+destroy-network \
-destroy-test-container \
-destroy-network test
+deploy-all \
+destroy-all \
+local-kubectl-bats \
+ci-test

# Setup variables
+CLUSTER_NAME ?="fst"
SCRIPTS_DIR=$(PWD)/scripts
CHART_DIR=$(PWD)/../charts/hedera-network
SCRIPT_NAME=direct-install.sh
TMP_DIR=${SCRIPTS_DIR}/../temp

+CHART_VALUES_FILES= # extra values

# scripts
TELEMETRY_SCRIPT="telemetry.sh"
GATEWAY_API_SCRIPT="gateway.sh"
+DOCKER_SCRIPT="docker.sh"

.PHONY: all
all: setup setup-cluster reset
@@ -38,69 +46,55 @@ setup:

.PHONY: setup-cluster
setup-cluster:
-kind create cluster -n fst
+echo "Cluster name: ${CLUSTER_NAME}" && \
+source "${SCRIPTS_DIR}/main.sh" && setup_cluster "${CLUSTER_NAME}"

.PHONY: destroy-cluster
destroy-cluster:
kind delete cluster -n fst

+.PHONY: install-chart
+install-chart:
+source "${SCRIPTS_DIR}/main.sh" && install_chart "${SCRIPT_NAME}"
+
+.PHONY: uninstall-chart
+uninstall-chart:
+source "${SCRIPTS_DIR}/main.sh" && uninstall_chart

.PHONY: update-helm-dependencies
update-helm-dependencies:
helm dependency update ../charts/hedera-network

.PHONY: deploy-chart
-deploy-chart: deploy-minio-operator-if-required deploy-prometheus-operator deploy-gateway-api
-echo ">> Deploying helm chart..." && \
-echo "" && \
-if [ "${SCRIPT_NAME}" = "nmt-install.sh" ]; then \
-helm install fst ../charts/hedera-network --set defaults.root.image.repository=hashgraph/full-stack-testing/ubi8-init-dind ; \
-else \
-helm install fst ../charts/hedera-network ; \
-fi \
-
-.PHONY: helm-test
-helm-test:
-echo "" && \
-echo ">> Running helm test..." && \
-echo "" && \
-# We have to specify the specific test name here, otherwise it executes all tests from the subcharts as well.
-# I had to remove --logs, because --logs ignores filter and tries to get logs for all test pods
-# - Error: unable to get pod logs for fst-graphql-test: pods "fst-graphql-test" not found
-helm test fst --filter 'name=network-test' && \
-EXIT_CODE=$$? && \
-kubectl logs network-test && \
-exit $${EXIT_CODE}
+deploy-chart:
+$(MAKE) update-helm-dependencies
+$(MAKE) deploy-minio-operator-if-required
+$(MAKE) deploy-prometheus-operator
+$(MAKE) deploy-gateway-api
+$(MAKE) install-chart

+.PHONY: destroy-chart
+destroy-chart:
+-$(MAKE) uninstall-chart
+-$(MAKE) destroy-gateway-api
+-$(MAKE) destroy-prometheus-operator
+-$(MAKE) undeploy-minio-operator

.PHONY: deploy-network
deploy-network: deploy-chart
echo "" && \
echo ">> Pod Information...." && \
echo "" && \
kubectl get pods -o wide && \
echo "" && \
echo ">> Service Information...." && \
echo "" && \
kubectl get svc -o wide

.PHONY: destroy-test-container
destroy-test-container:
kubectl get svc -o wide && \
echo "" && \
echo ">> Deleting test container..." && \
kubectl delete pod network-test || true

.PHONY: destroy-network
destroy-network: destroy-test-container
echo ">> Pod Information...." && \
echo "" && \
echo ">> Uninstalling helm chart..." && \
helm uninstall fst && \
sleep 10
kubectl get pods -o wide && \
echo ">> Waiting for network-node pods to be active (first deployment takes ~10m)...." && \
kubectl wait --for=jsonpath='{.status.phase}'=Running pod -l fullstack.hedera.com/type=network-node --timeout=900s

.PHONY: test
test:
# Enable cleanup_test function so that even if test fails, we cleanup the cluster.
# We are only enabling this in this make target, however if necessary, similar pattern can be used in other targets.
# Ref: https://stackoverflow.com/questions/28597794/how-can-i-clean-up-after-an-error-in-a-makefile
function cleanup_test {
$(MAKE) destroy-network
}
trap cleanup_test EXIT # always destroy-network on exit
$(MAKE) setup deploy-minio-operator-if-required update-helm-dependencies deploy-network helm-test setup-nodes start-nodes
.PHONY: destroy-network
destroy-network: destroy-test-container destroy-chart

.PHONY: setup-nodes
setup-nodes: setup
@@ -184,8 +178,8 @@ destroy-prometheus:
deploy-minio-operator:
@echo ">> Deploying minio operator..."; \
helm install --repo https://operator.min.io/ --namespace=minio-operator --create-namespace --version 5.0.7 minio-operator operator && \
echo ">> Waiting for minio operator to be ready..." && \
kubectl --namespace=minio-operator wait --for=condition=available --timeout=600s deployment.apps/minio-operator && \
echo ">> Waiting for minio operator to be available (timeout 300s)..." && \
kubectl --namespace=minio-operator wait --for=condition=Available --timeout=300s deployment.apps/minio-operator && \
sleep 5

.PHONY: is-minio-operator-installed
@@ -203,3 +197,54 @@ deploy-minio-operator-if-required:
undeploy-minio-operator:
echo ">> Deploying minio operator..." && \
helm delete --namespace=minio-operator minio-operator


######################################### Helm Chart Test #################################
.PHONY: helm-test
helm-test:
echo "" && \
echo ">> Running helm test...(first run takes ~2m)" && \
echo "" && \
# We have to specify the specific test name here, otherwise it executes all tests from the subcharts as well.
# I had to remove --logs, because --logs ignores filter and tries to get logs for all test pods
# - Error: unable to get pod logs for fst-graphql-test: pods "fst-graphql-test" not found
helm test fst --filter 'name=network-test' && \
EXIT_CODE=$$? && \
kubectl logs network-test && \
exit $${EXIT_CODE}
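A caveat on the recipe above: because the commands are chained with &&, a failing helm test short-circuits the chain, so kubectl logs only runs when the test passes. Given the errexit shell options set at the top of this Makefile, a variant that still surfaces the test pod logs on failure could capture the status with || instead (a sketch, with $ doubled to $$ inside a real recipe):

    EXIT_CODE=0
    helm test fst --filter 'name=network-test' || EXIT_CODE=$?
    kubectl logs network-test
    exit ${EXIT_CODE}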

.PHONY: destroy-test-container
destroy-test-container:
echo "" && \
echo ">> Deleting test container..." && \
kubectl delete pod network-test || true

######################################### CI #################################
.PHONY: local-kubectl-bats
local-kubectl-bats:
source "${SCRIPTS_DIR}/${DOCKER_SCRIPT}" && build_kubectl_bats "${CLUSTER_NAME}"

.PHONY: deploy-all
deploy-all:
# Enable cleanup_test function so that even if test fails, we cleanup the cluster.
# We are only enabling this in this make target, however if necessary, similar pattern can be used in other targets.
# Ref: https://stackoverflow.com/questions/28597794/how-can-i-clean-up-after-an-error-in-a-makefile
function cleanup_test {
$(MAKE) destroy-network
}
trap cleanup_test EXIT # always destroy-network on exit
$(MAKE) setup
$(MAKE) deploy-network
$(MAKE) helm-test
$(MAKE) setup-nodes
$(MAKE) start-nodes

.PHONY: destroy-all
destroy-all:
-$(MAKE) destroy-network
-$(MAKE) undeploy-minio-operator
-$(MAKE) destroy-prometheus-operator

.PHONY: ci-test
ci-test: setup-cluster local-kubectl-bats
$(MAKE) deploy-all CHART_VALUES_FILES="$(PWD)/ci/ci-values.yaml"
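Unrolled, the CI entry point is equivalent to running the targets above in sequence; roughly:

    make setup-cluster        # create or reuse the kind cluster via scripts/main.sh
    make local-kubectl-bats   # build docker.fst.local/kubectl-bats:local and kind-load it
    make deploy-all CHART_VALUES_FILES="$PWD/ci/ci-values.yaml"  # deploy, helm-test, node setup, with trap cleanup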
8 changes: 8 additions & 0 deletions dev/ci/ci-values.yaml
@@ -0,0 +1,8 @@
# helm test container
tester:
image:
registry: "docker.fst.local"
repository: "kubectl-bats"
tag: "local"
pullPolicy: "Never"
resources: {}
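These overrides point the chart's test container at the image that local-kubectl-bats loads into the cluster. pullPolicy: "Never" is what makes the local image usable: the kubelet will only run an image already present on the node, so the kind load step must happen before the chart is installed. One way to confirm the image landed on the node (assuming the default single-node kind cluster, whose node container is named fst-control-plane):

    docker exec fst-control-plane crictl images | grep kubectl-bats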
20 changes: 20 additions & 0 deletions dev/scripts/docker.sh
@@ -0,0 +1,20 @@
#!/usr/bin/env bash

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
readonly SCRIPT_DIR
readonly DOCKERFILE_DIR="${SCRIPT_DIR}/../../docker"
readonly LOCAL_DOCKER_REGISTRY="docker.fst.local" # same as in dev/ci/ci-values.yaml
readonly LOCAL_DOCKER_IMAGE_TAG="local"
readonly KUBECTL_BATS_IMAGE="${LOCAL_DOCKER_REGISTRY}/kubectl-bats:${LOCAL_DOCKER_IMAGE_TAG}"

function build_kubectl_bats() {
local cluster_name=$1
[[ -z "${cluster_name}" ]] && echo "ERROR: Cluster name is required" && return 1

echo ""
echo "Building kubectl-bats image"
echo "-----------------------------------------------------------------------------------------------------"
cd "${DOCKERFILE_DIR}/kubectl-bats" && docker build -t "${KUBECTL_BATS_IMAGE}" .
kind load docker-image ${KUBECTL_BATS_IMAGE} -n "${cluster_name}"
}
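The Makefile drives this through the local-kubectl-bats target, but the helper can also be exercised on its own; a usage sketch from the repository root:

    source dev/scripts/docker.sh
    build_kubectl_bats fst   # builds docker/kubectl-bats and loads the image into the fst cluster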
61 changes: 61 additions & 0 deletions dev/scripts/main.sh
@@ -0,0 +1,61 @@
#!/usr/bin/env bash

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"
readonly SCRIPT_DIR
CHART_DIR="${SCRIPT_DIR}/../../charts/hedera-network"

function setup_cluster() {
local cluster_name=$1
[[ -z "${cluster_name}" ]] && echo "ERROR: Cluster name is required" && return 1

local count=$(kind get clusters -q | grep -c -sw "${cluster_name}")
if [[ $count -eq 0 ]]; then
echo "Cluster '${cluster_name}' not found"
kind create cluster -n "${cluster_name}"
else
echo "Cluster '${cluster_name}' found"
fi

kubectl config use-context "kind-${cluster_name}"
kubectl config set-context --current --namespace=default
kubectl config get-contexts
}

function install_chart() {
local node_setup_script=$1

echo ""
echo "Installing helm chart... "
echo "SCRIPT_NAME: ${node_setup_script}"
echo "Values: -f ${CHART_DIR}/values.yaml --values ${CHART_VALUES_FILES}"
echo "-----------------------------------------------------------------------------------------------------"
if [ "${node_setup_script}" = "nmt-install.sh" ]; then
nmt_install
else
direct_install
fi
}

function uninstall_chart() {
echo ""
echo "Uninstalling helm chart... "
echo "-----------------------------------------------------------------------------------------------------"
helm uninstall fst
sleep 10
}

function nmt_install() {
if [[ -z "${CHART_VALUES_FILES}" ]]; then
helm install fst "${CHART_DIR}" --set defaults.root.image.repository=hashgraph/full-stack-testing/ubi8-init-dind
else
helm install fst "${CHART_DIR}" -f "${CHART_DIR}/values.yaml" --values "${CHART_VALUES_FILES}" --set defaults.root.image.repository=hashgraph/full-stack-testing/ubi8-init-dind
fi
}

function direct_install() {
if [[ -z "${CHART_VALUES_FILES}" ]]; then
helm install fst "${CHART_DIR}"
else
helm install fst "${CHART_DIR}" -f "${CHART_DIR}/values.yaml" --values "${CHART_VALUES_FILES}"
fi
}
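The CHART_VALUES_FILES branch is what lets ci-test layer the CI overrides on top of the chart defaults; with the variable empty, the chart installs with its built-in values only. In the CI path, the direct-install case boils down to something like (paths abbreviated):

    helm install fst ../charts/hedera-network \
      -f ../charts/hedera-network/values.yaml \
      --values dev/ci/ci-values.yaml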
5 changes: 3 additions & 2 deletions dev/scripts/telemetry.sh
@@ -32,10 +32,11 @@ function deploy-prometheus-operator() {
echo "Deploying prometheus operator"
echo "PROMETHEUS_OPERATOR_YAML: ${PROMETHEUS_OPERATOR_YAML}"
echo "-----------------------------------------------------------------------------------------------------"
local crd_count=$(kubectl get crd | grep "monitoring.coreos.com" | wc -l)
local crd_count=$(kubectl get crd | grep -c "monitoring.coreos.com" )
if [[ $crd_count -ne 10 ]]; then
kubectl create -f "${PROMETHEUS_OPERATOR_YAML}"
-kubectl wait --for=condition=Ready pods -l app.kubernetes.io/name=prometheus-operator -n default
+kubectl get pods --all-namespaces
+kubectl wait --for=condition=Ready pods -l app.kubernetes.io/name=prometheus-operator --timeout 300s --all-namespaces
else
echo "Prometheus operator CRD is already installed"
echo ""
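The reworked wait no longer assumes the operator pods land in the default namespace and bounds the wait at 300s. The same check can be run by hand against an existing cluster:

    kubectl wait --for=condition=Ready pods -l app.kubernetes.io/name=prometheus-operator --timeout 300s --all-namespaces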
