Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Include k3s in CI tests #195

Merged
merged 7 commits into from
Apr 17, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 0 additions & 46 deletions .github/workflows/tests-deploy-cluster.yml

This file was deleted.

Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
name: Test Deployment (Single Node)
run-name: "${{ github.ref_name }}: Test Deployment (Single Node)"
name: Test Deployment
run-name: "${{ github.ref_name }}: Test Deployment"

on:
workflow_dispatch:
Expand All @@ -16,6 +16,44 @@ concurrency:
cancel-in-progress: true

jobs:
# Test cluster deployment.
test-cluster:
if: github.event_name == 'push' || github.event_name == 'pull_request'
name: Cluster
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
manager:
- kubespray
- k3s
k8s:
- v1.27.10
- v1.28.6
distro:
- ubuntu22
networkPlugin:
- calico # K3s will use flannel

steps:
- name: Checkout
uses: actions/checkout@v4

- name: Setup environment
uses: ./.github/actions/runner-setup

- name: Deploy cluster
run: |
./scripts/deploy-cluster.sh k8s \
${{ matrix.distro }} \
${{ matrix.networkPlugin }} \
${{ matrix.k8s }} \
${{ matrix.manager }}

- name: Test
run: |
./scripts/test-cluster.sh

# Test multiple k8s versions using the default distro and network plugin.
test-single-node-quick:
if: github.event_name == 'push' || github.event_name == 'pull_request'
Expand All @@ -24,14 +62,16 @@ jobs:
strategy:
fail-fast: false
matrix:
manager:
- k3s
- kubespray
k8sVersion:
- v1.26.13
- v1.27.10
- v1.28.6
distro:
- ubuntu22
networkPlugin:
- calico
- calico # K3s will use flannel

steps:
- name: Checkout
Expand All @@ -45,7 +85,8 @@ jobs:
./scripts/deploy-node.sh k8s \
${{ matrix.distro }} \
${{ matrix.networkPlugin }} \
${{ matrix.k8sVersion }}
${{ matrix.k8sVersion }} \
${{ matrix.manager }}

- name: Test
run: |
Expand All @@ -55,15 +96,18 @@ jobs:
run: |
./scripts/destroy-cluster.sh k8s

# Test most combinations of Kubernetes versions, distros,
# and network plugins. Run this only on push.
# Test most combinations of managers, versions, distros,
# and network plugins. Run this only on schedule.
test-single-node-all:
if: github.event_name != 'push' && github.event_name != 'pull_request'
name: Node
if: github.event_name == 'schedule'
name: Node Matrix
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
manager:
- k3s
- kubespray
k8sVersion:
- v1.26.13
- v1.27.10
Expand Down Expand Up @@ -91,7 +135,8 @@ jobs:
./scripts/deploy-node.sh k8s \
${{ matrix.distro }} \
${{ matrix.networkPlugin }} \
${{ matrix.k8sVersion }}
${{ matrix.k8sVersion }} \
${{ matrix.manager }}

- name: Test
run: |
Expand Down
11 changes: 0 additions & 11 deletions embed/ansible/kubitect/finalize.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -10,17 +10,6 @@
dest: "{{ config_dir }}/admin.conf"
flat: true

- name: Fetch kubeconfig from first master node
hosts: server[0]
gather_facts: false
any_errors_fatal: true
tasks:
- name: Fetch Kubeconfig
fetch:
src: "/home/{{ ansible_user }}/.kube/config"
dest: "{{ config_dir }}/admin.conf"
flat: true

- name: Finalize cluster deployment
hosts: localhost
gather_facts: false
Expand Down
2 changes: 1 addition & 1 deletion embed/ansible/kubitect/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
ansible-core==2.16.2
ansible==9.3.0
jinja2==3.1.2
netaddr==0.9.0
3 changes: 2 additions & 1 deletion pkg/cluster/managers/k3s_playbooks.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,8 @@ func (e *k3s) K3sCreate(inventory string) error {
"api_endpoint": string(e.InfraConfig.Nodes.LoadBalancer.VIP),
"api_port": "6443",
"user_kubectl": "true", // Set to false to kubectl via root user.
"cluster_context": e.ClusterName,
"cluster_context": "default",
"kubeconfig": filepath.Join(e.ConfigDir, "admin.conf"),
"extra_server_args": "",
"extra_agent_args": "",
}
Expand Down
6 changes: 4 additions & 2 deletions scripts/deploy-cluster.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,15 +2,16 @@
set -eu

# Check input arguments.
if [ "${1:-}" = "" ] || [ "${2:-}" = "" ] || [ "${3:-}" = "" ] || [ "${4:-}" = "" ]; then
echo "Usage: ${0} <cluster_name> <distro> <network_plugin> <k8s_version>"
if [ "${1:-}" = "" ] || [ "${2:-}" = "" ] || [ "${3:-}" = "" ] || [ "${4:-}" = "" ] || [ "${5:-}" = "" ]; then
echo "Usage: ${0} <cluster_name> <distro> <network_plugin> <k8s_version> <k8s_manager>"
exit 1
fi

CLUSTER="${1}"
DISTRO="${2}"
NETWORK_PLUGIN="${3}"
K8S_VERSION="${4}"
K8S_MANAGER="${5}"

echo "==> DEPLOY: Cluster ${DISTRO}/${NETWORK_PLUGIN}/${K8S_VERSION}"

Expand Down Expand Up @@ -63,6 +64,7 @@ cluster:
ip: 192.168.113.21

kubernetes:
manager: ${K8S_MANAGER}
version: ${K8S_VERSION}
networkPlugin: ${NETWORK_PLUGIN}
EOF
Expand Down
6 changes: 4 additions & 2 deletions scripts/deploy-node.sh
Original file line number Diff line number Diff line change
Expand Up @@ -2,15 +2,16 @@
set -eu

# Check input arguments.
if [ "${1:-}" = "" ] || [ "${2:-}" = "" ] || [ "${3:-}" = "" ] || [ "${4:-}" = "" ]; then
echo "Usage: ${0} <cluster_name> <distro> <network_plugin> <k8s_version>"
if [ "${1:-}" = "" ] || [ "${2:-}" = "" ] || [ "${3:-}" = "" ] || [ "${4:-}" = "" ] || [ "${5:-}" = "" ]; then
echo "Usage: ${0} <cluster_name> <distro> <network_plugin> <k8s_version> <k8s_manager>"
exit 1
fi

CLUSTER="${1}"
DISTRO="${2}"
NETWORK_PLUGIN="${3}"
K8S_VERSION="${4}"
K8S_MANAGER="${5}"

echo "==> DEPLOY: Cluster (Single Node) ${DISTRO}/${NETWORK_PLUGIN}/${K8S_VERSION}"

Expand Down Expand Up @@ -45,6 +46,7 @@ cluster:
ip: 192.168.113.10

kubernetes:
manager: ${K8S_MANAGER}
version: ${K8S_VERSION}
networkPlugin: ${NETWORK_PLUGIN}
EOF
Expand Down
76 changes: 18 additions & 58 deletions scripts/test-cluster.sh
Original file line number Diff line number Diff line change
@@ -1,8 +1,6 @@
#!/bin/sh
set -eu

TIMEOUT=600 # seconds

defer() {
if [ "${FAIL}" = "1" ]; then
echo "==> DEBUG: Cluster events"
Expand All @@ -19,71 +17,33 @@ defer() {
FAIL=1
trap defer EXIT HUP INT TERM

echo "==> TEST: Cluster readiness"

startTime=$(date +%s)
nodes=$(kubectl get nodes | awk 'NR>1 {print $1}')

for node in $nodes; do
while :; do
isReady=$(kubectl get node "${node}" \
-o jsonpath='{.status.conditions[?(@.type=="Ready")].status}'
)

if [ "${isReady}" = "True" ]; then
echo "===> PASS: Node ${node} is ready."
break
fi

currentTime=$(date +%s)
elapsedTime=$((currentTime - timeStart))

if [ "${elapsedTime}" -gt "${TIMEOUT}" ]; then
echo "FAIL: Node ${node} is NOT READY after ${TIMEOUT} seconds!"
kubectl get nodes
break
fi

sleep 10
done
done

echo "==> TEST: Running pods"

startTime=$(date +%s)

while :; do
failedPods=$(kubectl get pods \
--all-namespaces \
--field-selector="status.phase!=Succeeded,status.phase!=Running" \
--output custom-columns="NAMESPACE:metadata.namespace,POD:metadata.name,STATUS:status.phase"
)
echo "==> TEST: All nodes ready"
kubectl wait --for=condition=ready node --all --timeout=120s

if [ "$(echo "${failedPods}" | awk 'NR>1')" = "" ]; then
echo "===> PASS: All pods are running."
break
fi

currentTime=$(date +%s)
elapsedTime=$((currentTime - startTime))

if [ "${elapsedTime}" -gt "${TIMEOUT}" ]; then
echo "==> FAIL: Pods not running after ${TIMEOUT} seconds!"
echo "${failedPods}"
echo "==> TEST: All pods ready"
set +e
# Wait for all pods to be ready. Retry a few times, as it may happen that
# the cluster has no pods when the check is run, which would result in an error.
for i in $(seq 5); do
podsReadiness=$(kubectl wait --for=condition=ready pods --all -A --timeout=30s)
if [ "$?" -eq 0 ]; then
break
else
echo "(attempt $i/5) Pods are still not ready. Retrying in 10 seconds..."
sleep 10
fi

sleep 10
done
set -e
echo "${podsReadiness}"

echo "==> TEST: DNS"
kubectl run dns-test --image=busybox:1.28.4 --restart=Never -- sleep 180
kubectl wait --for=condition=Ready pod/dns-test --timeout=60s
kubectl apply -f https://k8s.io/examples/admin/dns/dnsutils.yaml
kubectl wait --for=condition=Ready pod/dnsutils --timeout=60s

kubectl exec dns-test -- nslookup kubernetes.default
kubectl exec dnsutils -- nslookup kubernetes.default
echo "===> PASS: Local lookup (kubernetes.default)."

kubectl exec dns-test -- nslookup kubitect.io
kubectl exec dnsutils -- nslookup kubitect.io
echo "===> PASS: External lookup (kubitect.io)."

# All tests have passed.
Expand Down