Skip to content

Commit

Permalink
🏃 support MachinePool clusters in ci-entrypoint.sh
Browse files Browse the repository at this point in the history
  • Loading branch information
Ernest Wong committed Jun 2, 2020
1 parent 5a7bedf commit 165d3ad
Show file tree
Hide file tree
Showing 14 changed files with 856 additions and 21 deletions.
23 changes: 16 additions & 7 deletions Makefile
Expand Up @@ -52,7 +52,6 @@ KUSTOMIZE := $(TOOLS_BIN_DIR)/kustomize
MOCKGEN := $(TOOLS_BIN_DIR)/mockgen
RELEASE_NOTES := $(TOOLS_BIN_DIR)/release-notes
GO_APIDIFF := $(TOOLS_BIN_DIR)/go-apidiff
EXP_DIR := exp

# Define Docker related variables. Releases should modify and double check these vars.
REGISTRY ?= gcr.io/$(shell gcloud config get-value project)
Expand Down Expand Up @@ -83,6 +82,10 @@ SKIP_CREATE_MGMT_CLUSTER ?= false
# Build time versioning details.
LDFLAGS := $(shell hack/version.sh)

# Allow overriding the MachinePool feature gate (defaults to disabled).
FEATURE_GATE_MACHINE_POOL ?= false
# JSON 6902 patch applied via `kubectl patch --type=json` to append a
# --feature-gates=MachinePool=<value> argument to the container at index 1
# of a controller Deployment's pod spec.
# NOTE(review): assumes the manager container is always at index 1 in each
# patched Deployment — confirm against the upstream CAPI/CAPZ manifests.
FEATURE_GATES_JSON_PATCH := [{"op": "add", "path": "/spec/template/spec/containers/1/args/-", "value": "--feature-gates=MachinePool=$(FEATURE_GATE_MACHINE_POOL)"}]

CLUSTER_TEMPLATE ?= cluster-template.yaml
MANAGED_CLUSTER_TEMPLATE ?= cluster-template-aks.yaml

Expand Down Expand Up @@ -350,13 +353,19 @@ create-management-cluster: $(KUSTOMIZE) $(ENVSUBST)
kind load docker-image $(CONTROLLER_IMG)-$(ARCH):$(TAG) --name=capz
$(KUSTOMIZE) build config | $(ENVSUBST) | kubectl apply -f -

# Wait for CAPI pods
kubectl wait --for=condition=Ready --timeout=5m -n capi-system pod -l cluster.x-k8s.io/provider=cluster-api
kubectl wait --for=condition=Ready --timeout=5m -n capi-kubeadm-bootstrap-system pod -l cluster.x-k8s.io/provider=bootstrap-kubeadm
kubectl wait --for=condition=Ready --timeout=5m -n capi-kubeadm-control-plane-system pod -l cluster.x-k8s.io/provider=control-plane-kubeadm
# Patch controllers with feature gates flag
kubectl patch deployment capi-controller-manager -n capi-system --type=json -p='$(FEATURE_GATES_JSON_PATCH)'
kubectl patch deployment capi-kubeadm-bootstrap-controller-manager -n capi-kubeadm-bootstrap-system --type=json -p='$(FEATURE_GATES_JSON_PATCH)'
kubectl patch deployment capz-controller-manager -n capz-system --type=json -p='$(FEATURE_GATES_JSON_PATCH)'
kubectl patch deployment capi-controller-manager -n capi-webhook-system --type=json -p='$(FEATURE_GATES_JSON_PATCH)'

# Wait for CAPI deployments
kubectl wait --for=condition=Available --timeout=5m -n capi-system deployment -l cluster.x-k8s.io/provider=cluster-api
kubectl wait --for=condition=Available --timeout=5m -n capi-kubeadm-bootstrap-system deployment -l cluster.x-k8s.io/provider=bootstrap-kubeadm
kubectl wait --for=condition=Available --timeout=5m -n capi-kubeadm-control-plane-system deployment -l cluster.x-k8s.io/provider=control-plane-kubeadm

# Wait for CAPZ pods
kubectl wait --for=condition=Ready --timeout=5m -n capz-system pod -l cluster.x-k8s.io/provider=infrastructure-azure
# Wait for CAPZ deployments
kubectl wait --for=condition=Available --timeout=5m -n capz-system deployment -l cluster.x-k8s.io/provider=infrastructure-azure

# required sleep for when creating management and workload cluster simultaneously
sleep 10
Expand Down
3 changes: 1 addition & 2 deletions docs/development.md
Expand Up @@ -335,8 +335,6 @@ To generate the mocks you can run
make generate-go
```



#### E2E Testing

To run E2E locally, set `AZURE_CLIENT_ID`, `AZURE_CLIENT_SECRET`, `AZURE_SUBSCRIPTION_ID`, `AZURE_TENANT_ID` and run:
Expand Down Expand Up @@ -377,6 +375,7 @@ You can optionally set the following variables:
| `PARALLEL` | Skip serial tests and set --ginkgo-parallel. |
| `USE_CI_ARTIFACTS` | Use a CI version of Kubernetes, ie. not a released version (eg. `v1.19.0-alpha.1.426+0926c9c47677e9`) |
| `CI_VERSION` | Provide a custom CI version of Kubernetes. By default, the latest master commit will be used. |
| `FEATURE_GATE_MACHINE_POOL` | Set to `true` to use [Machine Pool](topics/machinepools.md) for worker machines. Defaults to `false`. |

You can also customize the configuration of the CAPZ cluster (assuming that `SKIP_CREATE_WORKLOAD_CLUSTER` is not set). See [Customizing the cluster deployment](#customizing-the-cluster-deployment) for more details.

Expand Down
2 changes: 1 addition & 1 deletion hack/gen-flavors.sh
Expand Up @@ -30,4 +30,4 @@ find "${flavors_dir}"* -maxdepth 0 -type d -print0 | xargs -0 -I {} basename {}
mv "${root}/templates/cluster-template-default.yaml" "${root}/templates/cluster-template.yaml"

rm -f "${test_dir}cluster-template"*
find "${test_dir}"* -maxdepth 0 -type d -print0 | xargs -0 -I {} basename {} | xargs -I {} sh -c "${kustomize} build --reorder none ${test_dir}{} > ${test_dir}cluster-template-{}.yaml"
find "${test_dir}"* -maxdepth 0 -type d -print0 | xargs -0 -I {} basename {} | grep -v patches | xargs -I {} sh -c "${kustomize} build --load_restrictor none --reorder none ${test_dir}{} > ${test_dir}cluster-template-{}.yaml"
30 changes: 21 additions & 9 deletions scripts/ci-entrypoint.sh
Expand Up @@ -82,6 +82,10 @@ create_cluster() {
export CLUSTER_TEMPLATE="test/cluster-template-prow.yaml"
fi

if [[ "${FEATURE_GATE_MACHINE_POOL:-}" == "true" ]]; then
export CLUSTER_TEMPLATE="${CLUSTER_TEMPLATE/prow/prow-machine-pool}"
fi

export CLUSTER_NAME="capz-$(head /dev/urandom | LC_ALL=C tr -dc a-z0-9 | head -c 6 ; echo '')"
# Need a cluster with at least 2 nodes
export CONTROL_PLANE_MACHINE_COUNT=${CONTROL_PLANE_MACHINE_COUNT:-1}
Expand All @@ -93,6 +97,19 @@ create_cluster() {
${REPO_ROOT}/hack/create-dev-cluster.sh
}

# Block until every expected cluster node has registered with the API server
# and reports Ready. Reads CONTROL_PLANE_MACHINE_COUNT and
# WORKER_MACHINE_COUNT from the environment.
wait_for_nodes() {
    echo "Waiting for ${CONTROL_PLANE_MACHINE_COUNT} control plane machine(s) and ${WORKER_MACHINE_COUNT} worker machine(s) to become Ready"

    # `kubectl wait --all` only covers nodes that already exist, so poll
    # until the expected number of nodes has registered before waiting on
    # their readiness condition.
    local expected
    expected="$((CONTROL_PLANE_MACHINE_COUNT + WORKER_MACHINE_COUNT))"
    until [[ "$(kubectl get nodes -ojson | jq '.items | length')" -eq "${expected}" ]]; do
        sleep 10
    done

    kubectl wait --for=condition=Ready node --all --timeout=5m
    kubectl get nodes -owide
}

run_upstream_e2e_tests() {
# ginkgo regexes
SKIP="${SKIP:-}"
Expand All @@ -107,19 +124,11 @@ run_upstream_e2e_tests() {
fi
fi

# get the number of worker nodes
NUM_NODES="$(kubectl get nodes --kubeconfig="$KUBECONFIG" \
-o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.taints}{"\n"}{end}' \
| grep -cv "node-role.kubernetes.io/master" )"

# wait for all the nodes to be ready
kubectl wait --for=condition=Ready node --kubeconfig="$KUBECONFIG" --all || true

# setting this env prevents ginkgo e2e from trying to run provider setup
export KUBERNETES_CONFORMANCE_TEST="y"
# run the tests
(cd "$(go env GOPATH)/src/k8s.io/kubernetes" && ./hack/ginkgo-e2e.sh \
'--provider=skeleton' "--num-nodes=${NUM_NODES}" \
'--provider=skeleton' \
"--ginkgo.focus=${FOCUS}" "--ginkgo.skip=${SKIP}" \
"--report-dir=${ARTIFACTS}" '--disable-log-dump=true')

Expand Down Expand Up @@ -163,6 +172,9 @@ fi
# export the target cluster KUBECONFIG if not already set
export KUBECONFIG="${KUBECONFIG:-${PWD}/kubeconfig}"

export -f wait_for_nodes
timeout --foreground 1800 bash -c wait_for_nodes

# build k8s binaries and run upstream e2e tests
if [[ -z "${SKIP_UPSTREAM_E2E_TESTS:-}" ]]; then
build_k8s
Expand Down

0 comments on commit 165d3ad

Please sign in to comment.