Merge pull request #5289 from nilo19/fix/cherry-pick-5281-1.28
fix: [multi-slb] Put the service in the load balancer that has no lab…
k8s-ci-robot committed Jan 16, 2024
2 parents a7cc749 + af38701 commit 9255dd3
Showing 9 changed files with 209 additions and 66 deletions.
38 changes: 29 additions & 9 deletions hack/deploy-cluster-capz.sh
@@ -36,7 +36,7 @@ export CONTROL_PLANE_MACHINE_COUNT="${CONTROL_PLANE_MACHINE_COUNT:-1}"
 export WORKER_MACHINE_COUNT="${WORKER_MACHINE_COUNT:-2}"
 export AZURE_CONTROL_PLANE_MACHINE_TYPE="${AZURE_CONTROL_PLANE_MACHINE_TYPE:-Standard_D4s_v3}"
 export AZURE_NODE_MACHINE_TYPE="${AZURE_NODE_MACHINE_TYPE:-Standard_D2s_v3}"
-export AZURE_LOCATION="${AZURE_LOCATION:-westus2}"
+export AZURE_LOCATION="${AZURE_LOCATION:-eastus}"
 export AZURE_CLOUD_CONTROLLER_MANAGER_IMG_REGISTRY="${AZURE_CLOUD_CONTROLLER_MANAGER_IMG_REGISTRY:-mcr.microsoft.com/oss/kubernetes}"
 export AZURE_CLOUD_CONTROLLER_MANAGER_IMG_NAME="${AZURE_CLOUD_CONTROLLER_MANAGER_IMG_NAME:-azure-cloud-controller-manager}"
 export AZURE_CLOUD_CONTROLLER_MANAGER_IMG_TAG="${AZURE_CLOUD_CONTROLLER_MANAGER_IMG_TAG:-v1.28.4}"
@@ -46,6 +46,8 @@ export AZURE_CLOUD_NODE_MANAGER_IMG_TAG="${AZURE_CLOUD_NODE_MANAGER_IMG_TAG:-v1.
 export KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.28.0}"
 export EXP_MACHINE_POOL=true
 export EXP_CLUSTER_RESOURCE_SET=true
+export SKIP_CREATING_MGMT_CLUSTER="${SKIP_CREATING_MGMT_CLUSTER:-false}"
+export KIND="${KIND:-true}"
 
 export AZURE_LOADBALANCER_SKU="${AZURE_LOADBALANCER_SKU:-Standard}"
 export LB_BACKEND_POOL_CONFIG_TYPE="${LB_BACKEND_POOL_CONFIG_TYPE:-nodeIPConfiguration}"
@@ -56,6 +58,12 @@ if [ "${AZURE_SSH_PUBLIC_KEY}" ]; then
   export AZURE_SSH_PUBLIC_KEY_B64
 fi
 
+export MGMT_CLUSTER_CONTEXT="${MGMT_CLUSTER_CONTEXT:-kind-${MANAGEMENT_CLUSTER_NAME}}"
+if [ "${SKIP_CREATING_MGMT_CLUSTER}" = "true" ] || [ "${KIND}" = "false" ]; then
+  MGMT_CLUSTER_CONTEXT="${MANAGEMENT_CLUSTER_NAME}"
+fi
+
+
 source "${REPO_ROOT}/hack/ensure-kind.sh"
 source "${REPO_ROOT}/hack/ensure-clusterctl.sh"
 
@@ -71,30 +79,42 @@ function init_and_wait_capz() {
 
 # Create CAPZ management cluster by kind
 function create_management_cluster() {
+  if [ "${SKIP_CREATING_MGMT_CLUSTER}" = "true" ]; then
+    echo "Skipping creation of management cluster as per configuration"
+    return 0
+  fi
   unset KUBECONFIG
-  if ! kubectl cluster-info --context=kind-"${MANAGEMENT_CLUSTER_NAME}"; then
+  if ! kubectl cluster-info --context="${MGMT_CLUSTER_CONTEXT}"; then
     echo "Creating kind cluster"
     kind create cluster --name="${MANAGEMENT_CLUSTER_NAME}"
     echo "Waiting for the node to be Ready"
-    kubectl wait node "${MANAGEMENT_CLUSTER_NAME}-control-plane" --for=condition=ready --timeout=900s --context=kind-"${MANAGEMENT_CLUSTER_NAME}"
-    kubectl cluster-info --context=kind-"${MANAGEMENT_CLUSTER_NAME}"
+    kubectl wait node "${MANAGEMENT_CLUSTER_NAME}-control-plane" --for=condition=ready --timeout=900s --context="${MGMT_CLUSTER_CONTEXT}"
+    kubectl cluster-info --context=kind-"${MGMT_CLUSTER_CONTEXT}"
     init_and_wait_capz
   else
-    echo "Found management cluster, assuming the CAPZ has been initialized"
+    if [ "${KIND}" = "true" ]; then
+      echo "Found management cluster, assuming the CAPZ has been initialized"
+    else
+      init_and_wait_capz
+    fi
   fi
 }
 
 function create_workload_cluster() {
+  kubectl create ns "${CLUSTER_NAME}" \
+    --context="${MGMT_CLUSTER_CONTEXT}"
+
   if [ "${CUSTOMIZED_CLOUD_CONFIG_TEMPLATE}" ] && ! kubectl get secret "${CLUSTER_NAME}-control-plane-azure-json"; then
     echo "Creating customized cloud config file from ${CUSTOMIZED_CLOUD_CONFIG_TEMPLATE}"
     envsubst < "${CUSTOMIZED_CLOUD_CONFIG_TEMPLATE}" > tmp_azure_json
     kubectl create secret generic "${CLUSTER_NAME}-control-plane-azure-json" \
       --from-file=azure.json=tmp_azure_json \
       --from-file=control-plane-azure.json=tmp_azure_json \
       --from-file=worker-node-azure.json=tmp_azure_json \
-      --context=kind-"${MANAGEMENT_CLUSTER_NAME}"
+      --context="${MGMT_CLUSTER_CONTEXT}" \
+      -n "${CLUSTER_NAME}"
     rm tmp_azure_json
-    kubectl --context=kind-"${MANAGEMENT_CLUSTER_NAME}" label secret "${CLUSTER_NAME}-control-plane-azure-json" "${CLUSTER_NAME}"=foo --overwrite
+    kubectl --context="${MGMT_CLUSTER_CONTEXT}" label secret "${CLUSTER_NAME}-control-plane-azure-json" "${CLUSTER_NAME}"=foo --overwrite -n "${CLUSTER_NAME}"
   else
     echo "Using default cloud config generated by CAPZ"
   fi
@@ -115,9 +135,9 @@ function create_workload_cluster() {
     return 124
   fi
   echo "Get kubeconfig and store it locally."
-  kubectl --context=kind-"${MANAGEMENT_CLUSTER_NAME}" get secrets "${CLUSTER_NAME}"-kubeconfig -o json | jq -r .data.value | base64 --decode > ./"${CLUSTER_NAME}"-kubeconfig
+  kubectl --context="${MGMT_CLUSTER_CONTEXT}" get secrets "${CLUSTER_NAME}"-kubeconfig -o json -n "${CLUSTER_NAME}" | jq -r .data.value | base64 --decode > ./"${CLUSTER_NAME}"-kubeconfig
   echo "Waiting for the control plane nodes to show up"
-  timeout --foreground 1000 bash -c "while ! kubectl --kubeconfig=./${CLUSTER_NAME}-kubeconfig get nodes | grep -E 'master|control-plane'; do sleep 1; done"
+  timeout --foreground 1000 bash -c "while ! kubectl --kubeconfig=./${CLUSTER_NAME}-kubeconfig get nodes -n "${CLUSTER_NAME}" | grep -E 'master|control-plane'; do sleep 1; done"
   if [ "$?" == 124 ]; then
     echo "Timeout waiting for the control plane nodes"
     return 124
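
For orientation only, not part of the commit: with the new knobs above, the script can reuse an already-provisioned management cluster instead of creating a kind cluster. A hedged usage sketch, assuming the script is invoked directly and the kubeconfig already contains a context named after MANAGEMENT_CLUSTER_NAME (the cluster and context names below are hypothetical):

# Hypothetical invocation (not from this commit): skip kind creation and reuse
# an existing, non-kind management cluster reachable via the "capi-mgmt" context.
export SKIP_CREATING_MGMT_CLUSTER=true
export KIND=false
export MANAGEMENT_CLUSTER_NAME=capi-mgmt   # existing kubeconfig context name (assumed)
export CLUSTER_NAME=capz-test              # workload cluster name (assumed)
./hack/deploy-cluster-capz.sh

With these settings the added logic sets MGMT_CLUSTER_CONTEXT to MANAGEMENT_CLUSTER_NAME (no kind- prefix), create_management_cluster returns early, and all kubectl calls target the existing context.
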
2 changes: 1 addition & 1 deletion pkg/provider/azure.go
@@ -1517,7 +1517,7 @@ func (az *Cloud) getActiveNodesByLoadBalancerName(lbName string) sets.Set[string
 	defer az.multipleStandardLoadBalancersActiveNodesLock.Unlock()
 
 	for _, multiSLBConfig := range az.MultipleStandardLoadBalancerConfigurations {
-		if strings.EqualFold(strings.TrimSuffix(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) {
+		if strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) {
 			return multiSLBConfig.ActiveNodes
 		}
 	}
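
For context, not part of the commit: the switch from strings.TrimSuffix to trimSuffixIgnoreCase matters because a load balancer name may not match the case of the configured internal-LB suffix (for example "-Internal" versus "-internal"), and a case-sensitive trim would leave the suffix in place so the EqualFold comparison against the multi-SLB configuration name fails. The helper itself is not shown in this diff; a minimal stand-in with the assumed behavior, as a runnable sketch:

package main

import (
	"fmt"
	"strings"
)

// trimSuffixIgnoreCaseSketch is a hypothetical stand-in for the
// trimSuffixIgnoreCase helper referenced in the diff; the repository's real
// implementation may differ. It strips suffix from s when the two match
// case-insensitively, and returns s unchanged otherwise.
func trimSuffixIgnoreCaseSketch(s, suffix string) string {
	if len(s) >= len(suffix) && strings.EqualFold(s[len(s)-len(suffix):], suffix) {
		return s[:len(s)-len(suffix)]
	}
	return s
}

func main() {
	// A case-sensitive trim misses a differently cased suffix:
	fmt.Println(strings.TrimSuffix("lb-1-Internal", "-internal")) // lb-1-Internal
	// The case-insensitive variant strips it regardless of casing:
	fmt.Println(trimSuffixIgnoreCaseSketch("lb-1-Internal", "-internal")) // lb-1
}
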
