Support new control plane label and taint
Signed-off-by: Stefan Büringer buringerst@vmware.com
sbueringer committed Feb 1, 2022
1 parent 5d5dc7a commit 2497fcc
Showing 10 changed files with 141 additions and 17 deletions.
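In short, kubeadm v1.20 and newer label control plane nodes with node-role.kubernetes.io/control-plane in addition to the deprecated node-role.kubernetes.io/master label (kubernetes/kubeadm#2200). The changes below therefore tolerate both taints in the controller manifests and match both labels when listing control plane nodes. A minimal sketch of the resulting toleration pair, mirroring the manifest diffs that follow:

```
tolerations:
- effect: NoSchedule
  key: node-role.kubernetes.io/master        # deprecated taint
- effect: NoSchedule
  key: node-role.kubernetes.io/control-plane # replacement taint
```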
2 changes: 2 additions & 0 deletions bootstrap/kubeadm/config/manager/manager.yaml
@@ -41,3 +41,5 @@ spec:
 tolerations:
 - effect: NoSchedule
   key: node-role.kubernetes.io/master
+- effect: NoSchedule
+  key: node-role.kubernetes.io/control-plane
2 changes: 2 additions & 0 deletions config/manager/manager.yaml
@@ -42,3 +42,5 @@ spec:
 tolerations:
 - effect: NoSchedule
   key: node-role.kubernetes.io/master
+- effect: NoSchedule
+  key: node-role.kubernetes.io/control-plane
2 changes: 2 additions & 0 deletions controlplane/kubeadm/config/manager/manager.yaml
@@ -41,3 +41,5 @@ spec:
 tolerations:
 - effect: NoSchedule
   key: node-role.kubernetes.io/master
+- effect: NoSchedule
+  key: node-role.kubernetes.io/control-plane
@@ -1516,7 +1516,7 @@ func createMachineNodePair(name string, cluster *clusterv1.Cluster, kcp *control
     node := &corev1.Node{
         ObjectMeta: metav1.ObjectMeta{
             Name: name,
-            Labels: map[string]string{"node-role.kubernetes.io/master": ""},
+            Labels: map[string]string{"node-role.kubernetes.io/control-plane": ""},
         },
     }

47 changes: 33 additions & 14 deletions controlplane/kubeadm/internal/workload_cluster.go
@@ -36,6 +36,7 @@ import (
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+    "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/client-go/util/retry"
     ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
     "sigs.k8s.io/yaml"
@@ -51,13 +52,14 @@
 )

 const (
-    kubeProxyKey = "kube-proxy"
-    kubeadmConfigKey = "kubeadm-config"
-    kubeletConfigKey = "kubelet"
-    cgroupDriverKey = "cgroupDriver"
-    labelNodeRoleControlPlane = "node-role.kubernetes.io/master"
-    clusterStatusKey = "ClusterStatus"
-    clusterConfigurationKey = "ClusterConfiguration"
+    kubeProxyKey = "kube-proxy"
+    kubeadmConfigKey = "kubeadm-config"
+    kubeletConfigKey = "kubelet"
+    cgroupDriverKey = "cgroupDriver"
+    labelNodeRoleOldControlPlane = "node-role.kubernetes.io/master" // Deprecated: https://github.com/kubernetes/kubeadm/issues/2200
+    labelNodeRoleControlPlane = "node-role.kubernetes.io/control-plane"
+    clusterStatusKey = "ClusterStatus"
+    clusterConfigurationKey = "ClusterConfiguration"
 )

 var (
@@ -121,14 +123,31 @@ type Workload struct {
 var _ WorkloadCluster = &Workload{}

 func (w *Workload) getControlPlaneNodes(ctx context.Context) (*corev1.NodeList, error) {
-    nodes := &corev1.NodeList{}
-    labels := map[string]string{
-        labelNodeRoleControlPlane: "",
-    }
-    if err := w.Client.List(ctx, nodes, ctrlclient.MatchingLabels(labels)); err != nil {
-        return nil, err
+    controlPlaneNodes := &corev1.NodeList{}
+    controlPlaneNodeNames := sets.NewString()
+
+    for _, label := range []string{labelNodeRoleOldControlPlane, labelNodeRoleControlPlane} {
+        nodes := &corev1.NodeList{}
+        if err := w.Client.List(ctx, nodes, ctrlclient.MatchingLabels(map[string]string{
+            label: "",
+        })); err != nil {
+            return nil, err
+        }
+
+        for i := range nodes.Items {
+            node := nodes.Items[i]
+
+            // Continue if we already added that node.
+            if controlPlaneNodeNames.Has(node.Name) {
+                continue
+            }
+
+            controlPlaneNodeNames.Insert(node.Name)
+            controlPlaneNodes.Items = append(controlPlaneNodes.Items, node)
+        }
     }
-    return nodes, nil
+
+    return controlPlaneNodes, nil
 }

 func (w *Workload) getConfigMap(ctx context.Context, configMap ctrlclient.ObjectKey) (*corev1.ConfigMap, error) {
Expand Up @@ -980,6 +980,9 @@ func fakeNode(name string, options ...fakeNodeOption) *corev1.Node {
p := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
labelNodeRoleControlPlane: "",
},
},
}
for _, opt := range options {
72 changes: 72 additions & 0 deletions controlplane/kubeadm/internal/workload_cluster_test.go
@@ -38,6 +38,78 @@ import (
     "sigs.k8s.io/cluster-api/util/yaml"
 )

+func TestGetControlPlaneNodes(t *testing.T) {
+    tests := []struct {
+        name string
+        nodes []corev1.Node
+        expectedNodes []string
+    }{
+        {
+            name: "Return control plane nodes",
+            nodes: []corev1.Node{
+                {
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name: "control-plane-node-with-old-label",
+                        Labels: map[string]string{
+                            labelNodeRoleOldControlPlane: "",
+                        },
+                    },
+                },
+                {
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name: "control-plane-node-with-both-labels",
+                        Labels: map[string]string{
+                            labelNodeRoleOldControlPlane: "",
+                            labelNodeRoleControlPlane: "",
+                        },
+                    },
+                },
+                {
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name: "control-plane-node-with-new-label",
+                        Labels: map[string]string{
+                            labelNodeRoleControlPlane: "",
+                        },
+                    },
+                },
+                {
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name: "worker-node",
+                        Labels: map[string]string{},
+                    },
+                },
+            },
+            expectedNodes: []string{
+                "control-plane-node-with-both-labels",
+                "control-plane-node-with-old-label",
+                "control-plane-node-with-new-label",
+            },
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            g := NewWithT(t)
+            objs := []client.Object{}
+            for i := range tt.nodes {
+                objs = append(objs, &tt.nodes[i])
+            }
+            fakeClient := fake.NewClientBuilder().WithObjects(objs...).Build()
+
+            w := &Workload{
+                Client: fakeClient,
+            }
+            nodes, err := w.getControlPlaneNodes(ctx)
+            g.Expect(err).ToNot(HaveOccurred())
+            var actualNodes []string
+            for _, n := range nodes.Items {
+                actualNodes = append(actualNodes, n.Name)
+            }
+            g.Expect(actualNodes).To(Equal(tt.expectedNodes))
+        })
+    }
+}
+
 func TestUpdateKubeProxyImageInfo(t *testing.T) {
     tests := []struct {
         name string
3 changes: 3 additions & 0 deletions docs/book/src/user/troubleshooting.md
@@ -35,7 +35,10 @@ kubectl label nodes <name> node-role.kubernetes.io/worker=""
 For convenience, here is an example one-liner to do this post installation

 ```
+# Kubernetes 1.19 (kubeadm 1.19 sets only the node-role.kubernetes.io/master label)
 kubectl get nodes --no-headers -l '!node-role.kubernetes.io/master' -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}' | xargs -I{} kubectl label node {} node-role.kubernetes.io/worker=''
+# Kubernetes >= 1.20 (kubeadm >= 1.20 sets the node-role.kubernetes.io/control-plane label)
+kubectl get nodes --no-headers -l '!node-role.kubernetes.io/control-plane' -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}' | xargs -I{} kubectl label node {} node-role.kubernetes.io/worker=''
 ```

 ## Cluster API with Docker
23 changes: 21 additions & 2 deletions test/framework/deployment_helpers.go
@@ -34,6 +34,7 @@ import (
     "k8s.io/api/policy/v1beta1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
+    utilversion "k8s.io/apimachinery/pkg/util/version"
     "k8s.io/client-go/kubernetes"
     "k8s.io/utils/pointer"
     "sigs.k8s.io/controller-runtime/pkg/client"
@@ -42,6 +43,11 @@
     "sigs.k8s.io/cluster-api/test/framework/internal/log"
 )

+const (
+    nodeRoleOldControlPlane = "node-role.kubernetes.io/master" // Deprecated: https://github.com/kubernetes/kubeadm/issues/2200
+    nodeRoleControlPlane = "node-role.kubernetes.io/control-plane"
+)
+
 // WaitForDeploymentsAvailableInput is the input for WaitForDeploymentsAvailable.
 type WaitForDeploymentsAvailableInput struct {
     Getter Getter
@@ -306,10 +312,23 @@ func DeployUnevictablePod(ctx context.Context, input DeployUnevictablePodInput)
         },
     }
     if input.ControlPlane != nil {
-        workloadDeployment.Spec.Template.Spec.NodeSelector = map[string]string{"node-role.kubernetes.io/master": ""}
+        serverVersion, err := workloadClient.ServerVersion()
+        Expect(err).ToNot(HaveOccurred())
+
+        // Use the control-plane label for Kubernetes version >= v1.20.0.
+        if utilversion.MustParseGeneric(serverVersion.String()).AtLeast(utilversion.MustParseGeneric("v1.20.0")) {
+            workloadDeployment.Spec.Template.Spec.NodeSelector = map[string]string{nodeRoleControlPlane: ""}
+        } else {
+            workloadDeployment.Spec.Template.Spec.NodeSelector = map[string]string{nodeRoleOldControlPlane: ""}
+        }
+
         workloadDeployment.Spec.Template.Spec.Tolerations = []corev1.Toleration{
             {
-                Key: "node-role.kubernetes.io/master",
+                Key: nodeRoleOldControlPlane,
                 Effect: "NoSchedule",
             },
+            {
+                Key: nodeRoleControlPlane,
+                Effect: "NoSchedule",
+            },
         }
2 changes: 2 additions & 0 deletions test/infrastructure/docker/config/manager/manager.yaml
@@ -44,6 +44,8 @@ spec:
 tolerations:
 - effect: NoSchedule
   key: node-role.kubernetes.io/master
+- effect: NoSchedule
+  key: node-role.kubernetes.io/control-plane
 volumes:
 - name: dockersock
   hostPath:
