Support new control plane label and taint
Signed-off-by: Stefan Büringer buringerst@vmware.com
sbueringer committed Jan 10, 2022
1 parent c337ecc commit 892bd88
Showing 9 changed files with 112 additions and 14 deletions.
2 changes: 2 additions & 0 deletions bootstrap/kubeadm/config/manager/manager.yaml
@@ -41,3 +41,5 @@ spec:
 tolerations:
 - effect: NoSchedule
   key: node-role.kubernetes.io/master
+- effect: NoSchedule
+  key: node-role.kubernetes.io/control-plane
2 changes: 2 additions & 0 deletions config/manager/manager.yaml
@@ -42,3 +42,5 @@ spec:
 tolerations:
 - effect: NoSchedule
   key: node-role.kubernetes.io/master
+- effect: NoSchedule
+  key: node-role.kubernetes.io/control-plane
2 changes: 2 additions & 0 deletions controlplane/kubeadm/config/manager/manager.yaml
@@ -41,3 +41,5 @@ spec:
 tolerations:
 - effect: NoSchedule
   key: node-role.kubernetes.io/master
+- effect: NoSchedule
+  key: node-role.kubernetes.io/control-plane
2 changes: 1 addition & 1 deletion controlplane/kubeadm/controllers/controller_test.go
@@ -1516,7 +1516,7 @@ func createMachineNodePair(name string, cluster *clusterv1.Cluster, kcp *control
     node := &corev1.Node{
         ObjectMeta: metav1.ObjectMeta{
             Name:   name,
-            Labels: map[string]string{"node-role.kubernetes.io/master": ""},
+            Labels: map[string]string{"node-role.kubernetes.io/control-plane": ""},
         },
     }

37 changes: 25 additions & 12 deletions controlplane/kubeadm/internal/workload_cluster.go
@@ -51,13 +51,14 @@ import (
 )

 const (
-    kubeProxyKey              = "kube-proxy"
-    kubeadmConfigKey          = "kubeadm-config"
-    kubeletConfigKey          = "kubelet"
-    cgroupDriverKey           = "cgroupDriver"
-    labelNodeRoleControlPlane = "node-role.kubernetes.io/master"
-    clusterStatusKey          = "ClusterStatus"
-    clusterConfigurationKey   = "ClusterConfiguration"
+    kubeProxyKey                 = "kube-proxy"
+    kubeadmConfigKey             = "kubeadm-config"
+    kubeletConfigKey             = "kubelet"
+    cgroupDriverKey              = "cgroupDriver"
+    labelNodeRoleOldControlPlane = "node-role.kubernetes.io/master" // Deprecated: https://github.com/kubernetes/kubeadm/issues/2200
+    labelNodeRoleControlPlane    = "node-role.kubernetes.io/control-plane"
+    clusterStatusKey             = "ClusterStatus"
+    clusterConfigurationKey      = "ClusterConfiguration"
 )

var (
@@ -121,16 +122,28 @@ type Workload struct {
 var _ WorkloadCluster = &Workload{}

 func (w *Workload) getControlPlaneNodes(ctx context.Context) (*corev1.NodeList, error) {
-    nodes := &corev1.NodeList{}
-    labels := map[string]string{
-        labelNodeRoleControlPlane: "",
-    }
-    if err := w.Client.List(ctx, nodes, ctrlclient.MatchingLabels(labels)); err != nil {
+    allNodes := &corev1.NodeList{}
+    if err := w.Client.List(ctx, allNodes); err != nil {
         return nil, err
     }
+
+    nodes := &corev1.NodeList{}
+    for i := range allNodes.Items {
+        node := allNodes.Items[i]
+        if isControlPlaneNode(node) {
+            nodes.Items = append(nodes.Items, node)
+        }
+    }
+
     return nodes, nil
 }

+func isControlPlaneNode(node corev1.Node) bool {
+    _, hasLabelNodeRoleControlPlane := node.Labels[labelNodeRoleControlPlane]
+    _, hasLabelNodeRoleOldControlPlane := node.Labels[labelNodeRoleOldControlPlane]
+    return hasLabelNodeRoleControlPlane || hasLabelNodeRoleOldControlPlane
+}
+
 func (w *Workload) getConfigMap(ctx context.Context, configMap ctrlclient.ObjectKey) (*corev1.ConfigMap, error) {
     original := &corev1.ConfigMap{}
     if err := w.Client.Get(ctx, configMap, original); err != nil {
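
A note on the shape of the new lookup, as a hedged aside: a Kubernetes label selector ANDs its requirements, so "has node-role.kubernetes.io/master OR has node-role.kubernetes.io/control-plane" cannot be expressed in a single server-side List with MatchingLabels; hence getControlPlaneNodes now lists all nodes and filters them client-side via isControlPlaneNode. A minimal, self-contained sketch of that filtering logic follows; the constant and function names are invented for illustration and are not part of the commit.

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Illustrative constants mirroring the two role labels handled above.
const (
    oldControlPlaneLabel = "node-role.kubernetes.io/master"        // deprecated label
    newControlPlaneLabel = "node-role.kubernetes.io/control-plane" // replacement label
)

// filterControlPlaneNodes keeps every node that carries either role label,
// the same OR semantics as isControlPlaneNode in the diff above.
func filterControlPlaneNodes(all corev1.NodeList) corev1.NodeList {
    var controlPlane corev1.NodeList
    for i := range all.Items {
        node := all.Items[i]
        _, hasOld := node.Labels[oldControlPlaneLabel]
        _, hasNew := node.Labels[newControlPlaneLabel]
        if hasOld || hasNew {
            controlPlane.Items = append(controlPlane.Items, node)
        }
    }
    return controlPlane
}

func main() {
    nodes := corev1.NodeList{Items: []corev1.Node{
        {ObjectMeta: metav1.ObjectMeta{Name: "cp-0", Labels: map[string]string{oldControlPlaneLabel: ""}}},
        {ObjectMeta: metav1.ObjectMeta{Name: "cp-1", Labels: map[string]string{newControlPlaneLabel: ""}}},
        {ObjectMeta: metav1.ObjectMeta{Name: "worker-0"}},
    }}
    fmt.Println(len(filterControlPlaneNodes(nodes).Items)) // prints 2
}
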
@@ -980,6 +980,9 @@ func fakeNode(name string, options ...fakeNodeOption) *corev1.Node {
     p := &corev1.Node{
         ObjectMeta: metav1.ObjectMeta{
             Name: name,
+            Labels: map[string]string{
+                labelNodeRoleControlPlane: "",
+            },
         },
     }
     for _, opt := range options {
60 changes: 60 additions & 0 deletions controlplane/kubeadm/internal/workload_cluster_test.go
@@ -38,6 +38,66 @@ import (
     "sigs.k8s.io/cluster-api/util/yaml"
 )

+func TestIsControlPlaneNode(t *testing.T) {
+    tests := []struct {
+        name               string
+        node               corev1.Node
+        isControlPlaneNode bool
+    }{
+        {
+            name: "control plane node with old label",
+            node: corev1.Node{
+                ObjectMeta: metav1.ObjectMeta{
+                    Labels: map[string]string{
+                        labelNodeRoleOldControlPlane: "",
+                    },
+                },
+            },
+            isControlPlaneNode: true,
+        },
+        {
+            name: "control plane node with new label",
+            node: corev1.Node{
+                ObjectMeta: metav1.ObjectMeta{
+                    Labels: map[string]string{
+                        labelNodeRoleControlPlane: "",
+                    },
+                },
+            },
+            isControlPlaneNode: true,
+        },
+        {
+            name: "control plane node with both label",
+            node: corev1.Node{
+                ObjectMeta: metav1.ObjectMeta{
+                    Labels: map[string]string{
+                        labelNodeRoleControlPlane:    "",
+                        labelNodeRoleOldControlPlane: "",
+                    },
+                },
+            },
+            isControlPlaneNode: true,
+        },
+        {
+            name: "worker node",
+            node: corev1.Node{
+                ObjectMeta: metav1.ObjectMeta{
+                    Labels: map[string]string{},
+                },
+            },
+            isControlPlaneNode: false,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            g := NewWithT(t)
+
+            g.Expect(isControlPlaneNode(tt.node)).To(Equal(tt.isControlPlaneNode))
+        })
+    }
+}
+
 func TestUpdateKubeProxyImageInfo(t *testing.T) {
     tests := []struct {
         name string
16 changes: 15 additions & 1 deletion test/framework/deployment_helpers.go
@@ -34,6 +34,7 @@ import (
     "k8s.io/api/policy/v1beta1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/intstr"
+    utilversion "k8s.io/apimachinery/pkg/util/version"
     "k8s.io/client-go/kubernetes"
     "k8s.io/utils/pointer"
     "sigs.k8s.io/controller-runtime/pkg/client"
@@ -306,12 +307,25 @@ func DeployUnevictablePod(ctx context.Context, input DeployUnevictablePodInput)
         },
     }
     if input.ControlPlane != nil {
-        workloadDeployment.Spec.Template.Spec.NodeSelector = map[string]string{"node-role.kubernetes.io/master": ""}
+        serverVersion, err := workloadClient.ServerVersion()
+        Expect(err).ToNot(HaveOccurred())
+
+        // Use the control-plane label for Kubernetes version >= v1.20.0.
+        if utilversion.MustParseGeneric(serverVersion.String()).AtLeast(utilversion.MustParseGeneric("v1.20.0")) {
+            workloadDeployment.Spec.Template.Spec.NodeSelector = map[string]string{"node-role.kubernetes.io/control-plane": ""}
+        } else {
+            workloadDeployment.Spec.Template.Spec.NodeSelector = map[string]string{"node-role.kubernetes.io/master": ""}
+        }
+
         workloadDeployment.Spec.Template.Spec.Tolerations = []corev1.Toleration{
             {
                 Key:    "node-role.kubernetes.io/master",
                 Effect: "NoSchedule",
             },
+            {
+                Key:    "node-role.kubernetes.io/control-plane",
+                Effect: "NoSchedule",
+            },
         }
     }
     AddDeploymentToWorkloadCluster(ctx, AddDeploymentToWorkloadClusterInput{
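
The version gate above can also be read in isolation: utilversion.MustParseGeneric parses both the reported server version and the "v1.20.0" pivot, and AtLeast decides which role label to use as the node selector. Below is a small sketch of that decision; the helper name and the sample versions are invented for illustration, only the utilversion calls come from the diff.

package main

import (
    "fmt"

    utilversion "k8s.io/apimachinery/pkg/util/version"
)

// controlPlaneNodeSelector reproduces the branch added in DeployUnevictablePod:
// clusters at v1.20.0 or newer get the control-plane role label, older
// clusters keep the deprecated master label.
func controlPlaneNodeSelector(serverVersion string) map[string]string {
    if utilversion.MustParseGeneric(serverVersion).AtLeast(utilversion.MustParseGeneric("v1.20.0")) {
        return map[string]string{"node-role.kubernetes.io/control-plane": ""}
    }
    return map[string]string{"node-role.kubernetes.io/master": ""}
}

func main() {
    fmt.Println(controlPlaneNodeSelector("v1.23.4")) // map[node-role.kubernetes.io/control-plane:]
    fmt.Println(controlPlaneNodeSelector("v1.19.8")) // map[node-role.kubernetes.io/master:]
}
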
2 changes: 2 additions & 0 deletions test/infrastructure/docker/config/manager/manager.yaml
@@ -44,6 +44,8 @@ spec:
 tolerations:
 - effect: NoSchedule
   key: node-role.kubernetes.io/master
+- effect: NoSchedule
+  key: node-role.kubernetes.io/control-plane
 volumes:
 - name: dockersock
   hostPath:
