🌱 Support new control plane label and taint #5919

Merged
2 changes: 2 additions & 0 deletions bootstrap/kubeadm/config/manager/manager.yaml
@@ -41,3 +41,5 @@ spec:
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
2 changes: 2 additions & 0 deletions config/manager/manager.yaml
@@ -42,3 +42,5 @@ spec:
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
2 changes: 2 additions & 0 deletions controlplane/kubeadm/config/manager/manager.yaml
@@ -41,3 +41,5 @@ spec:
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
@@ -1516,7 +1516,7 @@ func createMachineNodePair(name string, cluster *clusterv1.Cluster, kcp *control
node := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{"node-role.kubernetes.io/master": ""},
Labels: map[string]string{"node-role.kubernetes.io/control-plane": ""},
},
}

47 changes: 33 additions & 14 deletions controlplane/kubeadm/internal/workload_cluster.go
@@ -36,6 +36,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/retry"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
@@ -51,13 +52,14 @@ import (
)

const (
kubeProxyKey = "kube-proxy"
kubeadmConfigKey = "kubeadm-config"
kubeletConfigKey = "kubelet"
cgroupDriverKey = "cgroupDriver"
labelNodeRoleControlPlane = "node-role.kubernetes.io/master"
clusterStatusKey = "ClusterStatus"
clusterConfigurationKey = "ClusterConfiguration"
kubeProxyKey = "kube-proxy"
kubeadmConfigKey = "kubeadm-config"
kubeletConfigKey = "kubelet"
cgroupDriverKey = "cgroupDriver"
labelNodeRoleOldControlPlane = "node-role.kubernetes.io/master" // Deprecated: https://github.com/kubernetes/kubeadm/issues/2200
labelNodeRoleControlPlane = "node-role.kubernetes.io/control-plane"
clusterStatusKey = "ClusterStatus"
clusterConfigurationKey = "ClusterConfiguration"
)

var (
@@ -121,14 +123,31 @@ type Workload struct {
var _ WorkloadCluster = &Workload{}

func (w *Workload) getControlPlaneNodes(ctx context.Context) (*corev1.NodeList, error) {
nodes := &corev1.NodeList{}
labels := map[string]string{
labelNodeRoleControlPlane: "",
}
if err := w.Client.List(ctx, nodes, ctrlclient.MatchingLabels(labels)); err != nil {
return nil, err
controlPlaneNodes := &corev1.NodeList{}
controlPlaneNodeNames := sets.NewString()

for _, label := range []string{labelNodeRoleOldControlPlane, labelNodeRoleControlPlane} {
nodes := &corev1.NodeList{}
if err := w.Client.List(ctx, nodes, ctrlclient.MatchingLabels(map[string]string{
label: "",
})); err != nil {
return nil, err
}

for i := range nodes.Items {
node := nodes.Items[i]

// Continue if we already added that node.
if controlPlaneNodeNames.Has(node.Name) {
continue
}

controlPlaneNodeNames.Insert(node.Name)
controlPlaneNodes.Items = append(controlPlaneNodes.Items, node)
}
}
return nodes, nil

return controlPlaneNodes, nil
}

func (w *Workload) getConfigMap(ctx context.Context, configMap ctrlclient.ObjectKey) (*corev1.ConfigMap, error) {
@@ -980,6 +980,9 @@ func fakeNode(name string, options ...fakeNodeOption) *corev1.Node {
p := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: map[string]string{
labelNodeRoleControlPlane: "",
Member Author:

This is now necessary because the old implementation of getControlPlaneNodes selected all nodes as control plane nodes regardless of the label when using fakeClient (I assume because ctrlclient.MatchingLabels(labels) is not implemented in the fake client).
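For context, here is a minimal, hypothetical sketch (not part of this PR) of how a label-filtered List behaves against controller-runtime's builder-based fake client, which the new TestGetControlPlaneNodes relies on; the node names and the package-main wrapper are illustrative only:

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	// One node carries the control plane label, one does not.
	cp := &corev1.Node{ObjectMeta: metav1.ObjectMeta{
		Name:   "cp-0",
		Labels: map[string]string{"node-role.kubernetes.io/control-plane": ""},
	}}
	worker := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "worker-0"}}

	c := fake.NewClientBuilder().WithObjects(cp, worker).Build()

	// The builder-based fake client evaluates MatchingLabels on List,
	// so only the labeled node is returned here.
	nodes := &corev1.NodeList{}
	if err := c.List(context.Background(), nodes, ctrlclient.MatchingLabels{
		"node-role.kubernetes.io/control-plane": "",
	}); err != nil {
		panic(err)
	}
	for _, n := range nodes.Items {
		fmt.Println(n.Name) // prints: cp-0
	}
}
```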

Member Author:

I'll take a closer look.

To be clear, for the new code everything works fine. I think it's now mostly a question of why we didn't have to set the control plane node label with the old code (i.e. is there something wrong with the fake client or our usage of it).

Member Author:

Okay, everything worked/works as expected.

In the relevant unit test we're injecting the list result:

injectClient: &fakeClient{
    list: &corev1.NodeList{
        Items: []corev1.Node{*fakeNode("n1")},
    },
},

Previously, we didn't have to set the control plane label because the result of the list was automatically treated as the list of control plane nodes. Now that we iterate through the result and check for the label, it has become relevant.
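To illustrate why the label didn't matter before, here is a simplified, assumed sketch of a hand-rolled test double like the fakeClient above: its List ignores the list options entirely, so label selectors such as ctrlclient.MatchingLabels are never applied (the repository's actual fakeClient may differ in detail):

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// fakeClient is a hypothetical hand-rolled test double: List returns
// whatever list was injected and never looks at the passed options.
type fakeClient struct {
	ctrlclient.Client
	list *corev1.NodeList
}

func (f *fakeClient) List(_ context.Context, list ctrlclient.ObjectList, _ ...ctrlclient.ListOption) error {
	// Label selectors (e.g. ctrlclient.MatchingLabels) are silently ignored,
	// so every injected node looks like a control plane node to the caller.
	if nodeList, ok := list.(*corev1.NodeList); ok {
		f.list.DeepCopyInto(nodeList)
	}
	return nil
}

func main() {
	c := &fakeClient{list: &corev1.NodeList{Items: []corev1.Node{
		{ObjectMeta: metav1.ObjectMeta{Name: "n1"}}, // no control plane label set
	}}}

	nodes := &corev1.NodeList{}
	if err := c.List(context.Background(), nodes, ctrlclient.MatchingLabels{
		"node-role.kubernetes.io/control-plane": "",
	}); err != nil {
		panic(err)
	}
	fmt.Println(len(nodes.Items)) // prints: 1 — the unlabeled node still comes back
}
```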

},
},
}
for _, opt := range options {
72 changes: 72 additions & 0 deletions controlplane/kubeadm/internal/workload_cluster_test.go
@@ -38,6 +38,78 @@ import (
"sigs.k8s.io/cluster-api/util/yaml"
)

func TestGetControlPlaneNodes(t *testing.T) {
tests := []struct {
name string
nodes []corev1.Node
expectedNodes []string
}{
{
name: "Return control plane nodes",
nodes: []corev1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "control-plane-node-with-old-label",
Labels: map[string]string{
labelNodeRoleOldControlPlane: "",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "control-plane-node-with-both-labels",
Labels: map[string]string{
labelNodeRoleOldControlPlane: "",
labelNodeRoleControlPlane: "",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "control-plane-node-with-new-label",
Labels: map[string]string{
labelNodeRoleControlPlane: "",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "worker-node",
Labels: map[string]string{},
},
},
},
expectedNodes: []string{
"control-plane-node-with-both-labels",
"control-plane-node-with-old-label",
"control-plane-node-with-new-label",
},
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
objs := []client.Object{}
for i := range tt.nodes {
objs = append(objs, &tt.nodes[i])
}
fakeClient := fake.NewClientBuilder().WithObjects(objs...).Build()

w := &Workload{
Client: fakeClient,
}
nodes, err := w.getControlPlaneNodes(ctx)
g.Expect(err).ToNot(HaveOccurred())
var actualNodes []string
for _, n := range nodes.Items {
actualNodes = append(actualNodes, n.Name)
}
g.Expect(actualNodes).To(Equal(tt.expectedNodes))
})
}
}

func TestUpdateKubeProxyImageInfo(t *testing.T) {
tests := []struct {
name string
3 changes: 3 additions & 0 deletions docs/book/src/user/troubleshooting.md
@@ -35,7 +35,10 @@ kubectl label nodes <name> node-role.kubernetes.io/worker=""
For convenience, here is an example one-liner to do this post installation

```
# Kubernetes 1.19 (kubeadm 1.19 sets only the node-role.kubernetes.io/master label)
kubectl get nodes --no-headers -l '!node-role.kubernetes.io/master' -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}' | xargs -I{} kubectl label node {} node-role.kubernetes.io/worker=''
# Kubernetes >= 1.20 (kubeadm >= 1.20 sets the node-role.kubernetes.io/control-plane label)
kubectl get nodes --no-headers -l '!node-role.kubernetes.io/control-plane' -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}' | xargs -I{} kubectl label node {} node-role.kubernetes.io/worker=''
```

## Cluster API with Docker
23 changes: 21 additions & 2 deletions test/framework/deployment_helpers.go
@@ -34,6 +34,7 @@ import (
"k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/client-go/kubernetes"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -42,6 +43,11 @@ import (
"sigs.k8s.io/cluster-api/test/framework/internal/log"
)

const (
nodeRoleOldControlPlane = "node-role.kubernetes.io/master" // Deprecated: https://github.com/kubernetes/kubeadm/issues/2200
nodeRoleControlPlane = "node-role.kubernetes.io/control-plane"
)

// WaitForDeploymentsAvailableInput is the input for WaitForDeploymentsAvailable.
type WaitForDeploymentsAvailableInput struct {
Getter Getter
@@ -306,10 +312,23 @@ func DeployUnevictablePod(ctx context.Context, input DeployUnevictablePodInput)
},
}
if input.ControlPlane != nil {
workloadDeployment.Spec.Template.Spec.NodeSelector = map[string]string{"node-role.kubernetes.io/master": ""}
serverVersion, err := workloadClient.ServerVersion()
Expect(err).ToNot(HaveOccurred())

// Use the control-plane label for Kubernetes version >= v1.20.0.
if utilversion.MustParseGeneric(serverVersion.String()).AtLeast(utilversion.MustParseGeneric("v1.20.0")) {
workloadDeployment.Spec.Template.Spec.NodeSelector = map[string]string{nodeRoleControlPlane: ""}
} else {
workloadDeployment.Spec.Template.Spec.NodeSelector = map[string]string{nodeRoleOldControlPlane: ""}
}

workloadDeployment.Spec.Template.Spec.Tolerations = []corev1.Toleration{
{
Key: "node-role.kubernetes.io/master",
Key: nodeRoleOldControlPlane,
Effect: "NoSchedule",
},
{
Key: nodeRoleControlPlane,
Effect: "NoSchedule",
},
}
2 changes: 2 additions & 0 deletions test/infrastructure/docker/config/manager/manager.yaml
@@ -44,6 +44,8 @@ spec:
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
volumes:
- name: dockersock
hostPath: