From 79515ec8800b68bc2a4f377661f16e2bcedb4dcd Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?=
Date: Wed, 1 Feb 2017 19:06:51 +0200
Subject: [PATCH] Move some code from apiclient.go to the dedicated apiconfig
 phase package. Add constants and somewhat refactor the RBAC code as well

---
 cmd/kubeadm/app/cmd/init.go              |  47 +++--
 cmd/kubeadm/app/constants/constants.go   |   4 +
 cmd/kubeadm/app/master/BUILD             |   1 -
 cmd/kubeadm/app/master/addons.go         |   6 +-
 cmd/kubeadm/app/master/apiclient.go      |  65 +------
 cmd/kubeadm/app/master/apiclient_test.go |  19 +-
 cmd/kubeadm/app/phases/apiconfig/BUILD   |   8 +-
 .../app/phases/apiconfig/clusterroles.go | 171 ++++++++++++------
 .../app/phases/apiconfig/setupmaster.go  |  83 +++++++++
 9 files changed, 238 insertions(+), 166 deletions(-)
 create mode 100644 cmd/kubeadm/app/phases/apiconfig/setupmaster.go

diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go
index 270211a47a23..9dc244f6ec42 100644
--- a/cmd/kubeadm/app/cmd/init.go
+++ b/cmd/kubeadm/app/cmd/init.go
@@ -33,7 +33,7 @@ import (
     kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
     "k8s.io/kubernetes/cmd/kubeadm/app/discovery"
     kubemaster "k8s.io/kubernetes/cmd/kubeadm/app/master"
-    "k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig"
+    apiconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig"
     certphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
     kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
     "k8s.io/kubernetes/cmd/kubeadm/app/preflight"
@@ -210,7 +210,7 @@ func (i *Init) Run(out io.Writer) error {
         }
     }

-    // Phase 3: Bootstrap the control plane
+    // PHASE 3: Bootstrap the control plane
     if err := kubemaster.WriteStaticPodManifests(i.cfg); err != nil {
         return err
     }
@@ -220,28 +220,34 @@ func (i *Init) Run(out io.Writer) error {
         return err
     }

-    if i.cfg.AuthorizationMode == kubeadmconstants.AuthzModeRBAC {
-        err = apiconfig.CreateBootstrapRBACClusterRole(client)
-        if err != nil {
-            return err
-        }
+    if err := apiconfigphase.UpdateMasterRoleLabelsAndTaints(client); err != nil {
+        return err
+    }

-        err = apiconfig.CreateKubeDNSRBACClusterRole(client)
-        if err != nil {
+    // Is deployment type self-hosted?
+    if i.selfHosted {
+        // Temporary control plane is up, now we create our self hosted control
+        // plane components and remove the static manifests:
+        fmt.Println("[init] Creating self-hosted control plane...")
+        if err := kubemaster.CreateSelfHostedControlPlane(i.cfg, client); err != nil {
             return err
         }
+    }
+
+    // PHASE 4: Set up various things in the API
+    // Create the necessary ServiceAccounts
+    err = apiconfigphase.CreateServiceAccounts(client)
+    if err != nil {
+        return err
+    }

-        // TODO: remove this when https://github.com/kubernetes/kubeadm/issues/114 is fixed
-        err = apiconfig.CreateKubeProxyClusterRoleBinding(client)
+    if i.cfg.AuthorizationMode == kubeadmconstants.AuthzModeRBAC {
+        err = apiconfigphase.CreateRBACRules(client)
         if err != nil {
             return err
         }
     }

-    if err := kubemaster.UpdateMasterRoleLabelsAndTaints(client, false); err != nil {
-        return err
-    }
-
     if i.cfg.Discovery.Token != nil {
         fmt.Printf("[token-discovery] Using token: %s\n", kubeadmutil.BearerToken(i.cfg.Discovery.Token))
         if err := kubemaster.CreateDiscoveryDeploymentAndSecret(i.cfg, client); err != nil {
@@ -252,16 +258,7 @@ func (i *Init) Run(out io.Writer) error {
         }
     }

-    // Is deployment type self-hosted?
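The reordering above is easier to see outside the diff: once the temporary (or self-hosted) control plane is reachable, everything kubeadm writes into the API now goes through the apiconfig phase package. Below is a minimal sketch of the same sequence using only functions that appear in this patch; the wrapper function itself and the rbacEnabled parameter are mine, not part of the commit.

package example

import (
    kubemaster "k8s.io/kubernetes/cmd/kubeadm/app/master"
    apiconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig"
)

// apiConfigSequence mirrors PHASE 3/4 of (*Init).Run above: wait for the API,
// label and taint the master, then populate ServiceAccounts and RBAC rules.
func apiConfigSequence(kubeconfigPath string, rbacEnabled bool) error {
    client, err := kubemaster.CreateClientAndWaitForAPI(kubeconfigPath)
    if err != nil {
        return err
    }
    if err := apiconfigphase.UpdateMasterRoleLabelsAndTaints(client); err != nil {
        return err
    }
    // The ServiceAccounts are created unconditionally...
    if err := apiconfigphase.CreateServiceAccounts(client); err != nil {
        return err
    }
    // ...while the RBAC rules are only needed when AuthorizationMode is RBAC.
    if rbacEnabled {
        return apiconfigphase.CreateRBACRules(client)
    }
    return nil
}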
-    if i.selfHosted {
-        // Temporary control plane is up, now we create our self hosted control
-        // plane components and remove the static manifests:
-        fmt.Println("[init] Creating self-hosted control plane...")
-        if err := kubemaster.CreateSelfHostedControlPlane(i.cfg, client); err != nil {
-            return err
-        }
-    }
-
+    // PHASE 5: Deploy essential addons
     if err := kubemaster.CreateEssentialAddons(i.cfg, client); err != nil {
         return err
     }

diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go
index 386b1653f132..aad72ee82743 100644
--- a/cmd/kubeadm/app/constants/constants.go
+++ b/cmd/kubeadm/app/constants/constants.go
@@ -39,4 +39,8 @@ const (

     // Important: a "v"-prefix shouldn't exist here; semver doesn't allow that
     MinimumControlPlaneVersion = "1.6.0-alpha.1"
+
+    // Constants for what we name our ServiceAccounts with limited access to the cluster in case of RBAC
+    KubeDNSServiceAccountName   = "kube-dns"
+    KubeProxyServiceAccountName = "kube-proxy"
 )

diff --git a/cmd/kubeadm/app/master/BUILD b/cmd/kubeadm/app/master/BUILD
index a0e7cbd4b5e2..e010f9da6e84 100644
--- a/cmd/kubeadm/app/master/BUILD
+++ b/cmd/kubeadm/app/master/BUILD
@@ -38,7 +38,6 @@ go_library(
         "//vendor:k8s.io/apimachinery/pkg/util/uuid",
         "//vendor:k8s.io/apimachinery/pkg/util/wait",
         "//vendor:k8s.io/client-go/tools/clientcmd",
-        "//vendor:k8s.io/client-go/tools/clientcmd/api",
         "//vendor:k8s.io/client-go/util/cert",
     ],
 )

diff --git a/cmd/kubeadm/app/master/addons.go b/cmd/kubeadm/app/master/addons.go
index ab9b03dfd209..ecea19091866 100644
--- a/cmd/kubeadm/app/master/addons.go
+++ b/cmd/kubeadm/app/master/addons.go
@@ -310,11 +310,7 @@ func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientse
     kubeDNSDeployment := NewDeployment(KubeDNS, 1, createKubeDNSPodSpec(cfg))
     SetMasterTaintTolerations(&kubeDNSDeployment.Spec.Template.ObjectMeta)

-    kubeDNSServiceAccount := &v1.ServiceAccount{}
-    kubeDNSServiceAccount.ObjectMeta.Name = KubeDNS
-    if _, err := client.ServiceAccounts(metav1.NamespaceSystem).Create(kubeDNSServiceAccount); err != nil {
-        return fmt.Errorf("failed creating kube-dns service account [%v]", err)
-    }
+
     if _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(kubeDNSDeployment); err != nil {
         return fmt.Errorf("failed creating essential kube-dns addon [%v]", err)
     }
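One consequence of deleting the inline ServiceAccount creation from CreateEssentialAddons: the kube-dns Deployment is now expected to run under the ServiceAccount that the apiconfig phase creates. This diff does not show that wiring, so the following is only a sketch of the idea; the helper name is hypothetical.

package master

import (
    kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
    "k8s.io/kubernetes/pkg/api/v1"
)

// withKubeDNSServiceAccount (hypothetical) points a pod spec at the kube-dns
// ServiceAccount now created by apiconfigphase.CreateServiceAccounts.
func withKubeDNSServiceAccount(spec v1.PodSpec) v1.PodSpec {
    spec.ServiceAccountName = kubeadmconstants.KubeDNSServiceAccountName
    return spec
}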
diff --git a/cmd/kubeadm/app/master/apiclient.go b/cmd/kubeadm/app/master/apiclient.go
index d00d76c89c2e..0b960d88fe1e 100644
--- a/cmd/kubeadm/app/master/apiclient.go
+++ b/cmd/kubeadm/app/master/apiclient.go
@@ -25,7 +25,6 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/tools/clientcmd"
-    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
     "k8s.io/kubernetes/cmd/kubeadm/app/images"
     "k8s.io/kubernetes/pkg/api/v1"
     extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
@@ -34,8 +33,11 @@ const apiCallRetryInterval = 500 * time.Millisecond

-// TODO: This method shouldn't exist as a standalone function but be integrated into CreateClientFromFile
-func createAPIClient(adminKubeconfig *clientcmdapi.Config) (*clientset.Clientset, error) {
+func CreateClientFromFile(path string) (*clientset.Clientset, error) {
+    adminKubeconfig, err := clientcmd.LoadFromFile(path)
+    if err != nil {
+        return nil, fmt.Errorf("failed to load admin kubeconfig [%v]", err)
+    }
     adminClientConfig, err := clientcmd.NewDefaultClientConfig(
         *adminKubeconfig,
         &clientcmd.ConfigOverrides{},
@@ -51,14 +53,6 @@ func createAPIClient(adminKubeconfig *clientcmdapi.Config) (*clientset.Clientset
     return client, nil
 }

-func CreateClientFromFile(path string) (*clientset.Clientset, error) {
-    adminKubeconfig, err := clientcmd.LoadFromFile(path)
-    if err != nil {
-        return nil, fmt.Errorf("failed to load admin kubeconfig [%v]", err)
-    }
-    return createAPIClient(adminKubeconfig)
-}
-
 func CreateClientAndWaitForAPI(file string) (*clientset.Clientset, error) {
     client, err := CreateClientFromFile(file)
     if err != nil {
@@ -171,55 +165,6 @@ func NewDeployment(deploymentName string, replicas int32, podSpec v1.PodSpec) *e
     }
 }

-// It's safe to do this for alpha, as we don't have HA and there is no way we can get
-// more then one node here (TODO(phase1+) use os.Hostname)
-func findMyself(client *clientset.Clientset) (*v1.Node, error) {
-    nodeList, err := client.Nodes().List(metav1.ListOptions{})
-    if err != nil {
-        return nil, fmt.Errorf("unable to list nodes [%v]", err)
-    }
-    if len(nodeList.Items) < 1 {
-        return nil, fmt.Errorf("no nodes found")
-    }
-    node := &nodeList.Items[0]
-    return node, nil
-}
-
-func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, schedulable bool) error {
-    n, err := findMyself(client)
-    if err != nil {
-        return err
-    }
-
-    n.ObjectMeta.Labels[metav1.NodeLabelKubeadmAlphaRole] = metav1.NodeLabelRoleMaster
-
-    if !schedulable {
-        taintsAnnotation, _ := json.Marshal([]v1.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
-        n.ObjectMeta.Annotations[v1.TaintsAnnotationKey] = string(taintsAnnotation)
-    }
-
-    if _, err := client.Nodes().Update(n); err != nil {
-        if apierrs.IsConflict(err) {
-            fmt.Println("[apiclient] Temporarily unable to update master node metadata due to conflict (will retry)")
-            time.Sleep(apiCallRetryInterval)
-            attemptToUpdateMasterRoleLabelsAndTaints(client, schedulable)
-        } else {
-            return err
-        }
-    }
-
-    return nil
-}
-
-func UpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, schedulable bool) error {
-    // TODO(phase1+) use iterate instead of recursion
-    err := attemptToUpdateMasterRoleLabelsAndTaints(client, schedulable)
-    if err != nil {
-        return fmt.Errorf("failed to update master node - [%v]", err)
-    }
-    return nil
-}
-
 func SetMasterTaintTolerations(meta *metav1.ObjectMeta) {
     tolerationsAnnotation, _ := json.Marshal([]v1.Toleration{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
     if meta.Annotations == nil {
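Folding createAPIClient into CreateClientFromFile leaves a single entry point for building a client from an admin kubeconfig. Typical usage looks roughly like this; a sketch, where the path is the conventional kubeadm location rather than something this diff pins down.

package example

import (
    "fmt"

    kubemaster "k8s.io/kubernetes/cmd/kubeadm/app/master"
)

func newAdminClient() error {
    // LoadFromFile and NewDefaultClientConfig now both happen inside this call.
    client, err := kubemaster.CreateClientFromFile("/etc/kubernetes/admin.conf")
    if err != nil {
        return fmt.Errorf("couldn't create API client: %v", err)
    }
    _ = client // ready for the apiconfig phase calls
    return nil
}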
diff --git a/cmd/kubeadm/app/master/apiclient_test.go b/cmd/kubeadm/app/master/apiclient_test.go
index 4476ad8ad15e..9936e965062b 100644
--- a/cmd/kubeadm/app/master/apiclient_test.go
+++ b/cmd/kubeadm/app/master/apiclient_test.go
@@ -17,22 +17,11 @@ limitations under the License.
 package master

 import (
-    "fmt"
     "testing"

-    "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
-    kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
-    apiv1 "k8s.io/kubernetes/pkg/api/v1"
+    "k8s.io/kubernetes/pkg/api/v1"
 )

-func TestCreateClientAndWaitForAPI(t *testing.T) {
-    cfg := &kubeadmapi.MasterConfiguration{
-        Networking: kubeadm.Networking{DNSDomain: "localhost"},
-    }
-    fmt.Println(cfg)
-
-}
-
 func TestStandardLabels(t *testing.T) {
     var tests = []struct {
         n        string
@@ -90,7 +79,7 @@ func TestNewDaemonSet(t *testing.T) {
     }

     for _, rt := range tests {
-        p := apiv1.PodSpec{}
+        p := v1.PodSpec{}
         actual := NewDaemonSet(rt.dn, p)
         if actual.Spec.Selector.MatchLabels["k8s-app"] != rt.expected {
             t.Errorf(
@@ -132,7 +121,7 @@ func TestNewService(t *testing.T) {
     }

     for _, rt := range tests {
-        p := apiv1.ServiceSpec{}
+        p := v1.ServiceSpec{}
         actual := NewService(rt.dn, p)
         if actual.ObjectMeta.Labels["k8s-app"] != rt.expected {
             t.Errorf(
@@ -174,7 +163,7 @@ func TestNewDeployment(t *testing.T) {
     }

     for _, rt := range tests {
-        p := apiv1.PodSpec{}
+        p := v1.PodSpec{}
         actual := NewDeployment(rt.dn, 1, p)
         if actual.Spec.Selector.MatchLabels["k8s-app"] != rt.expected {
             t.Errorf(

diff --git a/cmd/kubeadm/app/phases/apiconfig/BUILD b/cmd/kubeadm/app/phases/apiconfig/BUILD
index 9978455bc0cc..bd7944562a3a 100644
--- a/cmd/kubeadm/app/phases/apiconfig/BUILD
+++ b/cmd/kubeadm/app/phases/apiconfig/BUILD
@@ -9,12 +9,18 @@ load(

 go_library(
     name = "go_default_library",
-    srcs = ["clusterroles.go"],
+    srcs = [
+        "clusterroles.go",
+        "setupmaster.go",
+    ],
     tags = ["automanaged"],
     deps = [
+        "//cmd/kubeadm/app/constants:go_default_library",
         "//cmd/kubeadm/app/master:go_default_library",
+        "//pkg/api/v1:go_default_library",
         "//pkg/apis/rbac/v1beta1:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
+        "//vendor:k8s.io/apimachinery/pkg/api/errors",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
     ],
 )
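The test diff above only removes a vacuous test, and clusterroles.go below asks whether unit tests are feasible without duplicating all the values. One answer is the generated fake clientset; note, though, that this only compiles if the helpers accept clientset.Interface instead of the concrete *clientset.Clientset, which is an assumed refactor and not part of this patch.

package apiconfig

import (
    "testing"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
)

// Assumes CreateServiceAccounts were refactored to take clientset.Interface.
func TestCreateServiceAccounts(t *testing.T) {
    client := fake.NewSimpleClientset()
    if err := CreateServiceAccounts(client); err != nil {
        t.Fatalf("CreateServiceAccounts returned an error: %v", err)
    }
    sas, err := client.CoreV1().ServiceAccounts(metav1.NamespaceSystem).List(metav1.ListOptions{})
    if err != nil {
        t.Fatalf("couldn't list serviceaccounts: %v", err)
    }
    if len(sas.Items) != 2 {
        t.Errorf("expected the kube-dns and kube-proxy serviceaccounts, got %d items", len(sas.Items))
    }
}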
diff --git a/cmd/kubeadm/app/phases/apiconfig/clusterroles.go b/cmd/kubeadm/app/phases/apiconfig/clusterroles.go
index 446eaab1c2e5..b6981e048db3 100644
--- a/cmd/kubeadm/app/phases/apiconfig/clusterroles.go
+++ b/cmd/kubeadm/app/phases/apiconfig/clusterroles.go
@@ -20,38 +20,76 @@ import (
     "fmt"

     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
     "k8s.io/kubernetes/cmd/kubeadm/app/master"
+    "k8s.io/kubernetes/pkg/api/v1"
     rbac "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
     "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 )

-// CreateBootstrapRBACClusterRole grants the system:node-bootstrapper role to the group we created the bootstrap credential with
-func CreateBootstrapRBACClusterRole(clientset *clientset.Clientset) error {
-    clusterRoleBinding := rbac.ClusterRoleBinding{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: "kubeadm:kubelet-bootstrap",
-        },
-        RoleRef: rbac.RoleRef{
-            APIGroup: "rbac.authorization.k8s.io",
-            Kind:     "ClusterRole",
-            Name:     "system:node-bootstrapper",
-        },
-        Subjects: []rbac.Subject{
-            {Kind: "Group", Name: master.KubeletBootstrapGroup},
-        },
+const (
+    // TODO: This role should eventually be a system:-prefixed, automatically bootstrapped ClusterRole
+
+    // KubeDNSClusterRoleName sets the name for the kube-dns ClusterRole
+    KubeDNSClusterRoleName = "kubeadm:kube-dns"
+    // KubeProxyClusterRoleName sets the name for the kube-proxy ClusterRole
+    KubeProxyClusterRoleName = "system:node-proxier"
+    // NodeBootstrapperClusterRoleName sets the name for the TLS Node Bootstrapper ClusterRole
+    NodeBootstrapperClusterRoleName = "system:node-bootstrapper"
+
+    // Constants
+    clusterRoleKind    = "ClusterRole"
+    serviceAccountKind = "ServiceAccount"
+    rbacAPIGroup       = "rbac.authorization.k8s.io"
+)
+
+// TODO: Are there any unit tests that could be made for this file other than duplicating all values and logic in a separate file?
+
+// CreateRBACRules creates the essential RBAC rules for a minimally set-up cluster
+func CreateRBACRules(clientset *clientset.Clientset) error {
+    // Create the ClusterRoles we need for our RBAC rules
+    if err := CreateClusterRoles(clientset); err != nil {
+        return err
     }
-    if _, err := clientset.Rbac().ClusterRoleBindings().Create(&clusterRoleBinding); err != nil {
+    // Create the ClusterRoleBindings we need for our RBAC rules
+    if err := CreateClusterRoleBindings(clientset); err != nil {
         return err
     }
-    fmt.Println("[apiconfig] Created node bootstrapper RBAC rules")
+    fmt.Println("[apiconfig] Created RBAC rules")
     return nil
 }

-// CreateKubeDNSRBACClusterRole creates the necessary ClusterRole for kube-dns
-func CreateKubeDNSRBACClusterRole(clientset *clientset.Clientset) error {
+// CreateServiceAccounts creates the necessary serviceaccounts that kubeadm uses/might use.
+func CreateServiceAccounts(clientset *clientset.Clientset) error {
+    serviceAccounts := []v1.ServiceAccount{
+        {
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      kubeadmconstants.KubeDNSServiceAccountName,
+                Namespace: metav1.NamespaceSystem,
+            },
+        },
+        {
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      kubeadmconstants.KubeProxyServiceAccountName,
+                Namespace: metav1.NamespaceSystem,
+            },
+        },
+    }
+
+    for _, sa := range serviceAccounts {
+        if _, err := clientset.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Create(&sa); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+// CreateClusterRoles creates the ClusterRoles that aren't bootstrapped by the apiserver
+func CreateClusterRoles(clientset *clientset.Clientset) error {
+    // TODO: Remove this ClusterRole when it's automatically bootstrapped in the apiserver
     clusterRole := rbac.ClusterRole{
-        ObjectMeta: metav1.ObjectMeta{Name: "kubeadm:" + master.KubeDNS},
+        ObjectMeta: metav1.ObjectMeta{Name: KubeDNSClusterRoleName},
         Rules: []rbac.PolicyRule{
             rbac.NewRule("list", "watch").Groups("").Resources("endpoints", "services").RuleOrDie(),
             // TODO: remove watch rule when https://github.com/kubernetes/kubernetes/pull/38816 gets merged
@@ -61,53 +99,68 @@ func CreateKubeDNSRBACClusterRole(clientset *clientset.Clientset) error {
     if _, err := clientset.Rbac().ClusterRoles().Create(&clusterRole); err != nil {
         return err
     }
-
-    subject := rbac.Subject{
-        Kind:      "ServiceAccount",
-        Name:      master.KubeDNS,
-        Namespace: metav1.NamespaceSystem,
-    }
-
-    clusterRoleBinding := rbac.ClusterRoleBinding{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: "kubeadm:" + master.KubeDNS,
-        },
-        RoleRef: rbac.RoleRef{
-            APIGroup: "rbac.authorization.k8s.io",
-            Kind:     "ClusterRole",
-            Name:     clusterRole.Name,
-        },
-        Subjects: []rbac.Subject{subject},
-    }
-    if _, err := clientset.Rbac().ClusterRoleBindings().Create(&clusterRoleBinding); err != nil {
-        return err
-    }
-    fmt.Println("[apiconfig] Created kube-dns RBAC rules")
-
     return nil
 }

-// CreateKubeProxyClusterRoleBinding grants the system:node-proxier role to the nodes group,
-// since kubelet credentials are used to run the kube-proxy
-// TODO: give the kube-proxy its own credential and stop requiring this
-func CreateKubeProxyClusterRoleBinding(clientset *clientset.Clientset) error {
-    clusterRoleBinding := rbac.ClusterRoleBinding{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: "kubeadm:node-proxier",
+// CreateClusterRoleBindings creates all necessary bindings between bootstrapped & kubeadm-created ClusterRoles and subjects kubeadm is using
+func CreateClusterRoleBindings(clientset *clientset.Clientset) error {
+    clusterRoleBindings := []rbac.ClusterRoleBinding{
+        {
+            ObjectMeta: metav1.ObjectMeta{
+                Name: "kubeadm:kubelet-bootstrap",
+            },
+            RoleRef: rbac.RoleRef{
+                APIGroup: rbacAPIGroup,
+                Kind:     clusterRoleKind,
+                Name:     NodeBootstrapperClusterRoleName,
+            },
+            Subjects: []rbac.Subject{
+                {
+                    Kind: "Group",
+                    Name: master.KubeletBootstrapGroup,
+                },
+            },
         },
-        RoleRef: rbac.RoleRef{
-            APIGroup: "rbac.authorization.k8s.io",
-            Kind:     "ClusterRole",
-            Name:     "system:node-proxier",
+        {
+            ObjectMeta: metav1.ObjectMeta{
+                Name: "kubeadm:kube-dns",
+            },
+            RoleRef: rbac.RoleRef{
+                APIGroup: rbacAPIGroup,
+                Kind:     clusterRoleKind,
+                Name:     KubeDNSClusterRoleName,
+            },
+            Subjects: []rbac.Subject{
+                {
+                    Kind:      serviceAccountKind,
+                    Name:      kubeadmconstants.KubeDNSServiceAccountName,
+                    Namespace: metav1.NamespaceSystem,
+                },
+            },
         },
-        Subjects: []rbac.Subject{
-            {Kind: "Group", Name: "system:nodes"},
+        {
+            ObjectMeta: metav1.ObjectMeta{
+                Name: "kubeadm:node-proxier",
+            },
+            RoleRef: rbac.RoleRef{
+                APIGroup: rbacAPIGroup,
+                Kind:     clusterRoleKind,
+                Name:     KubeProxyClusterRoleName,
+            },
+            Subjects: []rbac.Subject{
+                {
+                    Kind:      serviceAccountKind,
+                    Name:      kubeadmconstants.KubeProxyServiceAccountName,
+                    Namespace: metav1.NamespaceSystem,
+                },
+            },
         },
     }
-    if _, err := clientset.Rbac().ClusterRoleBindings().Create(&clusterRoleBinding); err != nil {
-        return err
-    }
-    fmt.Println("[apiconfig] Created kube-proxy RBAC rules")
+
+    for _, clusterRoleBinding := range clusterRoleBindings {
+        if _, err := clientset.Rbac().ClusterRoleBindings().Create(&clusterRoleBinding); err != nil {
+            return err
+        }
+    }
     return nil
 }
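A design note on the Create loops just shown: both CreateServiceAccounts and CreateClusterRoleBindings bail out on the first error, so a repeated kubeadm init against the same cluster fails with AlreadyExists. If re-runs should be tolerated, the errors package this BUILD file already depends on has the right predicate; a sketch, with a helper name of my choosing:

package apiconfig

import (
    "fmt"

    apierrs "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// createServiceAccountIdempotent treats "already exists" as success so the
// phase can safely run more than once.
func createServiceAccountIdempotent(clientset *clientset.Clientset, sa *v1.ServiceAccount) error {
    if _, err := clientset.CoreV1().ServiceAccounts(metav1.NamespaceSystem).Create(sa); err != nil {
        if !apierrs.IsAlreadyExists(err) {
            return fmt.Errorf("couldn't create serviceaccount %q [%v]", sa.Name, err)
        }
    }
    return nil
}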
diff --git a/cmd/kubeadm/app/phases/apiconfig/setupmaster.go b/cmd/kubeadm/app/phases/apiconfig/setupmaster.go
new file mode 100644
index 000000000000..6e39591a8979
--- /dev/null
+++ b/cmd/kubeadm/app/phases/apiconfig/setupmaster.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiconfig
+
+import (
+    "encoding/json"
+    "fmt"
+    "time"
+
+    apierrs "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/kubernetes/pkg/api/v1"
+    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
+)
+
+const apiCallRetryInterval = 500 * time.Millisecond
+
+// TODO: Can we think of any unit tests here? Or should this code just be covered through integration/e2e tests?
+
+// It's safe to do this for alpha, as we don't have HA and there is no way we can get
+// more than one node here (TODO(phase1+) use os.Hostname)
+func findMyself(client *clientset.Clientset) (*v1.Node, error) {
+    nodeList, err := client.Nodes().List(metav1.ListOptions{})
+    if err != nil {
+        return nil, fmt.Errorf("unable to list nodes [%v]", err)
+    }
+    if len(nodeList.Items) < 1 {
+        return nil, fmt.Errorf("no nodes found")
+    }
+    node := &nodeList.Items[0]
+    return node, nil
+}
+
+func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error {
+    n, err := findMyself(client)
+    if err != nil {
+        return err
+    }
+
+    // TODO: Switch to the new master label defined in https://github.com/kubernetes/kubernetes/pull/39112
+    n.ObjectMeta.Labels[metav1.NodeLabelKubeadmAlphaRole] = metav1.NodeLabelRoleMaster
+
+    // TODO: Use the Taints beta field on the NodeSpec now
+    taintsAnnotation, _ := json.Marshal([]v1.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
+    n.ObjectMeta.Annotations[v1.TaintsAnnotationKey] = string(taintsAnnotation)
+
+    // TODO: Use a patch instead of an Update
+    if _, err := client.Nodes().Update(n); err != nil {
+        if apierrs.IsConflict(err) {
+            fmt.Println("[apiclient] Temporarily unable to update master node metadata due to conflict (will retry)")
+            time.Sleep(apiCallRetryInterval)
+            return attemptToUpdateMasterRoleLabelsAndTaints(client)
+        }
+        return err
+    }
+
+    return nil
+}
+
+// UpdateMasterRoleLabelsAndTaints taints the master and sets the master label
+func UpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error {
+    // TODO: Use iterate instead of recursion
+    err := attemptToUpdateMasterRoleLabelsAndTaints(client)
+    if err != nil {
+        return fmt.Errorf("failed to update master node [%v]", err)
+    }
+    return nil
+}
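On the two retry-related TODOs in setupmaster.go ("use iterate instead of recursion" and the conflict handling): the wait package, already vendored for apiclient.go, gives a bounded, iterative version. A sketch in the same package, reusing findMyself and apiCallRetryInterval from above; the timeout parameter is an illustrative addition, not something this patch specifies:

package apiconfig

import (
    "encoding/json"
    "fmt"
    "time"

    apierrs "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// updateMasterIteratively re-fetches the node on every attempt and retries
// conflicts until the timeout expires, instead of recursing.
func updateMasterIteratively(client *clientset.Clientset, timeout time.Duration) error {
    return wait.PollImmediate(apiCallRetryInterval, timeout, func() (bool, error) {
        n, err := findMyself(client)
        if err != nil {
            return false, err
        }
        n.ObjectMeta.Labels[metav1.NodeLabelKubeadmAlphaRole] = metav1.NodeLabelRoleMaster
        taintsAnnotation, _ := json.Marshal([]v1.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
        n.ObjectMeta.Annotations[v1.TaintsAnnotationKey] = string(taintsAnnotation)

        if _, err := client.Nodes().Update(n); err != nil {
            if apierrs.IsConflict(err) {
                fmt.Println("[apiclient] Temporarily unable to update master node metadata due to conflict (will retry)")
                return false, nil // not done yet; poll again with a freshly fetched node
            }
            return false, err // any other error aborts immediately
        }
        return true, nil
    })
}

A caller would pick the bound explicitly, e.g. updateMasterIteratively(client, 30*time.Second).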
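The "use a patch instead of an Update" TODO would sidestep the conflict handling entirely, since a patch carries no resourceVersion to collide on. A sketch using a strategic merge patch; the JSON payload and helper name are illustrative:

package apiconfig

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

// patchMasterRoleLabel sets the master role label without a read-modify-write
// cycle, so there is no conflict to retry.
func patchMasterRoleLabel(client *clientset.Clientset, nodeName string) error {
    patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`,
        metav1.NodeLabelKubeadmAlphaRole, metav1.NodeLabelRoleMaster)
    if _, err := client.Nodes().Patch(nodeName, types.StrategicMergePatchType, []byte(patch)); err != nil {
        return fmt.Errorf("failed to patch node %s [%v]", nodeName, err)
    }
    return nil
}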
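And for the "Taints beta field" TODO: the annotation round-trip through json.Marshal disappears once the taint is written to the NodeSpec directly. A sketch, assuming the vendored v1 types already expose Spec.Taints (which the TODO implies):

package apiconfig

import (
    "k8s.io/kubernetes/pkg/api/v1"
)

// setMasterTaint records dedicated=master:NoSchedule on the NodeSpec itself
// instead of serializing it into the taints annotation.
func setMasterTaint(n *v1.Node) {
    taint := v1.Taint{Key: "dedicated", Value: "master", Effect: v1.TaintEffectNoSchedule}
    for _, t := range n.Spec.Taints {
        if t.Key == taint.Key && t.Effect == taint.Effect {
            return // already present; keep the update idempotent
        }
    }
    n.Spec.Taints = append(n.Spec.Taints, taint)
}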