From 60b77a72eb8a14e0e534f3a6121f6b95e5a0f173 Mon Sep 17 00:00:00 2001 From: Ling Samuel Date: Thu, 10 Dec 2020 13:53:13 +0800 Subject: [PATCH] Support multi control plane --- cmd/minikube/cmd/node_add.go | 14 +++- cmd/minikube/cmd/node_start.go | 4 +- cmd/minikube/cmd/node_stop.go | 2 +- cmd/minikube/cmd/start.go | 18 +++-- cmd/minikube/cmd/start_flags.go | 16 +++++ cmd/minikube/cmd/status.go | 31 +++++--- cmd/minikube/cmd/stop.go | 17 +++++ pkg/minikube/bootstrapper/bootstrapper.go | 2 +- .../bootstrapper/bsutil/extraconfig.go | 2 +- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 2 +- pkg/minikube/bootstrapper/certs.go | 10 ++- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 71 ++++++++++++++++--- pkg/minikube/cni/kindnet.go | 2 +- pkg/minikube/config/profile.go | 8 ++- pkg/minikube/config/types.go | 1 + pkg/minikube/constants/constants.go | 4 +- pkg/minikube/driver/driver.go | 2 +- pkg/minikube/node/node.go | 28 +++++++- pkg/minikube/node/start.go | 28 +++++--- pkg/minikube/reason/reason.go | 1 + 20 files changed, 211 insertions(+), 52 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 5121fb1affe2..a207cc39fb3a 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -20,11 +20,13 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/out/register" "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/style" ) @@ -48,16 +50,25 @@ var nodeAddCmd = &cobra.Command{ name := node.Name(len(cc.Nodes) + 1) - out.Step(style.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) + if cp { + out.Step(style.Happy, "Adding control plane node {{.name}} to 
cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) + } else { + out.Step(style.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) + } // TODO: Deal with parameters better. Ideally we should be able to acceot any node-specific minikube start params here. n := config.Node{ Name: name, Worker: worker, ControlPlane: cp, + ApiEndpointServer: false, KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, } + if n.ControlPlane { + n.Port = constants.APIServerPort + } + // Make sure to decrease the default amount of memory we use per VM if this is the first worker node if len(cc.Nodes) == 1 { warnAboutMultiNode() @@ -66,6 +77,7 @@ var nodeAddCmd = &cobra.Command{ } } + register.Reg.SetStep(register.InitialSetup) if err := node.Add(cc, n, false); err != nil { _, err := maybeDeleteAndRetry(cmd, *cc, n, nil, err) if err != nil { diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index 7470d62431a9..aef47e0d6b6a 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -56,7 +56,7 @@ var nodeStartCmd = &cobra.Command{ } register.Reg.SetStep(register.InitialSetup) - r, p, m, h, err := node.Provision(cc, n, n.ControlPlane, viper.GetBool(deleteOnFailure)) + r, p, m, h, err := node.Provision(cc, n, viper.GetBool(deleteOnFailure)) if err != nil { exit.Error(reason.GuestNodeProvision, "provisioning host for node", err) } @@ -71,7 +71,7 @@ var nodeStartCmd = &cobra.Command{ ExistingAddons: nil, } - _, err = node.Start(s, n.ControlPlane) + _, err = node.Start(s) if err != nil { _, err := maybeDeleteAndRetry(cmd, *cc, *n, nil, err) if err != nil { diff --git a/cmd/minikube/cmd/node_stop.go b/cmd/minikube/cmd/node_stop.go index db718f311e01..28dae174c0d2 100644 --- a/cmd/minikube/cmd/node_stop.go +++ b/cmd/minikube/cmd/node_stop.go @@ -46,7 +46,7 @@ var nodeStopCmd = &cobra.Command{ } machineName := driver.MachineName(*cc, *n) - + node.MustReset(*cc, *n, api, machineName) err 
= machine.StopHost(api, machineName) if err != nil { out.FatalT("Failed to stop node {{.name}}", out.V{"name": name}) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 5f3062b7704d..73ca9830bb1a 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -326,7 +326,7 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing * ssh.SetDefaultClient(ssh.External) } - mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, true, viper.GetBool(deleteOnFailure)) + mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, viper.GetBool(deleteOnFailure)) if err != nil { return node.Starter{}, err } @@ -343,7 +343,13 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing * } func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) { - kubeconfig, err := node.Start(starter, true) + // TODO: Currently, we start the primary control plane first. If there are multiple control planes, + // the kube-apiserver will keep crash to wait for other apiserver to respond, which blocks health checks. + // As a temporary solution, we reset the stacked control planes before we stopped it. + // To fix this, we could: + // - Delay the health check. + // - Start all control planes at the same time. + kubeconfig, err := node.Start(starter) if err != nil { kubeconfig, err = maybeDeleteAndRetry(cmd, *starter.Cfg, *starter.Node, starter.ExistingAddons, err) if err != nil { @@ -374,6 +380,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config. Name: nodeName, Worker: true, ControlPlane: false, + ApiEndpointServer: false, KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion, } out.Ln("") // extra newline for clarity on the command line @@ -384,7 +391,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config. 
} } else { for _, n := range existing.Nodes { - if !n.ControlPlane { + if !n.ApiEndpointServer { // TODO Make this backward compatibility err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure)) if err != nil { return nil, errors.Wrap(err, "adding node") @@ -492,7 +499,7 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co cc := updateExistingConfigFromFlags(cmd, &existing) var kubeconfig *kubeconfig.Settings for _, n := range cc.Nodes { - r, p, m, h, err := node.Provision(&cc, &n, n.ControlPlane, false) + r, p, m, h, err := node.Provision(&cc, &n, false) s := node.Starter{ Runner: r, PreExists: p, @@ -507,7 +514,7 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co return nil, err } - k, err := node.Start(s, n.ControlPlane) + k, err := node.Start(s) if n.ControlPlane { kubeconfig = k } @@ -1142,6 +1149,7 @@ func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.C KubernetesVersion: getKubernetesVersion(&cc), Name: kubeNodeName, ControlPlane: true, + ApiEndpointServer: true, Worker: true, } cc.Nodes = []config.Node{cp} diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index 52d557789d6b..3455ff9621b7 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -404,6 +404,22 @@ func upgradeExistingConfig(cc *config.ClusterConfig) { cc.KicBaseImage = viper.GetString(kicBaseImage) klog.Infof("config upgrade: KicBaseImage=%s", cc.KicBaseImage) } + + needTagApiEndpointServer := true + for i := range cc.Nodes { + if cc.Nodes[i].ApiEndpointServer { + needTagApiEndpointServer = false + break + } + } + if needTagApiEndpointServer { + for i := range cc.Nodes { + if cc.Nodes[i].ControlPlane { + cc.Nodes[i].ApiEndpointServer = true + break + } + } + } } // updateExistingConfigFromFlags will update the existing config from the flags - used on a second start diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 
bfcde0880bb4..b97d41cdeb7e 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -129,13 +129,18 @@ var ( // Status holds string representations of component states type Status struct { - Name string - Host string - Kubelet string + Name string + Host string + Kubelet string + + // APIServer indicates kube-apiserver status APIServer string Kubeconfig string Worker bool TimeToStop string + // IsAPIEndpoint indicates primary control plane (api endpoint) + IsAPIEndpoint bool + IP string } // ClusterState holds a cluster state representation @@ -177,18 +182,20 @@ const ( clusterNotRunningStatusFlag = 1 << 1 k8sNotRunningStatusFlag = 1 << 2 defaultStatusFormat = `{{.Name}} -type: Control Plane +type: Control Plane{{if .IsAPIEndpoint}} (Primary){{end}} host: {{.Host}} kubelet: {{.Kubelet}} apiserver: {{.APIServer}} kubeconfig: {{.Kubeconfig}} timeToStop: {{.TimeToStop}} +IP: {{.IP}} ` workerStatusFormat = `{{.Name}} type: Worker host: {{.Host}} kubelet: {{.Kubelet}} +IP: {{.IP}} ` ) @@ -304,13 +311,15 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St name := driver.MachineName(cc, n) st := &Status{ - Name: name, - Host: Nonexistent, - APIServer: Nonexistent, - Kubelet: Nonexistent, - Kubeconfig: Nonexistent, - Worker: !controlPlane, - TimeToStop: Nonexistent, + Name: name, + Host: Nonexistent, + APIServer: Nonexistent, + Kubelet: Nonexistent, + Kubeconfig: Nonexistent, + Worker: !controlPlane, + TimeToStop: Nonexistent, + IsAPIEndpoint: n.ApiEndpointServer, + IP: n.IP, } hs, err := machine.Status(api, name) diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index 4fd0fadabc49..4cd0b6a0328c 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -34,6 +34,7 @@ import ( "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/mustload" + "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" 
"k8s.io/minikube/pkg/minikube/out/register" "k8s.io/minikube/pkg/minikube/reason" @@ -137,14 +138,30 @@ func stopProfile(profile string) int { api, cc := mustload.Partial(profile) defer api.Close() + primaryMachineName := "" for _, n := range cc.Nodes { machineName := driver.MachineName(*cc, n) + if n.ApiEndpointServer { + // Skip because we need to update etcd members + primaryMachineName = machineName + continue + } else if n.ControlPlane { + // Remove from primary control plane + node.MustReset(*cc, n, api, machineName) + } + nonexistent := stop(api, machineName) if !nonexistent { stoppedNodes++ } } + if primaryMachineName != "" { + nonexistent := stop(api, primaryMachineName) + if !nonexistent { + stoppedNodes++ + } + } if err := killMountProcess(); err != nil { out.WarningT("Unable to kill mount process: {{.error}}", out.V{"error": err}) diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 4d18749ccd23..bc65f243433d 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -41,7 +41,7 @@ type Bootstrapper interface { WaitForNode(config.ClusterConfig, config.Node, time.Duration) error JoinCluster(config.ClusterConfig, config.Node, string) error UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error - GenerateToken(config.ClusterConfig) (string, error) + GenerateToken(config.ClusterConfig, bool) (string, error) // LogCommands returns a map of log type to a command which will display that log. 
LogCommands(config.ClusterConfig, LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error diff --git a/pkg/minikube/bootstrapper/bsutil/extraconfig.go b/pkg/minikube/bootstrapper/bsutil/extraconfig.go index 7e8c04944afe..513621d87561 100644 --- a/pkg/minikube/bootstrapper/bsutil/extraconfig.go +++ b/pkg/minikube/bootstrapper/bsutil/extraconfig.go @@ -70,7 +70,7 @@ var KubeadmExtraArgsAllowed = map[int][]string{ "kubeconfig-dir", "node-name", "cri-socket", - "experimental-upload-certs", + "upload-certs", "certificate-key", "rootfs", "skip-phases", diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 48e4968e6da8..fd8befd75033 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -128,7 +128,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana CgroupDriver: cgroupDriver, ClientCAFile: path.Join(vmpath.GuestKubernetesCertsDir, "ca.crt"), StaticPodPath: vmpath.GuestManifestsDir, - ControlPlaneAddress: constants.ControlPlaneAlias, + ControlPlaneAddress: constants.ApiEndpointAlias, KubeProxyOptions: createKubeProxyOptions(k8s.ExtraOptions), } diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index 90fd9870faec..d76bcf1ef81e 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -93,9 +93,13 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) copyableFiles = append(copyableFiles, certFile) } + endpoint := net.JoinHostPort(constants.ApiEndpointAlias, fmt.Sprint(k8s.NodePort)) + if n.ApiEndpointServer { + endpoint = net.JoinHostPort("localhost", fmt.Sprint(n.Port)) + } kcs := &kubeconfig.Settings{ ClusterName: n.Name, - ClusterServerAddress: fmt.Sprintf("https://%s", net.JoinHostPort("localhost", fmt.Sprint(n.Port))), + ClusterServerAddress: fmt.Sprintf("https://%s", endpoint), ClientCertificate: 
path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.crt"), ClientKey: path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.key"), CertificateAuthority: path.Join(vmpath.GuestKubernetesCertsDir, "ca.crt"), @@ -183,7 +187,7 @@ func generateSharedCACerts() (CACerts, error) { func generateProfileCerts(k8s config.KubernetesConfig, n config.Node, ccs CACerts) ([]string, error) { // Only generate these certs for the api server - if !n.ControlPlane { + if !n.ApiEndpointServer { return []string{}, nil } @@ -201,7 +205,7 @@ func generateProfileCerts(k8s config.KubernetesConfig, n config.Node, ccs CACert apiServerIPs = append(apiServerIPs, net.ParseIP(v)) } - apiServerNames := append(k8s.APIServerNames, k8s.APIServerName, constants.ControlPlaneAlias) + apiServerNames := append(k8s.APIServerNames, k8s.APIServerName, constants.ApiEndpointAlias) apiServerAlternateNames := append( apiServerNames, util.GetAlternateDNS(k8s.DNSDomain)...) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index a85c04402431..105f8995d7fa 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -70,6 +70,8 @@ type Bootstrapper struct { contextName string } +var _ bootstrapper.Bootstrapper = (*Bootstrapper)(nil) + // NewBootstrapper creates a new kubeadm.Bootstrapper func NewBootstrapper(api libmachine.API, cc config.ClusterConfig, r command.Runner) (*Bootstrapper, error) { return &Bootstrapper{c: r, contextName: cc.Name, k8sClient: nil}, nil @@ -154,7 +156,7 @@ func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) error { return err } - endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(constants.ControlPlaneAlias, strconv.Itoa(cp.Port))) + endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(constants.ApiEndpointAlias, strconv.Itoa(cp.Port))) for _, path := range paths { _, err := k.c.RunCmd(exec.Command("sudo", "grep", endpoint, path)) if err != nil { @@ -400,9 +402,15 @@ 
func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time klog.Warningf("Couldn't ensure kubelet is started this might cause issues: %v", err) } // TODO: #7706: for better performance we could use k.client inside minikube to avoid asking for external IP:PORT - cp, err := config.PrimaryControlPlane(&cfg) - if err != nil { - return errors.Wrap(err, "get primary control plane") + var cp config.Node + var err error + if n.ControlPlane { + cp = n + } else { + cp, err = config.PrimaryControlPlane(&cfg) + if err != nil { + return errors.Wrap(err, "get primary control plane") + } } hostname, _, port, err := driver.ControlPlaneEndpoint(&cfg, &cp, cfg.Driver) if err != nil { @@ -665,6 +673,11 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC // Join the master by specifying its token joinCmd = fmt.Sprintf("%s --node-name=%s", joinCmd, driver.MachineName(cc, n)) + if n.ControlPlane { + // Specify advertise address here because we are using interface eth1 and port 8443 (by default) + // We can't use `--config bsutil.KubeadmYamlPath` here because cannot mix '--config' with [certificate-key control-plane discovery-token-ca-cert-hash token] + joinCmd = fmt.Sprintf("%s --control-plane --apiserver-advertise-address %s --apiserver-bind-port %v", joinCmd, n.IP, n.Port) + } join := func() error { // reset first to clear any possibly existing state @@ -695,17 +708,59 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC } // GenerateToken creates a token and returns the appropriate kubeadm join command to run, or the already existing token -func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { +func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig, genCertKey bool) (string, error) { + ka := bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion) + // Take that generated token and use it to get a kubeadm join command - tokenCmd := exec.Command("/bin/bash", "-c", 
fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion))) + tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", ka)) r, err := k.c.RunCmd(tokenCmd) if err != nil { return "", errors.Wrap(err, "generating join command") } joinCmd := r.Stdout.String() - joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), 1) + joinCmd = strings.Replace(joinCmd, "kubeadm", ka, 1) joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd)) + if genCertKey { + // Generate config first because init phase upload-certs cannot specify --cert-dir + confPath := path.Join(vmpath.GuestPersistentDir, "kubeadm-conf.yaml") + // TODO kubeadm config view is deprecated + conf, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s config view", ka))) + if err != nil { + return "", errors.Wrap(err, "generate kubeadm-conf") + } + + confAsset := assets.NewMemoryAssetTarget(conf.Stdout.Bytes(), confPath, "0644") + err = bsutil.CopyFiles(k.c, []assets.CopyableFile{ + confAsset, + }) + if err != nil { + return "", errors.Wrap(err, "write kubeadm-conf") + } + + certCmd := fmt.Sprintf("%s init phase upload-certs --upload-certs --config %s", ka, confPath) + out.Step(style.Tip, certCmd) + certKeyCmd := exec.Command("/bin/bash", "-c", certCmd) + certKeyResult, err := k.c.RunCmd(certKeyCmd) + if err != nil { + return "", errors.Wrap(err, "generating certificate key") + } + out.Step(style.Tip, certKeyResult.Stdout.String()) + // Currently we have to parse stdout manually to get certificate key + outputs := strings.Split(certKeyResult.Stdout.String(), "\n") + + certKey := "" + for i, s := range outputs { + if strings.Contains(s, "Using certificate key") && i+1 < len(outputs) { + certKey = outputs[i+1] + break + } + } + if certKey == "" { + return "", errors.New("failed to parse certificate key from kubeadm output") + } + joinCmd = fmt.Sprintf("%s 
--certificate-key %s", joinCmd, certKey) + } if cc.KubernetesConfig.CRISocket != "" { joinCmd = fmt.Sprintf("%s --cri-socket %s", joinCmd, cc.KubernetesConfig.CRISocket) } @@ -853,7 +908,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru return errors.Wrap(err, "control plane") } - if err := machine.AddHostAlias(k.c, constants.ControlPlaneAlias, net.ParseIP(cp.IP)); err != nil { + if err := machine.AddHostAlias(k.c, constants.ApiEndpointAlias, net.ParseIP(cp.IP)); err != nil { return errors.Wrap(err, "host alias") } diff --git a/pkg/minikube/cni/kindnet.go b/pkg/minikube/cni/kindnet.go index cf7a18301900..1ba80cc46752 100644 --- a/pkg/minikube/cni/kindnet.go +++ b/pkg/minikube/cni/kindnet.go @@ -149,7 +149,7 @@ type KindNet struct { // String returns a string representation of this CNI func (c KindNet) String() string { - return "CNI" + return "KindNet" } // manifest returns a Kubernetes manifest for a CNI diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index 5bb5fc2f9d7d..ab25d1991671 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -52,7 +52,12 @@ func (p *Profile) IsValid() bool { // PrimaryControlPlane gets the node specific config for the first created control plane func PrimaryControlPlane(cc *ClusterConfig) (Node, error) { for _, n := range cc.Nodes { - if n.ControlPlane { + if n.ApiEndpointServer { + return n, nil + } + } + for _, n := range cc.Nodes { + if n.ControlPlane { // keep n.ControlPlane for backward compatibility return n, nil } } @@ -64,6 +69,7 @@ func PrimaryControlPlane(cc *ClusterConfig) (Node, error) { Port: cc.KubernetesConfig.NodePort, KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, ControlPlane: true, + ApiEndpointServer: true, Worker: true, } diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go index d95e9f7c85f4..18ada36dfd23 100644 --- a/pkg/minikube/config/types.go +++ b/pkg/minikube/config/types.go @@ -112,6 
+112,7 @@ type Node struct { IP string Port int KubernetesVersion string + ApiEndpointServer bool ControlPlane bool Worker bool } diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index c14a97443f17..0207473fda41 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -54,8 +54,8 @@ const ( DefaultServiceCIDR = "10.96.0.0/12" // HostAlias is a DNS alias to the the container/VM host IP HostAlias = "host.minikube.internal" - // ControlPlaneAlias is a DNS alias pointing to the apiserver frontend - ControlPlaneAlias = "control-plane.minikube.internal" + // ApiEndpointAlias is a DNS alias pointing to the apiserver frontend + ApiEndpointAlias = "control-plane.minikube.internal" // DockerHostEnv is used for docker daemon settings DockerHostEnv = "DOCKER_HOST" diff --git a/pkg/minikube/driver/driver.go b/pkg/minikube/driver/driver.go index b890ce0e4500..f970dbc1a354 100644 --- a/pkg/minikube/driver/driver.go +++ b/pkg/minikube/driver/driver.go @@ -308,7 +308,7 @@ func SetLibvirtURI(v string) { // MachineName returns the name of the machine, as seen by the hypervisor given the cluster and node names func MachineName(cc config.ClusterConfig, n config.Node) string { // For single node cluster, default to back to old naming - if len(cc.Nodes) == 1 || n.ControlPlane { + if len(cc.Nodes) == 1 || n.ApiEndpointServer || (n.ControlPlane && n.Name == "") { return cc.Name } return fmt.Sprintf("%s-%s", cc.Name, n.Name) diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 042ccf5d5f54..2ba589e191e6 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -20,14 +20,17 @@ import ( "fmt" "os/exec" + "github.com/docker/machine/libmachine" "github.com/pkg/errors" "github.com/spf13/viper" - "k8s.io/klog/v2" "k8s.io/minikube/pkg/kapi" + "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" + 
"k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/reason" ) // TODO: Share these between cluster and node packages @@ -42,7 +45,7 @@ func Add(cc *config.ClusterConfig, n config.Node, delOnFail bool) error { return errors.Wrap(err, "save node") } - r, p, m, h, err := Provision(cc, &n, false, delOnFail) + r, p, m, h, err := Provision(cc, &n, delOnFail) if err != nil { return err } @@ -56,7 +59,7 @@ func Add(cc *config.ClusterConfig, n config.Node, delOnFail bool) error { ExistingAddons: nil, } - _, err = Start(s, false) + _, err = Start(s) return err } @@ -151,3 +154,25 @@ func Save(cfg *config.ClusterConfig, node *config.Node) error { func Name(index int) string { return fmt.Sprintf("m%02d", index) } + +// MustReset resets a stacked (non-primary) control plane via kubeadm so it does not block the start of the primary control plane. +// Exits the process if the reset fails. +func MustReset(cc config.ClusterConfig, n config.Node, api libmachine.API, machineName string) { + if n.ControlPlane && !n.ApiEndpointServer { + // Only stacked control planes are reset; the primary (API endpoint) keeps its state + host, err := machine.LoadHost(api, machineName) + if err != nil { + exit.Error(reason.GuestLoadHost, "Failed to load host", err) + } + runner, err := machine.CommandRunner(host) + if err != nil { + exit.Error(reason.InternalCommandRunner, "Failed to get command runner", err) + } + resetCmd := fmt.Sprintf("%s reset -f", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion)) + rc := exec.Command("/bin/bash", "-c", resetCmd) + _, err = runner.RunCmd(rc) + if err != nil { + exit.Error(reason.GuestNodeReset, "Failed to reset kubeadm", err) + } + } +} diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index 5995270aab68..d5f615a43215 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -80,7 +80,8 @@ type Starter struct { } // Start spins up a guest and starts the Kubernetes node. 
-func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { +func Start(starter Starter) (*kubeconfig.Settings, error) { + apiEndpointServer := starter.Node.ApiEndpointServer // wait for preloaded tarball to finish downloading before configuring runtimes waitCacheRequiredImages(&cacheGroup) @@ -103,7 +104,8 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { var bs bootstrapper.Bootstrapper var kcs *kubeconfig.Settings - if apiServer { + if apiEndpointServer { + out.Step(style.Tip, "Preparing control plane node...") // Must be written before bootstrap, otherwise health checks may flake due to stale IP kcs = setupKubeconfig(starter.Host, starter.Cfg, starter.Node, starter.Cfg.Name) if err != nil { @@ -119,11 +121,14 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { return nil, err } - // write the kubeconfig to the file system after everything required (like certs) are created by the bootstrapper - if err := kubeconfig.Update(kcs); err != nil { - return nil, errors.Wrap(err, "Failed kubeconfig update") + if apiEndpointServer { + // write the kubeconfig to the file system after everything required (like certs) are created by the bootstrapper + if err := kubeconfig.Update(kcs); err != nil { + return nil, errors.Wrap(err, "Failed kubeconfig update") + } } } else { + out.Step(style.Tip, "Preparing worker node...") bs, err = cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, starter.Runner) if err != nil { return nil, errors.Wrap(err, "Failed to get bootstrapper") @@ -163,24 +168,26 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { wg.Done() }() - if apiServer { - // special ops for none , like change minikube directory. + if apiEndpointServer { + // special ops for none, like change minikube directory. 
// multinode super doesn't work on the none driver if starter.Cfg.Driver == driver.None && len(starter.Cfg.Nodes) == 1 { prepareNone() } } else { // Make sure to use the command runner for the control plane to generate the join token + out.Step(style.Tip, "Preparing kubeadm...") cpBs, cpr, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper)) if err != nil { return nil, errors.Wrap(err, "getting control plane bootstrapper") } - joinCmd, err := cpBs.GenerateToken(*starter.Cfg) + joinCmd, err := cpBs.GenerateToken(*starter.Cfg, starter.Node.ControlPlane) if err != nil { return nil, errors.Wrap(err, "generating join token") } + out.Step(style.Tip, "Joining cluster...") if err = bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd); err != nil { return nil, errors.Wrap(err, "joining cluster") } @@ -190,6 +197,7 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { return nil, errors.Wrap(err, "cni") } + out.Step(style.Tip, fmt.Sprintf("Applying CNI %s...", cnm.String())) if err := cnm.Apply(cpr); err != nil { return nil, errors.Wrap(err, "cni apply") } @@ -208,10 +216,10 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { } // Provision provisions the machine/container for the node -func Provision(cc *config.ClusterConfig, n *config.Node, apiServer bool, delOnFail bool) (command.Runner, bool, libmachine.API, *host.Host, error) { +func Provision(cc *config.ClusterConfig, n *config.Node, delOnFail bool) (command.Runner, bool, libmachine.API, *host.Host, error) { register.Reg.SetStep(register.StartingNode) name := driver.MachineName(*cc, *n) - if apiServer { + if n.ControlPlane { out.Step(style.ThumbsUp, "Starting control plane node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) } else { out.Step(style.ThumbsUp, "Starting node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) diff --git 
a/pkg/minikube/reason/reason.go b/pkg/minikube/reason/reason.go index d3602e59c442..993c9362ff1b 100644 --- a/pkg/minikube/reason/reason.go +++ b/pkg/minikube/reason/reason.go @@ -242,6 +242,7 @@ var ( GuestMount = Kind{ID: "GUEST_MOUNT", ExitCode: ExGuestError} GuestMountConflict = Kind{ID: "GUEST_MOUNT_CONFLICT", ExitCode: ExGuestConflict} GuestNodeAdd = Kind{ID: "GUEST_NODE_ADD", ExitCode: ExGuestError} + GuestNodeReset = Kind{ID: "GUEST_NODE_RESET", ExitCode: ExGuestError} GuestNodeDelete = Kind{ID: "GUEST_NODE_DELETE", ExitCode: ExGuestError} GuestNodeProvision = Kind{ID: "GUEST_NODE_PROVISION", ExitCode: ExGuestError} GuestNodeRetrieve = Kind{ID: "GUEST_NODE_RETRIEVE", ExitCode: ExGuestNotFound}