From 51a2576a3a8ba30ee10a478644ac9c74b5a5488a Mon Sep 17 00:00:00 2001 From: Ling Samuel Date: Mon, 11 Jan 2021 15:51:06 +0800 Subject: [PATCH 01/10] Support multi control plane Signed-off-by: Ling Samuel --- cmd/minikube/cmd/node_add.go | 14 +++- cmd/minikube/cmd/node_start.go | 4 +- cmd/minikube/cmd/node_stop.go | 2 +- cmd/minikube/cmd/start.go | 18 +++-- cmd/minikube/cmd/start_flags.go | 16 ++++ cmd/minikube/cmd/status.go | 66 ++++++++++------- cmd/minikube/cmd/status_test.go | 14 ++-- cmd/minikube/cmd/stop.go | 17 +++++ pkg/minikube/bootstrapper/bootstrapper.go | 2 +- .../bootstrapper/bsutil/extraconfig.go | 2 +- pkg/minikube/bootstrapper/bsutil/kubeadm.go | 2 +- pkg/minikube/bootstrapper/certs.go | 11 ++- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 73 ++++++++++++++++--- pkg/minikube/cni/kindnet.go | 2 +- pkg/minikube/config/profile.go | 10 ++- pkg/minikube/config/types.go | 1 + pkg/minikube/constants/constants.go | 4 +- pkg/minikube/node/node.go | 30 +++++++- pkg/minikube/node/start.go | 20 +++-- pkg/minikube/reason/reason.go | 1 + site/content/en/docs/commands/start.md | 2 +- site/content/en/docs/commands/status.md | 2 +- 22 files changed, 239 insertions(+), 74 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 622a2d3d018d..20438ce983a7 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -21,11 +21,13 @@ import ( "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/cni" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/out/register" "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/style" ) @@ -49,16 +51,25 @@ var nodeAddCmd = &cobra.Command{ name := node.Name(len(cc.Nodes) + 1) - out.Step(style.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) + if cp { + out.Step(style.Happy, "Adding control plane node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) + } else { + out.Step(style.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) + } // TODO: Deal with parameters better. Ideally we should be able to acceot any node-specific minikube start params here. 
n := config.Node{
 Name: name,
 Worker: worker,
 ControlPlane: cp,
+ APIEndpointServer: false,
 KubernetesVersion: cc.KubernetesConfig.KubernetesVersion,
 }
 
+ if n.ControlPlane {
+ n.Port = constants.APIServerPort
+ }
+
 // Make sure to decrease the default amount of memory we use per VM if this is the first worker node
 if len(cc.Nodes) == 1 {
 if viper.GetString(memory) == "" {
@@ -70,6 +81,7 @@ var nodeAddCmd = &cobra.Command{
 }
 }
 
+ register.Reg.SetStep(register.InitialSetup)
 if err := node.Add(cc, n, false); err != nil {
 _, err := maybeDeleteAndRetry(cmd, *cc, n, nil, err)
 if err != nil {
diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go
index dda95ca8eb75..45b4025b1d64 100644
--- a/cmd/minikube/cmd/node_start.go
+++ b/cmd/minikube/cmd/node_start.go
@@ -56,7 +56,7 @@ var nodeStartCmd = &cobra.Command{
 }
 
 register.Reg.SetStep(register.InitialSetup)
- r, p, m, h, err := node.Provision(cc, n, n.ControlPlane, viper.GetBool(deleteOnFailure))
+ r, p, m, h, err := node.Provision(cc, n, viper.GetBool(deleteOnFailure))
 if err != nil {
 exit.Error(reason.GuestNodeProvision, "provisioning host for node", err)
 }
@@ -71,7 +71,7 @@ var nodeStartCmd = &cobra.Command{
 ExistingAddons: nil,
 }
 
- _, err = node.Start(s, n.ControlPlane)
+ _, err = node.Start(s)
 if err != nil {
 _, err := maybeDeleteAndRetry(cmd, *cc, *n, nil, err)
 if err != nil {
diff --git a/cmd/minikube/cmd/node_stop.go b/cmd/minikube/cmd/node_stop.go
index 65c92f71f8af..5d747f2c9ae5 100644
--- a/cmd/minikube/cmd/node_stop.go
+++ b/cmd/minikube/cmd/node_stop.go
@@ -46,7 +46,7 @@ var nodeStopCmd = &cobra.Command{
 }
 
 machineName := config.MachineName(*cc, *n)
-
+ node.MustReset(*cc, *n, api, machineName)
 err = machine.StopHost(api, machineName)
 if err != nil {
 out.FatalT("Failed to stop node {{.name}}", out.V{"name": name})
diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go
index 13df195611fb..42524f239bc8 100644
--- a/cmd/minikube/cmd/start.go
+++ b/cmd/minikube/cmd/start.go
@@ -328,7 +328,7 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *
 ssh.SetDefaultClient(ssh.External)
 }
 
- mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, true, viper.GetBool(deleteOnFailure))
+ mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, viper.GetBool(deleteOnFailure))
 if err != nil {
 return node.Starter{}, err
 }
@@ -345,7 +345,13 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *
 }
 
 func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) {
- kubeconfig, err := node.Start(starter, true)
+ // TODO: Currently, we start the primary control plane first. If there are multiple control planes,
+ // the kube-apiserver will keep crashing while it waits for the other apiservers to respond, which blocks health checks.
+ // As a temporary solution, we reset the stacked control planes before stopping them.
+ // To fix this, we could:
+ // - Delay the health check.
+ // - Start all control planes at the same time.
+ kubeconfig, err := node.Start(starter)
 if err != nil {
 kubeconfig, err = maybeDeleteAndRetry(cmd, *starter.Cfg, *starter.Node, starter.ExistingAddons, err)
 if err != nil {
@@ -372,6 +378,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.
Name: nodeName, Worker: true, ControlPlane: false, + APIEndpointServer: false, KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion, } out.Ln("") // extra newline for clarity on the command line @@ -382,7 +389,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config. } } else { for _, n := range existing.Nodes { - if !n.ControlPlane { + if !n.APIEndpointServer { err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure)) if err != nil { return nil, errors.Wrap(err, "adding node") @@ -489,7 +496,7 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co cc := updateExistingConfigFromFlags(cmd, &existing) var kubeconfig *kubeconfig.Settings for _, n := range cc.Nodes { - r, p, m, h, err := node.Provision(&cc, &n, n.ControlPlane, false) + r, p, m, h, err := node.Provision(&cc, &n, false) s := node.Starter{ Runner: r, PreExists: p, @@ -504,7 +511,7 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co return nil, err } - k, err := node.Start(s, n.ControlPlane) + k, err := node.Start(s) if n.ControlPlane { kubeconfig = k } @@ -1194,6 +1201,7 @@ func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.C KubernetesVersion: getKubernetesVersion(&cc), Name: kubeNodeName, ControlPlane: true, + APIEndpointServer: true, Worker: true, } cc.Nodes = []config.Node{cp} diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index 8cd3fdc6acc0..9ac44eddf19a 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -412,6 +412,22 @@ func upgradeExistingConfig(cc *config.ClusterConfig) { cc.KicBaseImage = viper.GetString(kicBaseImage) klog.Infof("config upgrade: KicBaseImage=%s", cc.KicBaseImage) } + + needTagAPIEndpointServer := true + for i := range cc.Nodes { + if cc.Nodes[i].APIEndpointServer { + needTagAPIEndpointServer = false + break + } + } + if needTagAPIEndpointServer { + for i := range cc.Nodes { + if cc.Nodes[i].ControlPlane { + cc.Nodes[i].APIEndpointServer = true + break + } + } + } } // updateExistingConfigFromFlags will update the existing config from the flags - used on a second start diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 43b191973b16..f6aae7b782bd 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -129,13 +129,18 @@ var ( // Status holds string representations of component states type Status struct { - Name string - Host string - Kubelet string + Name string + Host string + Kubelet string + + // APIServer indicates kube-apiserver status APIServer string Kubeconfig string Worker bool TimeToStop string + // IsAPIEndpoint indicates primary control plane (api endpoint) + IsAPIEndpoint bool + IP string } // ClusterState holds a cluster state representation @@ -177,18 +182,20 @@ const ( clusterNotRunningStatusFlag = 1 << 1 k8sNotRunningStatusFlag = 1 << 2 defaultStatusFormat = `{{.Name}} -type: Control Plane +type: Control Plane{{if .IsAPIEndpoint}} (Primary){{end}} host: {{.Host}} kubelet: {{.Kubelet}} apiserver: {{.APIServer}} kubeconfig: {{.Kubeconfig}} timeToStop: {{.TimeToStop}} +IP: {{.IP}} ` workerStatusFormat = `{{.Name}} type: Worker host: {{.Host}} kubelet: {{.Kubelet}} +IP: {{.IP}} ` ) @@ -302,15 +309,18 @@ func exitCode(statuses []*Status) int { func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status, error) { controlPlane := n.ControlPlane name := config.MachineName(cc, n) + apiEndpoint := n.APIEndpointServer st := &Status{ 
- Name: name, - Host: Nonexistent, - APIServer: Nonexistent, - Kubelet: Nonexistent, - Kubeconfig: Nonexistent, - Worker: !controlPlane, - TimeToStop: Nonexistent, + Name: name, + Host: Nonexistent, + APIServer: Nonexistent, + Kubelet: Nonexistent, + Kubeconfig: Nonexistent, + Worker: !controlPlane, + TimeToStop: Nonexistent, + IsAPIEndpoint: n.APIEndpointServer, + IP: n.IP, } hs, err := machine.Status(api, name) @@ -343,9 +353,11 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St st.Kubeconfig = Configured if !controlPlane { - st.Kubeconfig = Irrelevant st.APIServer = Irrelevant } + if !apiEndpoint { + st.Kubeconfig = Irrelevant + } host, err := machine.LoadHost(api, name) if err != nil { @@ -380,25 +392,29 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St } hostname, _, port, err := driver.ControlPlaneEndpoint(&cc, &n, host.DriverName) - if err != nil { - klog.Errorf("forwarded endpoint: %v", err) - st.Kubeconfig = Misconfigured - } else { - err := kubeconfig.VerifyEndpoint(cc.Name, hostname, port) + if st.Kubeconfig != Irrelevant { if err != nil { - klog.Errorf("kubeconfig endpoint: %v", err) + klog.Errorf("forwarded endpoint: %v", err) st.Kubeconfig = Misconfigured + } else { + err := kubeconfig.VerifyEndpoint(cc.Name, hostname, port) + if err != nil { + klog.Errorf("kubeconfig endpoint: %v", err) + st.Kubeconfig = Misconfigured + } } } - sta, err := kverify.APIServerStatus(cr, hostname, port) - klog.Infof("%s apiserver status = %s (err=%v)", name, stk, err) + if st.APIServer != Irrelevant { + sta, err := kverify.APIServerStatus(cr, hostname, port) + klog.Infof("%s apiserver status = %s (err=%v)", name, stk, err) - if err != nil { - klog.Errorln("Error apiserver status:", err) - st.APIServer = state.Error.String() - } else { - st.APIServer = sta.String() + if err != nil { + klog.Errorln("Error apiserver status:", err) + st.APIServer = state.Error.String() + } else { + st.APIServer = sta.String() + } } return st, nil diff --git a/cmd/minikube/cmd/status_test.go b/cmd/minikube/cmd/status_test.go index aa9f905c22af..f5a663e1d4b3 100644 --- a/cmd/minikube/cmd/status_test.go +++ b/cmd/minikube/cmd/status_test.go @@ -51,18 +51,18 @@ func TestStatusText(t *testing.T) { }{ { name: "ok", - state: &Status{Name: "minikube", Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured, TimeToStop: "10m"}, - want: "minikube\ntype: Control Plane\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\ntimeToStop: 10m\n\n", + state: &Status{Name: "minikube", Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured, TimeToStop: "10m", IP: "192.168.39.10"}, + want: "minikube\ntype: Control Plane\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\ntimeToStop: 10m\nIP: 192.168.39.10\n\n", }, { name: "paused", - state: &Status{Name: "minikube", Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured, TimeToStop: Nonexistent}, - want: "minikube\ntype: Control Plane\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\ntimeToStop: Nonexistent\n\n", + state: &Status{Name: "minikube", Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured, TimeToStop: Nonexistent, IP: "192.168.39.10"}, + want: "minikube\ntype: Control Plane\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\ntimeToStop: Nonexistent\nIP: 192.168.39.10\n\n", }, { name: "down", - state: 
&Status{Name: "minikube", Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured, TimeToStop: Nonexistent}, - want: "minikube\ntype: Control Plane\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\ntimeToStop: Nonexistent\n\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", + state: &Status{Name: "minikube", Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured, TimeToStop: Nonexistent, IP: "192.168.39.10"}, + want: "minikube\ntype: Control Plane\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\ntimeToStop: Nonexistent\nIP: 192.168.39.10\n\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", }, } for _, tc := range tests { @@ -75,7 +75,7 @@ func TestStatusText(t *testing.T) { got := b.String() if got != tc.want { - t.Errorf("text(%+v) = %q, want: %q", tc.state, got, tc.want) + t.Errorf("text(%+v)\n got: %q\nwant: %q", tc.state, got, tc.want) } }) } diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index bc2e2d6b51f4..6d7334449c97 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -33,6 +33,7 @@ import ( "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/mustload" + "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/out/register" "k8s.io/minikube/pkg/minikube/reason" @@ -136,14 +137,30 @@ func stopProfile(profile string) int { api, cc := mustload.Partial(profile) defer api.Close() + primaryMachineName := "" for _, n := range cc.Nodes { machineName := config.MachineName(*cc, n) + if n.APIEndpointServer { + // Skip because we need to update etcd members + primaryMachineName = machineName + continue + } else if n.ControlPlane { + // Remove from primary control plane + node.MustReset(*cc, n, api, machineName) + } + nonexistent := stop(api, machineName) if !nonexistent { stoppedNodes++ } } + if primaryMachineName != "" { + nonexistent := stop(api, primaryMachineName) + if !nonexistent { + stoppedNodes++ + } + } if err := killMountProcess(); err != nil { out.WarningT("Unable to kill mount process: {{.error}}", out.V{"error": err}) diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 4d18749ccd23..bc65f243433d 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -41,7 +41,7 @@ type Bootstrapper interface { WaitForNode(config.ClusterConfig, config.Node, time.Duration) error JoinCluster(config.ClusterConfig, config.Node, string) error UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error - GenerateToken(config.ClusterConfig) (string, error) + GenerateToken(config.ClusterConfig, bool) (string, error) // LogCommands returns a map of log type to a command which will display that log. 
LogCommands(config.ClusterConfig, LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error diff --git a/pkg/minikube/bootstrapper/bsutil/extraconfig.go b/pkg/minikube/bootstrapper/bsutil/extraconfig.go index 7e8c04944afe..513621d87561 100644 --- a/pkg/minikube/bootstrapper/bsutil/extraconfig.go +++ b/pkg/minikube/bootstrapper/bsutil/extraconfig.go @@ -70,7 +70,7 @@ var KubeadmExtraArgsAllowed = map[int][]string{ "kubeconfig-dir", "node-name", "cri-socket", - "experimental-upload-certs", + "upload-certs", "certificate-key", "rootfs", "skip-phases", diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 04ce5dd7669f..1a58ee1af267 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -128,7 +128,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana CgroupDriver: cgroupDriver, ClientCAFile: path.Join(vmpath.GuestKubernetesCertsDir, "ca.crt"), StaticPodPath: vmpath.GuestManifestsDir, - ControlPlaneAddress: constants.ControlPlaneAlias, + ControlPlaneAddress: constants.APIEndpointAlias, KubeProxyOptions: createKubeProxyOptions(k8s.ExtraOptions), } diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index 90fd9870faec..1a438bb60f15 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -93,9 +93,13 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) copyableFiles = append(copyableFiles, certFile) } + endpoint := net.JoinHostPort(constants.APIEndpointAlias, fmt.Sprint(k8s.NodePort)) + if n.APIEndpointServer { + endpoint = net.JoinHostPort("localhost", fmt.Sprint(n.Port)) + } kcs := &kubeconfig.Settings{ ClusterName: n.Name, - ClusterServerAddress: fmt.Sprintf("https://%s", net.JoinHostPort("localhost", fmt.Sprint(n.Port))), + ClusterServerAddress: fmt.Sprintf("https://%s", endpoint), ClientCertificate: path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.crt"), ClientKey: path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.key"), CertificateAuthority: path.Join(vmpath.GuestKubernetesCertsDir, "ca.crt"), @@ -111,7 +115,6 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) if err != nil { return nil, errors.Wrap(err, "encoding kubeconfig") } - if n.ControlPlane { kubeCfgFile := assets.NewMemoryAsset(data, vmpath.GuestPersistentDir, "kubeconfig", "0644") copyableFiles = append(copyableFiles, kubeCfgFile) @@ -183,7 +186,7 @@ func generateSharedCACerts() (CACerts, error) { func generateProfileCerts(k8s config.KubernetesConfig, n config.Node, ccs CACerts) ([]string, error) { // Only generate these certs for the api server - if !n.ControlPlane { + if !n.APIEndpointServer { return []string{}, nil } @@ -201,7 +204,7 @@ func generateProfileCerts(k8s config.KubernetesConfig, n config.Node, ccs CACert apiServerIPs = append(apiServerIPs, net.ParseIP(v)) } - apiServerNames := append(k8s.APIServerNames, k8s.APIServerName, constants.ControlPlaneAlias) + apiServerNames := append(k8s.APIServerNames, k8s.APIServerName, constants.APIEndpointAlias) apiServerAlternateNames := append( apiServerNames, util.GetAlternateDNS(k8s.DNSDomain)...) 
diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 4b0fc2e10846..9665d5386e96 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -72,6 +72,8 @@ type Bootstrapper struct { contextName string } +var _ bootstrapper.Bootstrapper = (*Bootstrapper)(nil) + // NewBootstrapper creates a new kubeadm.Bootstrapper func NewBootstrapper(api libmachine.API, cc config.ClusterConfig, r command.Runner) (*Bootstrapper, error) { return &Bootstrapper{c: r, contextName: cc.Name, k8sClient: nil}, nil @@ -156,7 +158,7 @@ func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) error { return err } - endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(constants.ControlPlaneAlias, strconv.Itoa(cp.Port))) + endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(constants.APIEndpointAlias, strconv.Itoa(cp.Port))) for _, path := range paths { _, err := k.c.RunCmd(exec.Command("sudo", "grep", endpoint, path)) if err != nil { @@ -234,7 +236,7 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { ctx, cancel := context.WithTimeout(context.Background(), initTimeoutMinutes*time.Minute) defer cancel() kr, kw := io.Pipe() - c := exec.CommandContext(ctx, "/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", + c := exec.CommandContext(ctx, "/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s --upload-certs", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ","))) c.Stdout = kw c.Stderr = kw @@ -445,9 +447,15 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time klog.Warningf("Couldn't ensure kubelet is started this might cause issues: %v", err) } // TODO: #7706: for better performance we could use k.client inside minikube to avoid asking for external IP:PORT - cp, err := config.PrimaryControlPlane(&cfg) - if err != nil { - return errors.Wrap(err, "get primary control plane") + var cp config.Node + var err error + if n.ControlPlane { + cp = n + } else { + cp, err = config.PrimaryControlPlane(&cfg) + if err != nil { + return errors.Wrap(err, "get primary control plane") + } } hostname, _, port, err := driver.ControlPlaneEndpoint(&cfg, &cp, cfg.Driver) if err != nil { @@ -710,6 +718,11 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC // Join the master by specifying its token joinCmd = fmt.Sprintf("%s --node-name=%s", joinCmd, config.MachineName(cc, n)) + if n.ControlPlane { + // Specify advertise address here because we are using interface eth1 and port 8443 (by default) + // We can't use `--config bsutil.KubeadmYamlPath` here because cannot mix '--config' with [certificate-key control-plane discovery-token-ca-cert-hash token] + joinCmd = fmt.Sprintf("%s --control-plane --apiserver-advertise-address %s --apiserver-bind-port %v", joinCmd, n.IP, n.Port) + } join := func() error { // reset first to clear any possibly existing state @@ -740,17 +753,59 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC } // GenerateToken creates a token and returns the appropriate kubeadm join command to run, or the already existing token -func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { +func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig, genCertKey bool) (string, error) { + ka := bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion) + // Take that 
generated token and use it to get a kubeadm join command
- tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion)))
+ tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", ka))
 r, err := k.c.RunCmd(tokenCmd)
 if err != nil {
 return "", errors.Wrap(err, "generating join command")
 }
 joinCmd := r.Stdout.String()
- joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), 1)
+ joinCmd = strings.Replace(joinCmd, "kubeadm", ka, 1)
 joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd))
+ if genCertKey {
+ // Generate config first because init phase upload-certs cannot specify --cert-dir
+ confPath := path.Join(vmpath.GuestPersistentDir, "kubeadm-conf.yaml")
+ // TODO kubeadm config view is deprecated
+ conf, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s config view", ka)))
+ if err != nil {
+ return "", errors.Wrap(err, "generate kubeadm-conf")
+ }
+
+ confAsset := assets.NewMemoryAssetTarget(conf.Stdout.Bytes(), confPath, "0644")
+ err = bsutil.CopyFiles(k.c, []assets.CopyableFile{
+ confAsset,
+ })
+ if err != nil {
+ return "", errors.Wrap(err, "write kubeadm-conf")
+ }
+
+ certCmd := fmt.Sprintf("%s init phase upload-certs --upload-certs --config %s", ka, confPath)
+ out.Step(style.Tip, certCmd)
+ certKeyCmd := exec.Command("/bin/bash", "-c", certCmd)
+ certKeyResult, err := k.c.RunCmd(certKeyCmd)
+ if err != nil {
+ return "", errors.Wrap(err, "generating join command")
+ }
+ out.Step(style.Tip, certKeyResult.Stdout.String())
+ // Currently we have to parse stdout manually to get certificate key
+ outputs := strings.Split(certKeyResult.Stdout.String(), "\n")
+
+ certKey := ""
+ for i, s := range outputs {
+ if strings.Contains(s, "Using certificate key") {
+ certKey = outputs[i+1]
+ break
+ }
+ }
+ if certKey == "" {
+ return "", errors.New("failed to parse certificate key from kubeadm output")
+ }
+ joinCmd = fmt.Sprintf("%s --certificate-key %s", joinCmd, certKey)
+ }
 if cc.KubernetesConfig.CRISocket != "" {
 joinCmd = fmt.Sprintf("%s --cri-socket %s", joinCmd, cc.KubernetesConfig.CRISocket)
 }
@@ -898,7 +953,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru
 return errors.Wrap(err, "control plane")
 }
 
- if err := machine.AddHostAlias(k.c, constants.ControlPlaneAlias, net.ParseIP(cp.IP)); err != nil {
+ if err := machine.AddHostAlias(k.c, constants.APIEndpointAlias, net.ParseIP(cp.IP)); err != nil {
 return errors.Wrap(err, "host alias")
 }
 
diff --git a/pkg/minikube/cni/kindnet.go b/pkg/minikube/cni/kindnet.go
index cf7a18301900..1ba80cc46752 100644
--- a/pkg/minikube/cni/kindnet.go
+++ b/pkg/minikube/cni/kindnet.go
@@ -149,7 +149,7 @@ type KindNet struct {
 
 // String returns a string representation of this CNI
 func (c KindNet) String() string {
- return "CNI"
+ return "KindNet"
 }
 
 // manifest returns a Kubernetes manifest for a CNI
diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go
index a632f0485958..766c6937a2c2 100644
--- a/pkg/minikube/config/profile.go
+++ b/pkg/minikube/config/profile.go
@@ -53,7 +53,12 @@ func (p *Profile) IsValid() bool {
 // PrimaryControlPlane gets the node specific config for the first created control plane
 func PrimaryControlPlane(cc *ClusterConfig) (Node, error) {
 for _, n := range cc.Nodes {
- if n.ControlPlane {
+ if n.APIEndpointServer {
+ return n, nil
+ }
+ }
+ for _, n := 
range cc.Nodes { + if n.ControlPlane { // keep n.ControlPlane for backward compatibility return n, nil } } @@ -65,6 +70,7 @@ func PrimaryControlPlane(cc *ClusterConfig) (Node, error) { Port: cc.KubernetesConfig.NodePort, KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, ControlPlane: true, + APIEndpointServer: true, Worker: true, } @@ -291,7 +297,7 @@ func ProfileFolderPath(profile string, miniHome ...string) string { // MachineName returns the name of the machine, as seen by the hypervisor given the cluster and node names func MachineName(cc ClusterConfig, n Node) string { // For single node cluster, default to back to old naming - if len(cc.Nodes) == 1 || n.ControlPlane { + if len(cc.Nodes) == 1 || n.APIEndpointServer || (n.ControlPlane && n.Name == "") { return cc.Name } return fmt.Sprintf("%s-%s", cc.Name, n.Name) diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go index 00cc1b54d4d3..2d7fc9fe8dca 100644 --- a/pkg/minikube/config/types.go +++ b/pkg/minikube/config/types.go @@ -114,6 +114,7 @@ type Node struct { IP string Port int KubernetesVersion string + APIEndpointServer bool ControlPlane bool Worker bool } diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index c14a97443f17..ae82f2cc7354 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -54,8 +54,8 @@ const ( DefaultServiceCIDR = "10.96.0.0/12" // HostAlias is a DNS alias to the the container/VM host IP HostAlias = "host.minikube.internal" - // ControlPlaneAlias is a DNS alias pointing to the apiserver frontend - ControlPlaneAlias = "control-plane.minikube.internal" + // APIEndpointAlias is a DNS alias pointing to the apiserver frontend + APIEndpointAlias = "control-plane.minikube.internal" // DockerHostEnv is used for docker daemon settings DockerHostEnv = "DOCKER_HOST" diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 672e52276d54..09ff9ba4a723 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -20,14 +20,17 @@ import ( "fmt" "os/exec" + "github.com/docker/machine/libmachine" "github.com/pkg/errors" "github.com/spf13/viper" - "k8s.io/klog/v2" "k8s.io/minikube/pkg/kapi" + "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/reason" ) // TODO: Share these between cluster and node packages @@ -42,7 +45,7 @@ func Add(cc *config.ClusterConfig, n config.Node, delOnFail bool) error { return errors.Wrap(err, "save node") } - r, p, m, h, err := Provision(cc, &n, false, delOnFail) + r, p, m, h, err := Provision(cc, &n, delOnFail) if err != nil { return err } @@ -56,7 +59,7 @@ func Add(cc *config.ClusterConfig, n config.Node, delOnFail bool) error { ExistingAddons: nil, } - _, err = Start(s, false) + _, err = Start(s) return err } @@ -155,3 +158,24 @@ func Save(cfg *config.ClusterConfig, node *config.Node) error { func Name(index int) string { return fmt.Sprintf("m%02d", index) } + +// MustReset reset a stacked control plane to avoid blocking the start of primary control plane +// Exit if failed +func MustReset(cc config.ClusterConfig, n config.Node, api libmachine.API, machineName string) { + if n.ControlPlane && !n.APIEndpointServer { + host, err := machine.LoadHost(api, machineName) + if err != nil { + exit.Error(reason.GuestLoadHost, "Error getting host", err) + } + runner, err := 
machine.CommandRunner(host) + if err != nil { + exit.Error(reason.InternalCommandRunner, "Failed to get command runner", err) + } + resetCmd := fmt.Sprintf("%s reset -f", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion)) + rc := exec.Command("/bin/bash", "-c", resetCmd) + _, err = runner.RunCmd(rc) + if err != nil { + exit.Error(reason.GuestNodeReset, "Failed to reset kubeadm", err) + } + } +} diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index d1081fd7bb70..b3bb040963e7 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -80,7 +80,8 @@ type Starter struct { } // Start spins up a guest and starts the Kubernetes node. -func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { +func Start(starter Starter) (*kubeconfig.Settings, error) { + apiEndpointServer := starter.Node.APIEndpointServer // TODO backward compatibility // wait for preloaded tarball to finish downloading before configuring runtimes waitCacheRequiredImages(&cacheGroup) @@ -103,7 +104,8 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { var bs bootstrapper.Bootstrapper var kcs *kubeconfig.Settings - if apiServer { + if apiEndpointServer { + out.Step(style.Tip, "Preparing control plane node...") // Must be written before bootstrap, otherwise health checks may flake due to stale IP kcs = setupKubeconfig(starter.Host, starter.Cfg, starter.Node, starter.Cfg.Name) if err != nil { @@ -124,6 +126,7 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { return nil, errors.Wrap(err, "Failed kubeconfig update") } } else { + out.Step(style.Tip, "Preparing worker node...") bs, err = cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, starter.Runner) if err != nil { return nil, errors.Wrap(err, "Failed to get bootstrapper") @@ -163,24 +166,26 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { wg.Done() }() - if apiServer { - // special ops for none , like change minikube directory. + if apiEndpointServer { + // special ops for none, like change minikube directory. 
// multinode super doesn't work on the none driver if starter.Cfg.Driver == driver.None && len(starter.Cfg.Nodes) == 1 { prepareNone() } } else { // Make sure to use the command runner for the control plane to generate the join token + out.Step(style.Tip, "Preparing kubeadm...") cpBs, cpr, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper)) if err != nil { return nil, errors.Wrap(err, "getting control plane bootstrapper") } - joinCmd, err := cpBs.GenerateToken(*starter.Cfg) + joinCmd, err := cpBs.GenerateToken(*starter.Cfg, starter.Node.ControlPlane) if err != nil { return nil, errors.Wrap(err, "generating join token") } + out.Step(style.Tip, "Joining cluster...") if err = bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd); err != nil { return nil, errors.Wrap(err, "joining cluster") } @@ -190,6 +195,7 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { return nil, errors.Wrap(err, "cni") } + out.Step(style.Tip, fmt.Sprintf("Applying CNI %s...", cnm.String())) if err := cnm.Apply(cpr); err != nil { return nil, errors.Wrap(err, "cni apply") } @@ -208,10 +214,10 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { } // Provision provisions the machine/container for the node -func Provision(cc *config.ClusterConfig, n *config.Node, apiServer bool, delOnFail bool) (command.Runner, bool, libmachine.API, *host.Host, error) { +func Provision(cc *config.ClusterConfig, n *config.Node, delOnFail bool) (command.Runner, bool, libmachine.API, *host.Host, error) { register.Reg.SetStep(register.StartingNode) name := config.MachineName(*cc, *n) - if apiServer { + if n.ControlPlane { out.Step(style.ThumbsUp, "Starting control plane node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) } else { out.Step(style.ThumbsUp, "Starting node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) diff --git a/pkg/minikube/reason/reason.go b/pkg/minikube/reason/reason.go index ee406919d95b..92c75720e0ae 100644 --- a/pkg/minikube/reason/reason.go +++ b/pkg/minikube/reason/reason.go @@ -242,6 +242,7 @@ var ( GuestMount = Kind{ID: "GUEST_MOUNT", ExitCode: ExGuestError} GuestMountConflict = Kind{ID: "GUEST_MOUNT_CONFLICT", ExitCode: ExGuestConflict} GuestNodeAdd = Kind{ID: "GUEST_NODE_ADD", ExitCode: ExGuestError} + GuestNodeReset = Kind{ID: "GUEST_NODE_RESET", ExitCode: ExGuestError} GuestNodeDelete = Kind{ID: "GUEST_NODE_DELETE", ExitCode: ExGuestError} GuestNodeProvision = Kind{ID: "GUEST_NODE_PROVISION", ExitCode: ExGuestError} GuestNodeRetrieve = Kind{ID: "GUEST_NODE_RETRIEVE", ExitCode: ExGuestNotFound} diff --git a/site/content/en/docs/commands/start.md b/site/content/en/docs/commands/start.md index aac90df39d8b..dcbc8a79b123 100644 --- a/site/content/en/docs/commands/start.md +++ b/site/content/en/docs/commands/start.md @@ -47,7 +47,7 @@ minikube start [flags] --extra-config ExtraOption A set of key=value pairs that describe configuration that may be passed to different components. The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. 
Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler - Valid kubeadm parameters: ignore-preflight-errors, dry-run, kubeconfig, kubeconfig-dir, node-name, cri-socket, experimental-upload-certs, certificate-key, rootfs, skip-phases, pod-network-cidr + Valid kubeadm parameters: ignore-preflight-errors, dry-run, kubeconfig, kubeconfig-dir, node-name, cri-socket, upload-certs, certificate-key, rootfs, skip-phases, pod-network-cidr --feature-gates string A set of key=value pairs that describe feature gates for alpha/experimental features. --force Force minikube to perform possibly dangerous operations --force-systemd If set, force the container runtime to use sytemd as cgroup manager. Defaults to false. diff --git a/site/content/en/docs/commands/status.md b/site/content/en/docs/commands/status.md index e5c3c89e7dd5..c86b43129ac3 100644 --- a/site/content/en/docs/commands/status.md +++ b/site/content/en/docs/commands/status.md @@ -23,7 +23,7 @@ minikube status [flags] ``` -f, --format string Go template format string for the status output. The format for Go templates can be found here: https://golang.org/pkg/text/template/ - For the list accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#Status (default "{{.Name}}\ntype: Control Plane\nhost: {{.Host}}\nkubelet: {{.Kubelet}}\napiserver: {{.APIServer}}\nkubeconfig: {{.Kubeconfig}}\ntimeToStop: {{.TimeToStop}}\n\n") + For the list accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#Status (default "{{.Name}}\ntype: Control Plane{{if .IsAPIEndpoint}} (Primary){{end}}\nhost: {{.Host}}\nkubelet: {{.Kubelet}}\napiserver: {{.APIServer}}\nkubeconfig: {{.Kubeconfig}}\ntimeToStop: {{.TimeToStop}}\nIP: {{.IP}}\n\n") -l, --layout string output layout (EXPERIMENTAL, JSON only): 'nodes' or 'cluster' (default "nodes") -n, --node string The node to check status for. Defaults to control plane. Leave blank with default format for status on all nodes. -o, --output string minikube status --output OUTPUT. json, text (default "text") From ceeadf78995199e85cbc1b39b9574aed30e97389 Mon Sep 17 00:00:00 2001 From: Ling Samuel Date: Mon, 11 Jan 2021 15:53:04 +0800 Subject: [PATCH 02/10] Backward compatibility Signed-off-by: Ling Samuel --- cmd/minikube/cmd/node_add.go | 11 +++--- cmd/minikube/cmd/start.go | 24 +++++++------- cmd/minikube/cmd/start_flags.go | 18 ++-------- cmd/minikube/cmd/status.go | 35 ++++++++++++++++---- cmd/minikube/cmd/stop.go | 6 +++- pkg/minikube/bootstrapper/certs.go | 4 +-- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 1 - pkg/minikube/config/profile.go | 32 +++++++++++++----- pkg/minikube/config/types.go | 14 ++++---- pkg/minikube/node/node.go | 12 ++++++- pkg/minikube/node/start.go | 2 +- 11 files changed, 99 insertions(+), 60 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 20438ce983a7..768c001dcd88 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -44,6 +44,7 @@ var nodeAddCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { co := mustload.Healthy(ClusterFlagValue()) cc := co.Config + config.TagPrimaryControlPlane(cc) if driver.BareMetal(cc.Driver) { out.FailureT("none driver does not support multi-node clusters") @@ -59,11 +60,11 @@ var nodeAddCmd = &cobra.Command{ // TODO: Deal with parameters better. Ideally we should be able to acceot any node-specific minikube start params here. 
n := config.Node{ - Name: name, - Worker: worker, - ControlPlane: cp, - APIEndpointServer: false, - KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, + Name: name, + Worker: worker, + ControlPlane: cp, + PrimaryControlPlane: false, + KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, } if n.ControlPlane { diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 42524f239bc8..8b605d2cb490 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -375,11 +375,11 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config. for i := 1; i < numNodes; i++ { nodeName := node.Name(i + 1) n := config.Node{ - Name: nodeName, - Worker: true, - ControlPlane: false, - APIEndpointServer: false, - KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion, + Name: nodeName, + Worker: true, + ControlPlane: false, + PrimaryControlPlane: false, + KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion, } out.Ln("") // extra newline for clarity on the command line err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure)) @@ -389,7 +389,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config. } } else { for _, n := range existing.Nodes { - if !n.APIEndpointServer { + if !n.PrimaryControlPlane { err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure)) if err != nil { return nil, errors.Wrap(err, "adding node") @@ -1197,12 +1197,12 @@ func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.C } cp := config.Node{ - Port: cc.KubernetesConfig.NodePort, - KubernetesVersion: getKubernetesVersion(&cc), - Name: kubeNodeName, - ControlPlane: true, - APIEndpointServer: true, - Worker: true, + Port: cc.KubernetesConfig.NodePort, + KubernetesVersion: getKubernetesVersion(&cc), + Name: kubeNodeName, + ControlPlane: true, + PrimaryControlPlane: true, + Worker: true, } cc.Nodes = []config.Node{cp} return cc, cp, nil diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index 9ac44eddf19a..53ccb287967c 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -413,21 +413,7 @@ func upgradeExistingConfig(cc *config.ClusterConfig) { klog.Infof("config upgrade: KicBaseImage=%s", cc.KicBaseImage) } - needTagAPIEndpointServer := true - for i := range cc.Nodes { - if cc.Nodes[i].APIEndpointServer { - needTagAPIEndpointServer = false - break - } - } - if needTagAPIEndpointServer { - for i := range cc.Nodes { - if cc.Nodes[i].ControlPlane { - cc.Nodes[i].APIEndpointServer = true - break - } - } - } + config.TagPrimaryControlPlane(cc) } // updateExistingConfigFromFlags will update the existing config from the flags - used on a second start @@ -661,6 +647,8 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC cc.KicBaseImage = viper.GetString(kicBaseImage) } + config.TagPrimaryControlPlane(&cc) + return cc } diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index f6aae7b782bd..87257f31afed 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -51,10 +51,11 @@ import ( ) var ( - statusFormat string - output string - layout string - watch time.Duration + statusFormat string + output string + layout string + watch time.Duration + updatePrimaryControlPlaneTag bool ) const ( @@ -218,6 +219,27 @@ var statusCmd = &cobra.Command{ cname := ClusterFlagValue() api, cc := mustload.Partial(cname) + // We should warn user if primary control plane no tagged + tagged := false + 
for i := range cc.Nodes { + if cc.Nodes[i].PrimaryControlPlane { + tagged = true + break + } + } + if !tagged { + if updatePrimaryControlPlaneTag { + out.Ln("Updating primary control plane tag...") + config.TagPrimaryControlPlane(cc) + err := config.SaveProfile(cc.Name, cc) + if err != nil { + exit.Error(reason.HostSaveProfile, "failed to save config", err) + } + } else { + out.Ln("There is no primary control plane, set --update-primary-control-plane-tag=true to update profile.") + } + } + duration := watch if !cmd.Flags().Changed("watch") || watch < 0 { duration = 0 @@ -309,7 +331,7 @@ func exitCode(statuses []*Status) int { func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status, error) { controlPlane := n.ControlPlane name := config.MachineName(cc, n) - apiEndpoint := n.APIEndpointServer + apiEndpoint := n.PrimaryControlPlane st := &Status{ Name: name, @@ -319,7 +341,7 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St Kubeconfig: Nonexistent, Worker: !controlPlane, TimeToStop: Nonexistent, - IsAPIEndpoint: n.APIEndpointServer, + IsAPIEndpoint: n.PrimaryControlPlane, IP: n.IP, } @@ -431,6 +453,7 @@ For the list accessible variables for the template, see the struct values here: statusCmd.Flags().StringVarP(&nodeName, "node", "n", "", "The node to check status for. Defaults to control plane. Leave blank with default format for status on all nodes.") statusCmd.Flags().DurationVarP(&watch, "watch", "w", 1*time.Second, "Continuously listing/getting the status with optional interval duration.") statusCmd.Flags().Lookup("watch").NoOptDefVal = "1s" + statusCmd.Flags().BoolVar(&updatePrimaryControlPlaneTag, "update-primary-control-plane-tag", false, "Update primary control plane tag if there is no control plane marked as API endpoint.") } func statusText(st *Status, w io.Writer) error { diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index 6d7334449c97..1a1c1e5cdc91 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -135,13 +135,17 @@ func stopProfile(profile string) int { // end new code api, cc := mustload.Partial(profile) + + // ensure tag primary control plane properly + config.TagPrimaryControlPlane(cc) + defer api.Close() primaryMachineName := "" for _, n := range cc.Nodes { machineName := config.MachineName(*cc, n) - if n.APIEndpointServer { + if n.PrimaryControlPlane { // Skip because we need to update etcd members primaryMachineName = machineName continue diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index 1a438bb60f15..30713038f835 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -94,7 +94,7 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) } endpoint := net.JoinHostPort(constants.APIEndpointAlias, fmt.Sprint(k8s.NodePort)) - if n.APIEndpointServer { + if n.PrimaryControlPlane { endpoint = net.JoinHostPort("localhost", fmt.Sprint(n.Port)) } kcs := &kubeconfig.Settings{ @@ -186,7 +186,7 @@ func generateSharedCACerts() (CACerts, error) { func generateProfileCerts(k8s config.KubernetesConfig, n config.Node, ccs CACerts) ([]string, error) { // Only generate these certs for the api server - if !n.APIEndpointServer { + if !n.PrimaryControlPlane { return []string{}, nil } diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 9665d5386e96..d2dda85ff6fd 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ 
b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -790,7 +790,6 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig, genCertKey bool) ( if err != nil { return "", errors.Wrap(err, "generating join command") } - out.Step(style.Tip, certKeyResult.Stdout.String()) // Currently we have to parse stdout manually to get certificate key outputs := strings.Split(certKeyResult.Stdout.String(), "\n") diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index 766c6937a2c2..bdc0c7cdfa84 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -53,7 +53,7 @@ func (p *Profile) IsValid() bool { // PrimaryControlPlane gets the node specific config for the first created control plane func PrimaryControlPlane(cc *ClusterConfig) (Node, error) { for _, n := range cc.Nodes { - if n.APIEndpointServer { + if n.PrimaryControlPlane { return n, nil } } @@ -65,13 +65,13 @@ func PrimaryControlPlane(cc *ClusterConfig) (Node, error) { // This config is probably from 1.6 or earlier, let's convert it. cp := Node{ - Name: cc.KubernetesConfig.NodeName, - IP: cc.KubernetesConfig.NodeIP, - Port: cc.KubernetesConfig.NodePort, - KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, - ControlPlane: true, - APIEndpointServer: true, - Worker: true, + Name: cc.KubernetesConfig.NodeName, + IP: cc.KubernetesConfig.NodeIP, + Port: cc.KubernetesConfig.NodePort, + KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, + ControlPlane: true, + PrimaryControlPlane: true, + Worker: true, } cc.Nodes = []Node{cp} @@ -144,6 +144,20 @@ func SaveNode(cfg *ClusterConfig, node *Node) error { return SaveProfile(viper.GetString(ProfileName), cfg) } +func TagPrimaryControlPlane(cc *ClusterConfig) { + for i := range cc.Nodes { + if cc.Nodes[i].PrimaryControlPlane { + return + } + } + for i := range cc.Nodes { + if cc.Nodes[i].ControlPlane && cc.Nodes[i].Name == "" { + cc.Nodes[i].PrimaryControlPlane = true + break + } + } +} + // SaveProfile creates an profile out of the cfg and stores in $MINIKUBE_HOME/profiles//config.json func SaveProfile(name string, cfg *ClusterConfig, miniHome ...string) error { data, err := json.MarshalIndent(cfg, "", " ") @@ -297,7 +311,7 @@ func ProfileFolderPath(profile string, miniHome ...string) string { // MachineName returns the name of the machine, as seen by the hypervisor given the cluster and node names func MachineName(cc ClusterConfig, n Node) string { // For single node cluster, default to back to old naming - if len(cc.Nodes) == 1 || n.APIEndpointServer || (n.ControlPlane && n.Name == "") { + if len(cc.Nodes) == 1 || n.PrimaryControlPlane || (n.ControlPlane && n.Name == "") { return cc.Name } return fmt.Sprintf("%s-%s", cc.Name, n.Name) diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go index 2d7fc9fe8dca..7bacd8ca6b9b 100644 --- a/pkg/minikube/config/types.go +++ b/pkg/minikube/config/types.go @@ -110,13 +110,13 @@ type KubernetesConfig struct { // Node contains information about specific nodes in a cluster type Node struct { - Name string - IP string - Port int - KubernetesVersion string - APIEndpointServer bool - ControlPlane bool - Worker bool + Name string + IP string + Port int + KubernetesVersion string + PrimaryControlPlane bool + ControlPlane bool + Worker bool } // VersionedExtraOption holds information on flags to apply to a specific range diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 09ff9ba4a723..6a0c9ae5741a 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ 
-87,6 +87,16 @@ func Delete(cc config.ClusterConfig, name string) (*config.Node, error) {
 return n, err
 }
 
+ // leave master
+ if n.ControlPlane {
+ resetCmd := fmt.Sprintf("%s reset -f", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion))
+ rc := exec.Command("/bin/bash", "-c", resetCmd)
+ _, err = runner.RunCmd(rc)
+ if err != nil {
+ klog.Errorf("Failed to reset kubeadm: %v", err)
+ }
+ }
+
 // kubectl drain
 kubectl := kapi.KubectlBinaryPath(cc.KubernetesConfig.KubernetesVersion)
 cmd := exec.Command("sudo", "KUBECONFIG=/var/lib/minikube/kubeconfig", kubectl, "drain", m)
@@ -162,7 +172,7 @@ func Name(index int) string {
 // MustReset reset a stacked control plane to avoid blocking the start of primary control plane
 // Exit if failed
 func MustReset(cc config.ClusterConfig, n config.Node, api libmachine.API, machineName string) {
- if n.ControlPlane && !n.APIEndpointServer {
+ if n.ControlPlane && !n.PrimaryControlPlane {
 host, err := machine.LoadHost(api, machineName)
 if err != nil {
 exit.Error(reason.GuestLoadHost, "Error getting host", err)
diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go
index b3bb040963e7..b1e68ff21d95 100644
--- a/pkg/minikube/node/start.go
+++ b/pkg/minikube/node/start.go
@@ -81,7 +81,7 @@ type Starter struct {
 
 // Start spins up a guest and starts the Kubernetes node.
 func Start(starter Starter) (*kubeconfig.Settings, error) {
- apiEndpointServer := starter.Node.APIEndpointServer // TODO backward compatibility
+ apiEndpointServer := starter.Node.PrimaryControlPlane // TODO backward compatibility
 // wait for preloaded tarball to finish downloading before configuring runtimes
 waitCacheRequiredImages(&cacheGroup)
 
From 6cf95aa6fe20583b372da6c0927fc1ebf931c825 Mon Sep 17 00:00:00 2001
From: Ling Samuel
Date: Fri, 11 Dec 2020 16:01:09 +0800
Subject: [PATCH 03/10] Update docs

---
 site/content/en/docs/commands/status.md | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/site/content/en/docs/commands/status.md b/site/content/en/docs/commands/status.md
index c86b43129ac3..824777edafc7 100644
--- a/site/content/en/docs/commands/status.md
+++ b/site/content/en/docs/commands/status.md
@@ -22,12 +22,13 @@ minikube status [flags]
 ### Options
 
 ```
- -f, --format string Go template format string for the status output. 
The format for Go templates can be found here: https://golang.org/pkg/text/template/ + For the list accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#Status (default "{{.Name}}\ntype: Control Plane{{if .IsAPIEndpoint}} (Primary){{end}}\nhost: {{.Host}}\nkubelet: {{.Kubelet}}\napiserver: {{.APIServer}}\nkubeconfig: {{.Kubeconfig}}\ntimeToStop: {{.TimeToStop}}\nIP: {{.IP}}\n\n") + -l, --layout string output layout (EXPERIMENTAL, JSON only): 'nodes' or 'cluster' (default "nodes") + -n, --node string The node to check status for. Defaults to control plane. Leave blank with default format for status on all nodes. + -o, --output string minikube status --output OUTPUT. json, text (default "text") + --update-primary-control-plane-tag Update primary control plane tag if there is no control plane marked as API endpoint. + -w, --watch duration[=1s] Continuously listing/getting the status with optional interval duration. (default 1s) ``` ### Options inherited from parent commands From e1320bf1b7a8fbd4f1a6c42cab4a86a0a32447a5 Mon Sep 17 00:00:00 2001 From: Ling Samuel Date: Thu, 17 Dec 2020 10:20:58 +0800 Subject: [PATCH 04/10] Remove todo --- pkg/minikube/node/start.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index b1e68ff21d95..e2ca668b4af2 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -81,7 +81,7 @@ type Starter struct { // Start spins up a guest and starts the Kubernetes node. func Start(starter Starter) (*kubeconfig.Settings, error) { - apiEndpointServer := starter.Node.PrimaryControlPlane // TODO backward compatibility + apiEndpointServer := starter.Node.PrimaryControlPlane // wait for preloaded tarball to finish downloading before configuring runtimes waitCacheRequiredImages(&cacheGroup) From 929a97144de45de0c3fbeaf8d4015f1418440bf3 Mon Sep 17 00:00:00 2001 From: Ling Samuel Date: Thu, 17 Dec 2020 11:58:58 +0800 Subject: [PATCH 05/10] Fix node delete control-plane --- pkg/minikube/node/node.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 6a0c9ae5741a..45d92d05010f 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -89,6 +89,16 @@ func Delete(cc config.ClusterConfig, name string) (*config.Node, error) { // leave master if n.ControlPlane { + host, err := machine.LoadHost(api, m) + if err != nil { + return n, err + } + + runner, err := machine.CommandRunner(host) + if err != nil { + return n, err + } + resetCmd := fmt.Sprintf("%s reset -f", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion)) rc := exec.Command("/bin/bash", "-c", resetCmd) _, err = runner.RunCmd(rc) From 69c3a0cbb05d8f856e5080807af731e199b22d54 Mon Sep 17 00:00:00 2001 From: Ling Samuel Date: Thu, 17 Dec 2020 11:58:07 +0800 Subject: [PATCH 06/10] Add integration tests Signed-off-by: Ling Samuel --- test/integration/main_test.go | 1 + test/integration/multinode_test.go | 346 +++++++++++++++-------------- 2 files changed, 181 insertions(+), 166 deletions(-) diff --git a/test/integration/main_test.go b/test/integration/main_test.go index f0a1b9502cd5..8bd7584c4db6 100644 --- a/test/integration/main_test.go +++ b/test/integration/main_test.go @@ -48,6 +48,7 @@ var testdataDir = flag.String("testdata-dir", "testdata", "the directory relativ const ( SecondNodeName = "m02" ThirdNodeName = "m03" + FourthNodeName = "m04" ) // TestMain is the test main diff 
--git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index bad0bf781789..de22b84d5701 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -1,5 +1,3 @@ -// +build integration - /* Copyright 2020 The Kubernetes Authors All rights reserved. @@ -26,14 +24,22 @@ import ( "testing" ) +type clusterStatus struct { + running bool + totalNodes int + wantRunningNodes int + wantStoppedNodes int +} + +type validatorFunc func(context.Context, *testing.T, string, *clusterStatus) + func TestMultiNode(t *testing.T) { if NoneDriver() { t.Skip("none driver does not support multinode") } - type validatorFunc func(context.Context, *testing.T, string) profile := UniqueProfileName("multinode") - ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) + ctx, cancel := context.WithTimeout(context.Background(), Minutes(45)) defer CleanupWithLogs(t, profile, cancel) t.Run("serial", func(t *testing.T) { @@ -43,12 +49,18 @@ func TestMultiNode(t *testing.T) { }{ {"FreshStart2Nodes", validateMultiNodeStart}, {"AddNode", validateAddNodeToMultiNode}, - {"StopNode", validateStopRunningNode}, - {"StartAfterStop", validateStartNodeAfterStop}, - {"DeleteNode", validateDeleteNodeFromMultiNode}, + {"StopNode", validateStopRunningNode(ThirdNodeName)}, + {"AddControlPlaneNode", validateAddControlPlaneNodeToMultiNode}, + {"StopControlPlaneNode", validateStopRunningNode(FourthNodeName)}, + {"StartAfterStop", validateStartNodeAfterStop(ThirdNodeName)}, + {"StartControlPlaneAfterStop", validateStartNodeAfterStop(FourthNodeName)}, + {"DeleteNode", validateDeleteNodeFromMultiNode(ThirdNodeName, true)}, + {"DeleteControlPlaneNode", validateDeleteNodeFromMultiNode(FourthNodeName, true)}, {"StopMultiNode", validateStopMultiNodeCluster}, {"RestartMultiNode", validateRestartMultiNodeCluster}, } + + s := &clusterStatus{} for _, tc := range tests { tc := tc if ctx.Err() == context.DeadlineExceeded { @@ -56,13 +68,13 @@ func TestMultiNode(t *testing.T) { } t.Run(tc.name, func(t *testing.T) { defer PostMortemLogs(t, profile) - tc.validator(ctx, t, profile) + tc.validator(ctx, t, profile, s) }) } }) } -func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string) { +func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { // Start a 2 node cluster with the --nodes param startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory=2200", "--nodes=2", "-v=8", "--alsologtostderr"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) @@ -70,147 +82,84 @@ func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string) { t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err) } - // Make sure minikube status shows 2 nodes - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) - if err != nil { - t.Fatalf("failed to run minikube status. 
args %q : %v", rr.Command(), err) - } - - if strings.Count(rr.Stdout.String(), "host: Running") != 2 { - t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } - - if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 { - t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } - + s.startCluster() + s.addNode(2) + validateClusterStatus(ctx, t, profile, s) } -func validateAddNodeToMultiNode(ctx context.Context, t *testing.T, profile string) { +func validateAddNodeToMultiNode(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { // Add a node to the current cluster addArgs := []string{"node", "add", "-p", profile, "-v", "3", "--alsologtostderr"} rr, err := Run(t, exec.CommandContext(ctx, Target(), addArgs...)) if err != nil { t.Fatalf("failed to add node to current cluster. args %q : %v", rr.Command(), err) } - - // Make sure minikube status shows 3 nodes - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) - if err != nil { - t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) - } - - if strings.Count(rr.Stdout.String(), "host: Running") != 3 { - t.Errorf("status says all hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } - - if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 { - t.Errorf("status says all kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } + s.addNode(1) + validateClusterStatus(ctx, t, profile, s) } -func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) { - // Run minikube node stop on that node - rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", ThirdNodeName)) +func validateAddControlPlaneNodeToMultiNode(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { + // Add a node to the current cluster + addArgs := []string{"node", "add", "-p", profile, "-v", "3", "--alsologtostderr", "--control-plane"} + rr, err := Run(t, exec.CommandContext(ctx, Target(), addArgs...)) if err != nil { - t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err) + t.Fatalf("failed to add control plane node to current cluster. args %q : %v", rr.Command(), err) } - // Run status again to see the stopped host - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) - // Exit code 7 means one host is stopped, which we are expecting - if err != nil && rr.ExitCode != 7 { - t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) - } - - // Make sure minikube status shows 2 running nodes and 1 stopped one - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) - if err != nil && rr.ExitCode != 7 { - t.Fatalf("failed to run minikube status. 
args %q : %v", rr.Command(), err) - } - - if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 { - t.Errorf("incorrect number of running kubelets: args %q: %v", rr.Command(), rr.Stdout.String()) - } - - if strings.Count(rr.Stdout.String(), "host: Stopped") != 1 { - t.Errorf("incorrect number of stopped hosts: args %q: %v", rr.Command(), rr.Stdout.String()) - } - - if strings.Count(rr.Stdout.String(), "kubelet: Stopped") != 1 { - t.Errorf("incorrect number of stopped kubelets: args %q: %v", rr.Command(), rr.Stdout.String()) - } + s.addNode(1) + validateClusterStatus(ctx, t, profile, s) } -func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile string) { - if DockerDriver() { - rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}")) +func validateStopRunningNode(nodeName string) validatorFunc { + return func(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { + // Run minikube node stop on that node + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", nodeName)) if err != nil { - t.Fatalf("docker is broken: %v", err) - } - if strings.Contains(rr.Stdout.String(), "azure") { - t.Skip("kic containers are not supported on docker's azure") + t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err) } - } - // Start the node back up - rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", ThirdNodeName, "--alsologtostderr")) - if err != nil { - t.Logf(rr.Stderr.String()) - t.Errorf("node start returned an error. args %q: %v", rr.Command(), err) - } - - // Make sure minikube status shows 3 running hosts - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) - if err != nil { - t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + s.stopNode() + validateClusterStatus(ctx, t, profile, s) } +} - if strings.Count(rr.Stdout.String(), "host: Running") != 3 { - t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } +func validateStartNodeAfterStop(nodeName string) validatorFunc { + return func(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { + if DockerDriver() { + rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}")) + if err != nil { + t.Fatalf("docker is broken: %v", err) + } + if strings.Contains(rr.Stdout.String(), "azure") { + s.startNode() // Make GitHub test happy + t.Skip("kic containers are not supported on docker's azure") + } + } - if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 { - t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } + // Start the node back up + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", nodeName, "--alsologtostderr")) + if err != nil { + t.Logf(rr.Stderr.String()) + t.Errorf("node start returned an error. args %q: %v", rr.Command(), err) + } - // Make sure kubectl can connect correctly - rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes")) - if err != nil { - t.Fatalf("failed to kubectl get nodes. 
args %q : %v", rr.Command(), err) + s.startNode() + validateClusterStatus(ctx, t, profile, s) } } -func validateStopMultiNodeCluster(ctx context.Context, t *testing.T, profile string) { +func validateStopMultiNodeCluster(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { // Run minikube stop on the cluster rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "stop")) if err != nil { t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err) } - // Run status to see the stopped hosts - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) - // Exit code 7 means one host is stopped, which we are expecting - if err != nil && rr.ExitCode != 7 { - t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) - } - - // Make sure minikube status shows 2 stopped nodes - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) - if err != nil && rr.ExitCode != 7 { - t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) - } - - if strings.Count(rr.Stdout.String(), "host: Stopped") != 2 { - t.Errorf("incorrect number of stopped hosts: args %q: %v", rr.Command(), rr.Stdout.String()) - } - - if strings.Count(rr.Stdout.String(), "kubelet: Stopped") != 2 { - t.Errorf("incorrect number of stopped kubelets: args %q: %v", rr.Command(), rr.Stdout.String()) - } + s.stopCluster() + validateClusterStatus(ctx, t, profile, s) } -func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile string) { +func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { if DockerDriver() { rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}")) if err != nil { @@ -227,19 +176,8 @@ func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err) } - // Make sure minikube status shows 2 running nodes - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) - if err != nil { - t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) - } - - if strings.Count(rr.Stdout.String(), "host: Running") != 2 { - t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Output()) - } - - if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 { - t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Output()) - } + s.startCluster() + validateClusterStatus(ctx, t, profile, s) // Make sure kubectl reports that all nodes are ready rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes")) @@ -247,64 +185,140 @@ func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err) } if strings.Count(rr.Stdout.String(), "NotReady") > 0 { - t.Errorf("expected 2 nodes to be Ready, got %v", rr.Output()) + t.Errorf("expected %v nodes to be Ready, got %v", s.wantRunningNodes, rr.Output()) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes", "-o", `go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'`)) if err != nil { t.Fatalf("failed to run kubectl get nodes. 
args %q : %v", rr.Command(), err) } - if strings.Count(rr.Stdout.String(), "True") != 2 { - t.Errorf("expected 2 nodes Ready status to be True, got %v", rr.Output()) + if strings.Count(rr.Stdout.String(), "True") != s.wantRunningNodes { + t.Errorf("expected %v nodes Ready status to be True, got %v", s.wantRunningNodes, rr.Output()) } } -func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile string) { - - // Start the node back up - rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "delete", ThirdNodeName)) - if err != nil { - t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err) - } +func validateDeleteNodeFromMultiNode(nodeName string, running bool) validatorFunc { + return func(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { + // Start the node back up + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "delete", nodeName)) + if err != nil { + t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err) + } - // Make sure status is back down to 2 hosts - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) - if err != nil { - t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) - } + if running { + s.deleteRunningNode() + } else { + s.deleteStoppedNode() + } + validateClusterStatus(ctx, t, profile, s) - if strings.Count(rr.Stdout.String(), "host: Running") != 2 { - t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } + if DockerDriver() { + rr, err := Run(t, exec.Command("docker", "volume", "ls")) + if err != nil { + t.Errorf("failed to run %q : %v", rr.Command(), err) + } + if strings.Contains(rr.Stdout.String(), fmt.Sprintf("%s-%s", profile, ThirdNodeName)) { + t.Errorf("docker volume was not properly deleted: %s", rr.Stdout.String()) + } + } - if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 { - t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } + // Make sure kubectl knows the node is gone + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes")) + if err != nil { + t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err) + } + if strings.Count(rr.Stdout.String(), "NotReady") > 0 { + t.Errorf("expected %v nodes to be Ready, got %v", s.wantRunningNodes, rr.Output()) + } - if DockerDriver() { - rr, err := Run(t, exec.Command("docker", "volume", "ls")) + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes", "-o", `go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'`)) if err != nil { - t.Errorf("failed to run %q : %v", rr.Command(), err) + t.Fatalf("failed to run kubectl get nodes. 
args %q : %v", rr.Command(), err) } - if strings.Contains(rr.Stdout.String(), fmt.Sprintf("%s-%s", profile, ThirdNodeName)) { - t.Errorf("docker volume was not properly deleted: %s", rr.Stdout.String()) + if strings.Count(rr.Stdout.String(), "True") != s.wantRunningNodes { + t.Errorf("expected %v nodes Ready status to be True, got %v", s.wantRunningNodes, rr.Output()) } } +} - // Make sure kubectl knows the node is gone - rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes")) +func (s *clusterStatus) addNode(count int) { + s.totalNodes += count + s.wantRunningNodes += count +} + +func (s *clusterStatus) stopNode() { + s.wantRunningNodes-- + s.wantStoppedNodes++ +} + +func (s *clusterStatus) startNode() { + s.wantRunningNodes++ + s.wantStoppedNodes-- +} + +func (s *clusterStatus) deleteRunningNode() { + s.totalNodes-- + s.wantRunningNodes-- +} + +func (s *clusterStatus) deleteStoppedNode() { + s.totalNodes-- + s.wantStoppedNodes-- +} + +func (s *clusterStatus) stopCluster() { + s.running = false + s.wantRunningNodes = 0 + s.wantStoppedNodes = s.totalNodes +} + +func (s *clusterStatus) startCluster() { + s.running = true + s.wantRunningNodes = s.totalNodes + s.wantStoppedNodes = 0 +} + +// validateClusterStatus validates running/stopped kubelet/host count, check kubectl config and api serve connection. +func validateClusterStatus(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { + // Make sure minikube status shows expected running nodes and stopped nodes + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) if err != nil { - t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err) + if s.wantStoppedNodes > 0 { + // Exit code 7 means one host is stopped, which we are expecting + if rr.ExitCode != 7 { + t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + } + } else { + t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + } } - if strings.Count(rr.Stdout.String(), "NotReady") > 0 { - t.Errorf("expected 2 nodes to be Ready, got %v", rr.Output()) + var count int + + count = strings.Count(rr.Stdout.String(), "kubelet: Running") + if count != s.wantRunningNodes { + t.Errorf("incorrect number of running kubelets (want: %v, got %v): args %q: %v", s.wantRunningNodes, count, rr.Command(), rr.Stdout.String()) } - rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes", "-o", `go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'`)) - if err != nil { - t.Fatalf("failed to run kubectl get nodes. 
args %q : %v", rr.Command(), err) + count = strings.Count(rr.Stdout.String(), "kubelet: Stopped") + if count != s.wantStoppedNodes { + t.Errorf("incorrect number of stopped kubelets (want: %v, got %v): args %q: %v", s.wantStoppedNodes, count, rr.Command(), rr.Stdout.String()) } - if strings.Count(rr.Stdout.String(), "True") != 2 { - t.Errorf("expected 2 nodes Ready status to be True, got %v", rr.Output()) + + count = strings.Count(rr.Stdout.String(), "host: Running") + if count != s.wantRunningNodes { + t.Errorf("incorrect number of running hosts (want: %v, got %v): args %q: %v", s.wantRunningNodes, count, rr.Command(), rr.Stdout.String()) + } + + count = strings.Count(rr.Stdout.String(), "host: Stopped") + if count != s.wantStoppedNodes { + t.Errorf("incorrect number of stopped hosts (want: %v, got %v): args %q: %v", s.wantStoppedNodes, count, rr.Command(), rr.Stdout.String()) + } + + if s.running { + // Make sure kubectl can connect correctly + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes")) + if err != nil { + t.Fatalf("failed to kubectl get nodes. args %q : %v", rr.Command(), err) + } } } From 8ca38c3308666de81d5cf052a6258ab41e3a38c2 Mon Sep 17 00:00:00 2001 From: Ling Samuel Date: Thu, 17 Dec 2020 15:46:28 +0800 Subject: [PATCH 07/10] Fix test in github Signed-off-by: Ling Samuel --- pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 1 - test/integration/multinode_test.go | 40 ++++++++++---------- 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index d2dda85ff6fd..76756ef1701e 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -784,7 +784,6 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig, genCertKey bool) ( } certCmd := fmt.Sprintf("%s init phase upload-certs --upload-certs --config %s", ka, confPath) - out.Step(style.Tip, certCmd) certKeyCmd := exec.Command("/bin/bash", "-c", certCmd) certKeyResult, err := k.c.RunCmd(certKeyCmd) if err != nil { diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index de22b84d5701..3fa6fe289ac2 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -29,6 +29,8 @@ type clusterStatus struct { totalNodes int wantRunningNodes int wantStoppedNodes int + + isAzure bool } type validatorFunc func(context.Context, *testing.T, string, *clusterStatus) @@ -61,6 +63,17 @@ func TestMultiNode(t *testing.T) { } s := &clusterStatus{} + + if DockerDriver() { + rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}")) + if err != nil { + t.Fatalf("docker is broken: %v", err) + } + if strings.Contains(rr.Stdout.String(), "azure") { + s.isAzure = true + } + } + for _, tc := range tests { tc := tc if ctx.Err() == context.DeadlineExceeded { @@ -125,15 +138,8 @@ func validateStopRunningNode(nodeName string) validatorFunc { func validateStartNodeAfterStop(nodeName string) validatorFunc { return func(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { - if DockerDriver() { - rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}")) - if err != nil { - t.Fatalf("docker is broken: %v", err) - } - if strings.Contains(rr.Stdout.String(), "azure") { - s.startNode() // Make GitHub test happy - t.Skip("kic containers are not supported on docker's azure") - } + if s.isAzure { + t.Skip("kic containers are not supported on docker's azure") } // Start 
the node back up @@ -160,15 +166,11 @@ func validateStopMultiNodeCluster(ctx context.Context, t *testing.T, profile str } func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { - if DockerDriver() { - rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}")) - if err != nil { - t.Fatalf("docker is broken: %v", err) - } - if strings.Contains(rr.Stdout.String(), "azure") { - t.Skip("kic containers are not supported on docker's azure") - } + if s.isAzure { + s.startCluster() + t.Skip("kic containers are not supported on docker's azure") } + // Restart a full cluster with minikube start startArgs := append([]string{"start", "-p", profile, "--wait=true", "-v=8", "--alsologtostderr"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) @@ -205,7 +207,7 @@ func validateDeleteNodeFromMultiNode(nodeName string, running bool) validatorFun t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err) } - if running { + if running && !s.isAzure { s.deleteRunningNode() } else { s.deleteStoppedNode() } validateClusterStatus(ctx, t, profile, s) @@ -283,7 +285,7 @@ func validateClusterStatus(ctx context.Context, t *testing.T, profile string, s // Make sure minikube status shows expected running nodes and stopped nodes rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) if err != nil { - if s.wantStoppedNodes > 0 { + if s.wantStoppedNodes > 0 || s.isAzure { // If isAzure, the start process was skipped, so some hosts are stopped // Exit code 7 means one host is stopped, which we are expecting if rr.ExitCode != 7 { t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) From b7d80823ccb4a7af0025af58fe008fc0768ea4f7 Mon Sep 17 00:00:00 2001 From: Ling Samuel Date: Fri, 18 Dec 2020 09:18:32 +0800 Subject: [PATCH 08/10] Reorder tests to avoid a failure caused by `kubectl get nodes` reporting outdated info.
Signed-off-by: Ling Samuel --- test/integration/main_test.go | 1 - test/integration/multinode_test.go | 11 +++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/test/integration/main_test.go b/test/integration/main_test.go index 8bd7584c4db6..f0a1b9502cd5 100644 --- a/test/integration/main_test.go +++ b/test/integration/main_test.go @@ -48,7 +48,6 @@ var testdataDir = flag.String("testdata-dir", "testdata", "the directory relativ const ( SecondNodeName = "m02" ThirdNodeName = "m03" - FourthNodeName = "m04" ) // TestMain is the test main diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index 3fa6fe289ac2..b17f1194e6d0 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -50,14 +50,17 @@ func TestMultiNode(t *testing.T) { validator validatorFunc }{ {"FreshStart2Nodes", validateMultiNodeStart}, + // Add worker node {"AddNode", validateAddNodeToMultiNode}, {"StopNode", validateStopRunningNode(ThirdNodeName)}, - {"AddControlPlaneNode", validateAddControlPlaneNodeToMultiNode}, - {"StopControlPlaneNode", validateStopRunningNode(FourthNodeName)}, {"StartAfterStop", validateStartNodeAfterStop(ThirdNodeName)}, - {"StartControlPlaneAfterStop", validateStartNodeAfterStop(FourthNodeName)}, {"DeleteNode", validateDeleteNodeFromMultiNode(ThirdNodeName, true)}, - {"DeleteControlPlaneNode", validateDeleteNodeFromMultiNode(FourthNodeName, true)}, + // Add control plane node + {"AddControlPlaneNode", validateAddControlPlaneNodeToMultiNode}, + {"StopControlPlaneNode", validateStopRunningNode(ThirdNodeName)}, + {"StartControlPlaneAfterStop", validateStartNodeAfterStop(ThirdNodeName)}, + {"DeleteControlPlaneNode", validateDeleteNodeFromMultiNode(ThirdNodeName, true)}, + // Test cluster stop && start {"StopMultiNode", validateStopMultiNodeCluster}, {"RestartMultiNode", validateRestartMultiNodeCluster}, } From 8fa3ccfc3886e282f37760953cabe7e816d0cfdf Mon Sep 17 00:00:00 2001 From: Ling Samuel Date: Thu, 24 Dec 2020 10:48:27 +0800 Subject: [PATCH 09/10] Add --control-planes to `start` Signed-off-by: Ling Samuel --- cmd/minikube/cmd/node_add.go | 3 +-- cmd/minikube/cmd/start.go | 12 ++++++++++++ cmd/minikube/cmd/start_flags.go | 2 ++ site/content/en/docs/commands/start.md | 1 + 4 files changed, 16 insertions(+), 2 deletions(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 768c001dcd88..b90a41ddcf58 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -21,7 +21,6 @@ import ( "github.com/spf13/viper" "k8s.io/minikube/pkg/minikube/cni" "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/driver" "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/mustload" @@ -68,7 +67,7 @@ var nodeAddCmd = &cobra.Command{ } if n.ControlPlane { - n.Port = constants.APIServerPort + n.Port = cc.KubernetesConfig.NodePort } // Make sure to decrease the default amount of memory we use per VM if this is the first worker node diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 8b605d2cb490..8bb3d0ae9811 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -131,6 +131,13 @@ func platform() string { func runStart(cmd *cobra.Command, args []string) { register.SetEventLogPath(localpath.EventLog(ClusterFlagValue())) + controlPlanesNum := viper.GetInt(controlPlanes) + nodesNum := viper.GetInt(nodes) + if controlPlanesNum > nodesNum { + out.WarningT(fmt.Sprintf("control planes number 
%v larger than nodes number %v, enlarge nodes to %v.", controlPlanesNum, nodesNum, controlPlanesNum)) + viper.Set(nodes, controlPlanesNum) + } + out.SetJSON(outputFormat == "json") if err := pkgtrace.Initialize(viper.GetString(trace)); err != nil { exit.Message(reason.Usage, "error initializing tracing: {{.Error}}", out.V{"Error": err.Error()}) @@ -359,6 +366,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config. } } + numControlPlanes := viper.GetInt(controlPlanes) numNodes := viper.GetInt(nodes) if existing != nil { if numNodes > 1 { @@ -381,6 +389,10 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config. PrimaryControlPlane: false, KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion, } + if i < numControlPlanes { + n.ControlPlane = true + n.Port = starter.Cfg.KubernetesConfig.NodePort + } out.Ln("") // extra newline for clarity on the command line err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure)) if err != nil { diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index 53ccb287967c..6807f7c46739 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -102,6 +102,7 @@ const ( hostOnlyNicType = "host-only-nic-type" natNicType = "nat-nic-type" nodes = "nodes" + controlPlanes = "control-planes" preload = "preload" deleteOnFailure = "delete-on-failure" forceSystemd = "force-systemd" @@ -150,6 +151,7 @@ func initMinikubeFlags() { startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.") startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.") startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.") + startCmd.Flags().Int(controlPlanes, 1, "The number of control planes to spin up. Defaults to 1.") startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.") startCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.") startCmd.Flags().Bool(forceSystemd, false, "If set, force the container runtime to use sytemd as cgroup manager. Defaults to false.") diff --git a/site/content/en/docs/commands/start.md b/site/content/en/docs/commands/start.md index dcbc8a79b123..4fdd96eabb66 100644 --- a/site/content/en/docs/commands/start.md +++ b/site/content/en/docs/commands/start.md @@ -30,6 +30,7 @@ minikube start [flags] --cache-images If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none. (default true) --cni string CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto) --container-runtime string The container runtime to be used (docker, cri-o, containerd). (default "docker") + --control-planes int The number of control planes to spin up. Defaults to 1. (default 1) --cpus int Number of CPUs allocated to Kubernetes. (default 2) --cri-socket string The cri socket path to be used. --delete-on-failure If set, delete the current cluster if start fails and try again. Defaults to false. 
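For illustration, the role assignment that `--control-planes` introduces can be sketched in a few lines of standalone Go. This is an approximation of the loop in startWithDriver above, not minikube code: node 1 is always the primary control plane, additional nodes become stacked control planes while their index is below the requested control-plane count, and the rest are workers; the mNN names simply mirror the naming convention used by the integration tests.

```go
package main

import "fmt"

// assignRoles approximates the start logic: with --nodes=N and
// --control-planes=C, the first node is the primary control plane,
// nodes 2..C are stacked control planes, and the rest are workers.
// If C > N, minikube warns and raises the node count to C.
func assignRoles(nodes, controlPlanes int) []string {
	if controlPlanes > nodes {
		nodes = controlPlanes
	}
	roles := []string{"m01: primary control plane"}
	for i := 1; i < nodes; i++ {
		role := "worker"
		if i < controlPlanes {
			role = "control plane"
		}
		roles = append(roles, fmt.Sprintf("m%02d: %s", i+1, role))
	}
	return roles
}

func main() {
	// Roughly the layout `minikube start --nodes=5 --control-planes=3` would request.
	for _, r := range assignRoles(5, 3) {
		fmt.Println(r)
	}
}
```

Run as-is, this prints one primary control plane, two stacked control planes, and two workers, which is the node layout the flags above are meant to produce.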
From fb46e4202f161cd316439670b823c5f9c7ca7f7a Mon Sep 17 00:00:00 2001 From: Ling Samuel Date: Wed, 13 Jan 2021 20:10:29 +0800 Subject: [PATCH 10/10] Add k8s version check Signed-off-by: Ling Samuel --- cmd/minikube/cmd/node_add.go | 5 +++++ cmd/minikube/cmd/start.go | 7 +++++++ pkg/minikube/bootstrapper/kubeadm/kubeadm.go | 2 +- pkg/util/utils.go | 11 +++++++++++ 4 files changed, 24 insertions(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index b90a41ddcf58..5ba3ed0ba736 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -29,6 +29,7 @@ import ( "k8s.io/minikube/pkg/minikube/out/register" "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/style" + "k8s.io/minikube/pkg/util" ) var ( @@ -67,6 +68,10 @@ var nodeAddCmd = &cobra.Command{ } if n.ControlPlane { + err := util.CheckMultiControlPlaneVersion(cc.KubernetesConfig.KubernetesVersion) + if err != nil { + exit.Error(reason.KubernetesTooOld, "target kubernetes version too old", err) + } n.Port = cc.KubernetesConfig.NodePort } diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 8bb3d0ae9811..891a293703fe 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -137,6 +137,13 @@ func runStart(cmd *cobra.Command, args []string) { out.WarningT(fmt.Sprintf("control planes number %v larger than nodes number %v, enlarge nodes to %v.", controlPlanesNum, nodesNum, controlPlanesNum)) viper.Set(nodes, controlPlanesNum) } + k8sVersion := viper.GetString(kubernetesVersion) + if controlPlanesNum > 1 { + err := util.CheckMultiControlPlaneVersion(k8sVersion) + if err != nil { + exit.Error(reason.KubernetesTooOld, "target kubernetes version too old", err) + } + } out.SetJSON(outputFormat == "json") if err := pkgtrace.Initialize(viper.GetString(trace)); err != nil { diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 76756ef1701e..4876444acb5e 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -236,7 +236,7 @@ func (k *Bootstrapper) init(cfg config.ClusterConfig) error { ctx, cancel := context.WithTimeout(context.Background(), initTimeoutMinutes*time.Minute) defer cancel() kr, kw := io.Pipe() - c := exec.CommandContext(ctx, "/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s --upload-certs", + c := exec.CommandContext(ctx, "/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ","))) c.Stdout = kw c.Stderr = kw diff --git a/pkg/util/utils.go b/pkg/util/utils.go index f5fe5a3b226a..a64954be48a9 100644 --- a/pkg/util/utils.go +++ b/pkg/util/utils.go @@ -109,3 +109,14 @@ func MaybeChownDirRecursiveToMinikubeUser(dir string) error { func ParseKubernetesVersion(version string) (semver.Version, error) { return semver.Make(version[1:]) } + +func CheckMultiControlPlaneVersion(version string) error { + ver, err := ParseKubernetesVersion(version) + if err != nil { + return err + } + if ver.Minor < 15 { + return errors.Errorf("Multi control plane requires Kubernetes 1.15+, current version %s is not supported.", version) + } + return nil +}
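To make the new guard concrete, the check in CheckMultiControlPlaneVersion can be exercised with a small standalone sketch. The patch itself parses the version through ParseKubernetesVersion (blang/semver); the sketch below is only an approximation that uses the standard library so it runs in isolation, and, like the patch, it compares just the minor version against the 1.15 cutoff.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// checkMultiControlPlaneVersion approximates the guard added in
// pkg/util/utils.go: stacked control planes rely on kubeadm flags
// (join --control-plane, --upload-certs) that stabilized in v1.15,
// so older Kubernetes versions are rejected.
func checkMultiControlPlaneVersion(version string) error {
	parts := strings.Split(strings.TrimPrefix(version, "v"), ".")
	if len(parts) < 2 {
		return fmt.Errorf("unable to parse Kubernetes version %q", version)
	}
	minor, err := strconv.Atoi(parts[1])
	if err != nil {
		return fmt.Errorf("unable to parse minor version in %q: %v", version, err)
	}
	if minor < 15 {
		return fmt.Errorf("multi control plane requires Kubernetes 1.15+, current version %s is not supported", version)
	}
	return nil
}

func main() {
	for _, v := range []string{"v1.14.10", "v1.15.12", "v1.20.0"} {
		fmt.Printf("%s: %v\n", v, checkMultiControlPlaneVersion(v))
	}
}
```

With these inputs the sketch rejects v1.14.10 and accepts v1.15.12 and v1.20.0, which is the behavior the new KubernetesTooOld exit paths in node_add.go and start.go rely on.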