diff --git a/cmd/minikube/cmd/node_add.go b/cmd/minikube/cmd/node_add.go index 622a2d3d018d..5ba3ed0ba736 100644 --- a/cmd/minikube/cmd/node_add.go +++ b/cmd/minikube/cmd/node_add.go @@ -26,8 +26,10 @@ import ( "k8s.io/minikube/pkg/minikube/mustload" "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" + "k8s.io/minikube/pkg/minikube/out/register" "k8s.io/minikube/pkg/minikube/reason" "k8s.io/minikube/pkg/minikube/style" + "k8s.io/minikube/pkg/util" ) var ( @@ -42,6 +44,7 @@ var nodeAddCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { co := mustload.Healthy(ClusterFlagValue()) cc := co.Config + config.TagPrimaryControlPlane(cc) if driver.BareMetal(cc.Driver) { out.FailureT("none driver does not support multi-node clusters") @@ -49,14 +52,27 @@ var nodeAddCmd = &cobra.Command{ name := node.Name(len(cc.Nodes) + 1) - out.Step(style.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) + if cp { + out.Step(style.Happy, "Adding control plane node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) + } else { + out.Step(style.Happy, "Adding node {{.name}} to cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) + } // TODO: Deal with parameters better. Ideally we should be able to acceot any node-specific minikube start params here. n := config.Node{ - Name: name, - Worker: worker, - ControlPlane: cp, - KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, + Name: name, + Worker: worker, + ControlPlane: cp, + PrimaryControlPlane: false, + KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, + } + + if n.ControlPlane { + err := util.CheckMultiControlPlaneVersion(cc.KubernetesConfig.KubernetesVersion) + if err != nil { + exit.Error(reason.KubernetesTooOld, "target kubernetes version too old", err) + } + n.Port = cc.KubernetesConfig.NodePort } // Make sure to decrease the default amount of memory we use per VM if this is the first worker node @@ -70,6 +86,7 @@ var nodeAddCmd = &cobra.Command{ } } + register.Reg.SetStep(register.InitialSetup) if err := node.Add(cc, n, false); err != nil { _, err := maybeDeleteAndRetry(cmd, *cc, n, nil, err) if err != nil { diff --git a/cmd/minikube/cmd/node_start.go b/cmd/minikube/cmd/node_start.go index dda95ca8eb75..45b4025b1d64 100644 --- a/cmd/minikube/cmd/node_start.go +++ b/cmd/minikube/cmd/node_start.go @@ -56,7 +56,7 @@ var nodeStartCmd = &cobra.Command{ } register.Reg.SetStep(register.InitialSetup) - r, p, m, h, err := node.Provision(cc, n, n.ControlPlane, viper.GetBool(deleteOnFailure)) + r, p, m, h, err := node.Provision(cc, n, viper.GetBool(deleteOnFailure)) if err != nil { exit.Error(reason.GuestNodeProvision, "provisioning host for node", err) } @@ -71,7 +71,7 @@ var nodeStartCmd = &cobra.Command{ ExistingAddons: nil, } - _, err = node.Start(s, n.ControlPlane) + _, err = node.Start(s) if err != nil { _, err := maybeDeleteAndRetry(cmd, *cc, *n, nil, err) if err != nil { diff --git a/cmd/minikube/cmd/node_stop.go b/cmd/minikube/cmd/node_stop.go index 65c92f71f8af..5d747f2c9ae5 100644 --- a/cmd/minikube/cmd/node_stop.go +++ b/cmd/minikube/cmd/node_stop.go @@ -46,7 +46,7 @@ var nodeStopCmd = &cobra.Command{ } machineName := config.MachineName(*cc, *n) - + node.MustReset(*cc, *n, api, machineName) err = machine.StopHost(api, machineName) if err != nil { out.FatalT("Failed to stop node {{.name}}", out.V{"name": name}) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 13df195611fb..891a293703fe 100644 --- 
a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -131,6 +131,20 @@ func platform() string { func runStart(cmd *cobra.Command, args []string) { register.SetEventLogPath(localpath.EventLog(ClusterFlagValue())) + controlPlanesNum := viper.GetInt(controlPlanes) + nodesNum := viper.GetInt(nodes) + if controlPlanesNum > nodesNum { + out.WarningT(fmt.Sprintf("Number of control planes (%v) is greater than number of nodes (%v); increasing nodes to %v.", controlPlanesNum, nodesNum, controlPlanesNum)) + viper.Set(nodes, controlPlanesNum) + } + k8sVersion := viper.GetString(kubernetesVersion) + if controlPlanesNum > 1 { + err := util.CheckMultiControlPlaneVersion(k8sVersion) + if err != nil { + exit.Error(reason.KubernetesTooOld, "target kubernetes version too old", err) + } + } + out.SetJSON(outputFormat == "json") if err := pkgtrace.Initialize(viper.GetString(trace)); err != nil { exit.Message(reason.Usage, "error initializing tracing: {{.Error}}", out.V{"Error": err.Error()}) @@ -328,7 +342,7 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing * ssh.SetDefaultClient(ssh.External) } - mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, true, viper.GetBool(deleteOnFailure)) + mRunner, preExists, mAPI, host, err := node.Provision(&cc, &n, viper.GetBool(deleteOnFailure)) if err != nil { return node.Starter{}, err } @@ -345,7 +359,13 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing * } func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.ClusterConfig) (*kubeconfig.Settings, error) { - kubeconfig, err := node.Start(starter, true) + // TODO: Currently, we start the primary control plane first. If there are multiple control planes, + // the kube-apiserver keeps crashing while it waits for the other apiservers to respond, which blocks health checks. + // As a temporary workaround, we reset the stacked control planes before stopping them. + // To fix this properly, we could: + // - Delay the health check. + // - Start all control planes at the same time. + kubeconfig, err := node.Start(starter) if err != nil { kubeconfig, err = maybeDeleteAndRetry(cmd, *starter.Cfg, *starter.Node, starter.ExistingAddons, err) if err != nil { @@ -353,6 +373,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config. } } + numControlPlanes := viper.GetInt(controlPlanes) numNodes := viper.GetInt(nodes) if existing != nil { if numNodes > 1 { @@ -369,10 +390,15 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config. for i := 1; i < numNodes; i++ { nodeName := node.Name(i + 1) n := config.Node{ - Name: nodeName, - Worker: true, - ControlPlane: false, - KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion, + Name: nodeName, + Worker: true, + ControlPlane: false, + PrimaryControlPlane: false, + KubernetesVersion: starter.Cfg.KubernetesConfig.KubernetesVersion, + } + if i < numControlPlanes { + n.ControlPlane = true + n.Port = starter.Cfg.KubernetesConfig.NodePort } out.Ln("") // extra newline for clarity on the command line err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure)) @@ -382,7 +408,7 @@ func startWithDriver(cmd *cobra.Command, starter node.Starter, existing *config.
} } else { for _, n := range existing.Nodes { - if !n.ControlPlane { + if !n.PrimaryControlPlane { err := node.Add(starter.Cfg, n, viper.GetBool(deleteOnFailure)) if err != nil { return nil, errors.Wrap(err, "adding node") @@ -489,7 +515,7 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co cc := updateExistingConfigFromFlags(cmd, &existing) var kubeconfig *kubeconfig.Settings for _, n := range cc.Nodes { - r, p, m, h, err := node.Provision(&cc, &n, n.ControlPlane, false) + r, p, m, h, err := node.Provision(&cc, &n, false) s := node.Starter{ Runner: r, PreExists: p, @@ -504,7 +530,7 @@ func maybeDeleteAndRetry(cmd *cobra.Command, existing config.ClusterConfig, n co return nil, err } - k, err := node.Start(s, n.ControlPlane) + k, err := node.Start(s) if n.ControlPlane { kubeconfig = k } @@ -1190,11 +1216,12 @@ func createNode(cc config.ClusterConfig, kubeNodeName string, existing *config.C } cp := config.Node{ - Port: cc.KubernetesConfig.NodePort, - KubernetesVersion: getKubernetesVersion(&cc), - Name: kubeNodeName, - ControlPlane: true, - Worker: true, + Port: cc.KubernetesConfig.NodePort, + KubernetesVersion: getKubernetesVersion(&cc), + Name: kubeNodeName, + ControlPlane: true, + PrimaryControlPlane: true, + Worker: true, } cc.Nodes = []config.Node{cp} return cc, cp, nil diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index 8cd3fdc6acc0..6807f7c46739 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -102,6 +102,7 @@ const ( hostOnlyNicType = "host-only-nic-type" natNicType = "nat-nic-type" nodes = "nodes" + controlPlanes = "control-planes" preload = "preload" deleteOnFailure = "delete-on-failure" forceSystemd = "force-systemd" @@ -150,6 +151,7 @@ func initMinikubeFlags() { startCmd.Flags().Bool(autoUpdate, true, "If set, automatically updates drivers to the latest version. Defaults to true.") startCmd.Flags().Bool(installAddons, true, "If set, install addons. Defaults to true.") startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.") + startCmd.Flags().Int(controlPlanes, 1, "The number of control planes to spin up. Defaults to 1.") startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.") startCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.") startCmd.Flags().Bool(forceSystemd, false, "If set, force the container runtime to use sytemd as cgroup manager. 
Defaults to false.") @@ -412,6 +414,8 @@ func upgradeExistingConfig(cc *config.ClusterConfig) { cc.KicBaseImage = viper.GetString(kicBaseImage) klog.Infof("config upgrade: KicBaseImage=%s", cc.KicBaseImage) } + + config.TagPrimaryControlPlane(cc) } // updateExistingConfigFromFlags will update the existing config from the flags - used on a second start @@ -645,6 +649,8 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC cc.KicBaseImage = viper.GetString(kicBaseImage) } + config.TagPrimaryControlPlane(&cc) + return cc } diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index 43b191973b16..87257f31afed 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -51,10 +51,11 @@ import ( ) var ( - statusFormat string - output string - layout string - watch time.Duration + statusFormat string + output string + layout string + watch time.Duration + updatePrimaryControlPlaneTag bool ) const ( @@ -129,13 +130,18 @@ var ( // Status holds string representations of component states type Status struct { - Name string - Host string - Kubelet string + Name string + Host string + Kubelet string + + // APIServer indicates kube-apiserver status APIServer string Kubeconfig string Worker bool TimeToStop string + // IsAPIEndpoint indicates primary control plane (api endpoint) + IsAPIEndpoint bool + IP string } // ClusterState holds a cluster state representation @@ -177,18 +183,20 @@ const ( clusterNotRunningStatusFlag = 1 << 1 k8sNotRunningStatusFlag = 1 << 2 defaultStatusFormat = `{{.Name}} -type: Control Plane +type: Control Plane{{if .IsAPIEndpoint}} (Primary){{end}} host: {{.Host}} kubelet: {{.Kubelet}} apiserver: {{.APIServer}} kubeconfig: {{.Kubeconfig}} timeToStop: {{.TimeToStop}} +IP: {{.IP}} ` workerStatusFormat = `{{.Name}} type: Worker host: {{.Host}} kubelet: {{.Kubelet}} +IP: {{.IP}} ` ) @@ -211,6 +219,27 @@ var statusCmd = &cobra.Command{ cname := ClusterFlagValue() api, cc := mustload.Partial(cname) + // We should warn user if primary control plane no tagged + tagged := false + for i := range cc.Nodes { + if cc.Nodes[i].PrimaryControlPlane { + tagged = true + break + } + } + if !tagged { + if updatePrimaryControlPlaneTag { + out.Ln("Updating primary control plane tag...") + config.TagPrimaryControlPlane(cc) + err := config.SaveProfile(cc.Name, cc) + if err != nil { + exit.Error(reason.HostSaveProfile, "failed to save config", err) + } + } else { + out.Ln("There is no primary control plane, set --update-primary-control-plane-tag=true to update profile.") + } + } + duration := watch if !cmd.Flags().Changed("watch") || watch < 0 { duration = 0 @@ -302,15 +331,18 @@ func exitCode(statuses []*Status) int { func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status, error) { controlPlane := n.ControlPlane name := config.MachineName(cc, n) + apiEndpoint := n.PrimaryControlPlane st := &Status{ - Name: name, - Host: Nonexistent, - APIServer: Nonexistent, - Kubelet: Nonexistent, - Kubeconfig: Nonexistent, - Worker: !controlPlane, - TimeToStop: Nonexistent, + Name: name, + Host: Nonexistent, + APIServer: Nonexistent, + Kubelet: Nonexistent, + Kubeconfig: Nonexistent, + Worker: !controlPlane, + TimeToStop: Nonexistent, + IsAPIEndpoint: n.PrimaryControlPlane, + IP: n.IP, } hs, err := machine.Status(api, name) @@ -343,9 +375,11 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St st.Kubeconfig = Configured if !controlPlane { - st.Kubeconfig = Irrelevant st.APIServer = Irrelevant } + if 
!apiEndpoint { + st.Kubeconfig = Irrelevant + } host, err := machine.LoadHost(api, name) if err != nil { @@ -380,25 +414,29 @@ func nodeStatus(api libmachine.API, cc config.ClusterConfig, n config.Node) (*St } hostname, _, port, err := driver.ControlPlaneEndpoint(&cc, &n, host.DriverName) - if err != nil { - klog.Errorf("forwarded endpoint: %v", err) - st.Kubeconfig = Misconfigured - } else { - err := kubeconfig.VerifyEndpoint(cc.Name, hostname, port) + if st.Kubeconfig != Irrelevant { if err != nil { - klog.Errorf("kubeconfig endpoint: %v", err) + klog.Errorf("forwarded endpoint: %v", err) st.Kubeconfig = Misconfigured + } else { + err := kubeconfig.VerifyEndpoint(cc.Name, hostname, port) + if err != nil { + klog.Errorf("kubeconfig endpoint: %v", err) + st.Kubeconfig = Misconfigured + } } } - sta, err := kverify.APIServerStatus(cr, hostname, port) - klog.Infof("%s apiserver status = %s (err=%v)", name, stk, err) + if st.APIServer != Irrelevant { + sta, err := kverify.APIServerStatus(cr, hostname, port) + klog.Infof("%s apiserver status = %s (err=%v)", name, stk, err) - if err != nil { - klog.Errorln("Error apiserver status:", err) - st.APIServer = state.Error.String() - } else { - st.APIServer = sta.String() + if err != nil { + klog.Errorln("Error apiserver status:", err) + st.APIServer = state.Error.String() + } else { + st.APIServer = sta.String() + } } return st, nil @@ -415,6 +453,7 @@ For the list accessible variables for the template, see the struct values here: statusCmd.Flags().StringVarP(&nodeName, "node", "n", "", "The node to check status for. Defaults to control plane. Leave blank with default format for status on all nodes.") statusCmd.Flags().DurationVarP(&watch, "watch", "w", 1*time.Second, "Continuously listing/getting the status with optional interval duration.") statusCmd.Flags().Lookup("watch").NoOptDefVal = "1s" + statusCmd.Flags().BoolVar(&updatePrimaryControlPlaneTag, "update-primary-control-plane-tag", false, "Update primary control plane tag if there is no control plane marked as API endpoint.") } func statusText(st *Status, w io.Writer) error { diff --git a/cmd/minikube/cmd/status_test.go b/cmd/minikube/cmd/status_test.go index aa9f905c22af..f5a663e1d4b3 100644 --- a/cmd/minikube/cmd/status_test.go +++ b/cmd/minikube/cmd/status_test.go @@ -51,18 +51,18 @@ func TestStatusText(t *testing.T) { }{ { name: "ok", - state: &Status{Name: "minikube", Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured, TimeToStop: "10m"}, - want: "minikube\ntype: Control Plane\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\ntimeToStop: 10m\n\n", + state: &Status{Name: "minikube", Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: Configured, TimeToStop: "10m", IP: "192.168.39.10"}, + want: "minikube\ntype: Control Plane\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\ntimeToStop: 10m\nIP: 192.168.39.10\n\n", }, { name: "paused", - state: &Status{Name: "minikube", Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured, TimeToStop: Nonexistent}, - want: "minikube\ntype: Control Plane\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: Configured\ntimeToStop: Nonexistent\n\n", + state: &Status{Name: "minikube", Host: "Running", Kubelet: "Stopped", APIServer: "Paused", Kubeconfig: Configured, TimeToStop: Nonexistent, IP: "192.168.39.10"}, + want: "minikube\ntype: Control Plane\nhost: Running\nkubelet: Stopped\napiserver: Paused\nkubeconfig: 
Configured\ntimeToStop: Nonexistent\nIP: 192.168.39.10\n\n", }, { name: "down", - state: &Status{Name: "minikube", Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured, TimeToStop: Nonexistent}, - want: "minikube\ntype: Control Plane\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\ntimeToStop: Nonexistent\n\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", + state: &Status{Name: "minikube", Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: Misconfigured, TimeToStop: Nonexistent, IP: "192.168.39.10"}, + want: "minikube\ntype: Control Plane\nhost: Stopped\nkubelet: Stopped\napiserver: Stopped\nkubeconfig: Misconfigured\ntimeToStop: Nonexistent\nIP: 192.168.39.10\n\n\nWARNING: Your kubectl is pointing to stale minikube-vm.\nTo fix the kubectl context, run `minikube update-context`\n", }, } for _, tc := range tests { @@ -75,7 +75,7 @@ func TestStatusText(t *testing.T) { got := b.String() if got != tc.want { - t.Errorf("text(%+v) = %q, want: %q", tc.state, got, tc.want) + t.Errorf("text(%+v)\n got: %q\nwant: %q", tc.state, got, tc.want) } }) } diff --git a/cmd/minikube/cmd/stop.go b/cmd/minikube/cmd/stop.go index bc2e2d6b51f4..1a1c1e5cdc91 100644 --- a/cmd/minikube/cmd/stop.go +++ b/cmd/minikube/cmd/stop.go @@ -33,6 +33,7 @@ import ( "k8s.io/minikube/pkg/minikube/localpath" "k8s.io/minikube/pkg/minikube/machine" "k8s.io/minikube/pkg/minikube/mustload" + "k8s.io/minikube/pkg/minikube/node" "k8s.io/minikube/pkg/minikube/out" "k8s.io/minikube/pkg/minikube/out/register" "k8s.io/minikube/pkg/minikube/reason" @@ -134,16 +135,36 @@ func stopProfile(profile string) int { // end new code api, cc := mustload.Partial(profile) + + // ensure tag primary control plane properly + config.TagPrimaryControlPlane(cc) + defer api.Close() + primaryMachineName := "" for _, n := range cc.Nodes { machineName := config.MachineName(*cc, n) + if n.PrimaryControlPlane { + // Skip because we need to update etcd members + primaryMachineName = machineName + continue + } else if n.ControlPlane { + // Remove from primary control plane + node.MustReset(*cc, n, api, machineName) + } + nonexistent := stop(api, machineName) if !nonexistent { stoppedNodes++ } } + if primaryMachineName != "" { + nonexistent := stop(api, primaryMachineName) + if !nonexistent { + stoppedNodes++ + } + } if err := killMountProcess(); err != nil { out.WarningT("Unable to kill mount process: {{.error}}", out.V{"error": err}) diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index 4d18749ccd23..bc65f243433d 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -41,7 +41,7 @@ type Bootstrapper interface { WaitForNode(config.ClusterConfig, config.Node, time.Duration) error JoinCluster(config.ClusterConfig, config.Node, string) error UpdateNode(config.ClusterConfig, config.Node, cruntime.Manager) error - GenerateToken(config.ClusterConfig) (string, error) + GenerateToken(config.ClusterConfig, bool) (string, error) // LogCommands returns a map of log type to a command which will display that log. 
LogCommands(config.ClusterConfig, LogOptions) map[string]string SetupCerts(config.KubernetesConfig, config.Node) error diff --git a/pkg/minikube/bootstrapper/bsutil/extraconfig.go b/pkg/minikube/bootstrapper/bsutil/extraconfig.go index 7e8c04944afe..513621d87561 100644 --- a/pkg/minikube/bootstrapper/bsutil/extraconfig.go +++ b/pkg/minikube/bootstrapper/bsutil/extraconfig.go @@ -70,7 +70,7 @@ var KubeadmExtraArgsAllowed = map[int][]string{ "kubeconfig-dir", "node-name", "cri-socket", - "experimental-upload-certs", + "upload-certs", "certificate-key", "rootfs", "skip-phases", diff --git a/pkg/minikube/bootstrapper/bsutil/kubeadm.go b/pkg/minikube/bootstrapper/bsutil/kubeadm.go index 04ce5dd7669f..1a58ee1af267 100644 --- a/pkg/minikube/bootstrapper/bsutil/kubeadm.go +++ b/pkg/minikube/bootstrapper/bsutil/kubeadm.go @@ -128,7 +128,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Mana CgroupDriver: cgroupDriver, ClientCAFile: path.Join(vmpath.GuestKubernetesCertsDir, "ca.crt"), StaticPodPath: vmpath.GuestManifestsDir, - ControlPlaneAddress: constants.ControlPlaneAlias, + ControlPlaneAddress: constants.APIEndpointAlias, KubeProxyOptions: createKubeProxyOptions(k8s.ExtraOptions), } diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index 90fd9870faec..30713038f835 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -93,9 +93,13 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) copyableFiles = append(copyableFiles, certFile) } + endpoint := net.JoinHostPort(constants.APIEndpointAlias, fmt.Sprint(k8s.NodePort)) + if n.PrimaryControlPlane { + endpoint = net.JoinHostPort("localhost", fmt.Sprint(n.Port)) + } kcs := &kubeconfig.Settings{ ClusterName: n.Name, - ClusterServerAddress: fmt.Sprintf("https://%s", net.JoinHostPort("localhost", fmt.Sprint(n.Port))), + ClusterServerAddress: fmt.Sprintf("https://%s", endpoint), ClientCertificate: path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.crt"), ClientKey: path.Join(vmpath.GuestKubernetesCertsDir, "apiserver.key"), CertificateAuthority: path.Join(vmpath.GuestKubernetesCertsDir, "ca.crt"), @@ -111,7 +115,6 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) if err != nil { return nil, errors.Wrap(err, "encoding kubeconfig") } - if n.ControlPlane { kubeCfgFile := assets.NewMemoryAsset(data, vmpath.GuestPersistentDir, "kubeconfig", "0644") copyableFiles = append(copyableFiles, kubeCfgFile) @@ -183,7 +186,7 @@ func generateSharedCACerts() (CACerts, error) { func generateProfileCerts(k8s config.KubernetesConfig, n config.Node, ccs CACerts) ([]string, error) { // Only generate these certs for the api server - if !n.ControlPlane { + if !n.PrimaryControlPlane { return []string{}, nil } @@ -201,7 +204,7 @@ func generateProfileCerts(k8s config.KubernetesConfig, n config.Node, ccs CACert apiServerIPs = append(apiServerIPs, net.ParseIP(v)) } - apiServerNames := append(k8s.APIServerNames, k8s.APIServerName, constants.ControlPlaneAlias) + apiServerNames := append(k8s.APIServerNames, k8s.APIServerName, constants.APIEndpointAlias) apiServerAlternateNames := append( apiServerNames, util.GetAlternateDNS(k8s.DNSDomain)...) 
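Note on the SetupCerts change above: the kubeconfig written to each machine now points the primary control plane at its own apiserver over localhost, while every other node reaches the apiserver through the shared control-plane.minikube.internal alias. A minimal, self-contained sketch of that selection logic (not part of the patch; the constant and the port parameters are placeholders standing in for constants.APIEndpointAlias, Node.Port and KubernetesConfig.NodePort):

package main

import (
	"fmt"
	"net"
)

// Placeholder for constants.APIEndpointAlias from the patch.
const apiEndpointAlias = "control-plane.minikube.internal"

// clusterServerAddress mirrors the endpoint choice in SetupCerts:
// the primary control plane talks to its own apiserver on localhost,
// every other node goes through the shared DNS alias.
func clusterServerAddress(primary bool, localPort, aliasPort int) string {
	endpoint := net.JoinHostPort(apiEndpointAlias, fmt.Sprint(aliasPort))
	if primary {
		endpoint = net.JoinHostPort("localhost", fmt.Sprint(localPort))
	}
	return fmt.Sprintf("https://%s", endpoint)
}

func main() {
	fmt.Println(clusterServerAddress(true, 8443, 8443))  // https://localhost:8443
	fmt.Println(clusterServerAddress(false, 8443, 8443)) // https://control-plane.minikube.internal:8443
}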
diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 4b0fc2e10846..4876444acb5e 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -72,6 +72,8 @@ type Bootstrapper struct { contextName string } +var _ bootstrapper.Bootstrapper = (*Bootstrapper)(nil) + // NewBootstrapper creates a new kubeadm.Bootstrapper func NewBootstrapper(api libmachine.API, cc config.ClusterConfig, r command.Runner) (*Bootstrapper, error) { return &Bootstrapper{c: r, contextName: cc.Name, k8sClient: nil}, nil @@ -156,7 +158,7 @@ func (k *Bootstrapper) clearStaleConfigs(cfg config.ClusterConfig) error { return err } - endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(constants.ControlPlaneAlias, strconv.Itoa(cp.Port))) + endpoint := fmt.Sprintf("https://%s", net.JoinHostPort(constants.APIEndpointAlias, strconv.Itoa(cp.Port))) for _, path := range paths { _, err := k.c.RunCmd(exec.Command("sudo", "grep", endpoint, path)) if err != nil { @@ -445,9 +447,15 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time klog.Warningf("Couldn't ensure kubelet is started this might cause issues: %v", err) } // TODO: #7706: for better performance we could use k.client inside minikube to avoid asking for external IP:PORT - cp, err := config.PrimaryControlPlane(&cfg) - if err != nil { - return errors.Wrap(err, "get primary control plane") + var cp config.Node + var err error + if n.ControlPlane { + cp = n + } else { + cp, err = config.PrimaryControlPlane(&cfg) + if err != nil { + return errors.Wrap(err, "get primary control plane") + } } hostname, _, port, err := driver.ControlPlaneEndpoint(&cfg, &cp, cfg.Driver) if err != nil { @@ -710,6 +718,11 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC // Join the master by specifying its token joinCmd = fmt.Sprintf("%s --node-name=%s", joinCmd, config.MachineName(cc, n)) + if n.ControlPlane { + // Specify advertise address here because we are using interface eth1 and port 8443 (by default) + // We can't use `--config bsutil.KubeadmYamlPath` here because cannot mix '--config' with [certificate-key control-plane discovery-token-ca-cert-hash token] + joinCmd = fmt.Sprintf("%s --control-plane --apiserver-advertise-address %s --apiserver-bind-port %v", joinCmd, n.IP, n.Port) + } join := func() error { // reset first to clear any possibly existing state @@ -740,17 +753,57 @@ func (k *Bootstrapper) JoinCluster(cc config.ClusterConfig, n config.Node, joinC } // GenerateToken creates a token and returns the appropriate kubeadm join command to run, or the already existing token -func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) { +func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig, genCertKey bool) (string, error) { + ka := bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion) + // Take that generated token and use it to get a kubeadm join command - tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion))) + tokenCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("%s token create --print-join-command --ttl=0", ka)) r, err := k.c.RunCmd(tokenCmd) if err != nil { return "", errors.Wrap(err, "generating join command") } joinCmd := r.Stdout.String() - joinCmd = strings.Replace(joinCmd, "kubeadm", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion), 1) + joinCmd = 
strings.Replace(joinCmd, "kubeadm", ka, 1) joinCmd = fmt.Sprintf("%s --ignore-preflight-errors=all", strings.TrimSpace(joinCmd)) + if genCertKey { + // Generate the config first because `init phase upload-certs` cannot specify --cert-dir + confPath := path.Join(vmpath.GuestPersistentDir, "kubeadm-conf.yaml") + // TODO: `kubeadm config view` is deprecated + conf, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s config view", ka))) + if err != nil { + return "", errors.Wrap(err, "generate kubeadm-conf") + } + + confAsset := assets.NewMemoryAssetTarget(conf.Stdout.Bytes(), confPath, "0644") + err = bsutil.CopyFiles(k.c, []assets.CopyableFile{ + confAsset, + }) + if err != nil { + return "", errors.Wrap(err, "write kubeadm-conf") + } + + certCmd := fmt.Sprintf("%s init phase upload-certs --upload-certs --config %s", ka, confPath) + certKeyCmd := exec.Command("/bin/bash", "-c", certCmd) + certKeyResult, err := k.c.RunCmd(certKeyCmd) + if err != nil { + return "", errors.Wrap(err, "uploading certs") + } + // Currently we have to parse stdout manually to get the certificate key + outputs := strings.Split(certKeyResult.Stdout.String(), "\n") + + certKey := "" + for i, s := range outputs { + if strings.Contains(s, "Using certificate key") { + certKey = outputs[i+1] + break + } + } + if certKey == "" { + return "", errors.New("failed to parse the certificate key from kubeadm output") + } + joinCmd = fmt.Sprintf("%s --certificate-key %s", joinCmd, certKey) + } if cc.KubernetesConfig.CRISocket != "" { joinCmd = fmt.Sprintf("%s --cri-socket %s", joinCmd, cc.KubernetesConfig.CRISocket) } @@ -898,7 +951,7 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru return errors.Wrap(err, "control plane") } - if err := machine.AddHostAlias(k.c, constants.ControlPlaneAlias, net.ParseIP(cp.IP)); err != nil { + if err := machine.AddHostAlias(k.c, constants.APIEndpointAlias, net.ParseIP(cp.IP)); err != nil { return errors.Wrap(err, "host alias") } diff --git a/pkg/minikube/cni/kindnet.go b/pkg/minikube/cni/kindnet.go index cf7a18301900..1ba80cc46752 100644 --- a/pkg/minikube/cni/kindnet.go +++ b/pkg/minikube/cni/kindnet.go @@ -149,7 +149,7 @@ type KindNet struct { // String returns a string representation of this CNI func (c KindNet) String() string { - return "CNI" + return "KindNet" } // manifest returns a Kubernetes manifest for a CNI diff --git a/pkg/minikube/config/profile.go b/pkg/minikube/config/profile.go index a632f0485958..bdc0c7cdfa84 100644 --- a/pkg/minikube/config/profile.go +++ b/pkg/minikube/config/profile.go @@ -53,19 +53,25 @@ func (p *Profile) IsValid() bool { // PrimaryControlPlane gets the node specific config for the first created control plane func PrimaryControlPlane(cc *ClusterConfig) (Node, error) { for _, n := range cc.Nodes { - if n.ControlPlane { + if n.PrimaryControlPlane { + return n, nil + } + } + for _, n := range cc.Nodes { + if n.ControlPlane { // keep n.ControlPlane for backward compatibility return n, nil } } // This config is probably from 1.6 or earlier, let's convert it.
cp := Node{ - Name: cc.KubernetesConfig.NodeName, - IP: cc.KubernetesConfig.NodeIP, - Port: cc.KubernetesConfig.NodePort, - KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, - ControlPlane: true, - Worker: true, + Name: cc.KubernetesConfig.NodeName, + IP: cc.KubernetesConfig.NodeIP, + Port: cc.KubernetesConfig.NodePort, + KubernetesVersion: cc.KubernetesConfig.KubernetesVersion, + ControlPlane: true, + PrimaryControlPlane: true, + Worker: true, } cc.Nodes = []Node{cp} @@ -138,6 +144,20 @@ func SaveNode(cfg *ClusterConfig, node *Node) error { return SaveProfile(viper.GetString(ProfileName), cfg) } +func TagPrimaryControlPlane(cc *ClusterConfig) { + for i := range cc.Nodes { + if cc.Nodes[i].PrimaryControlPlane { + return + } + } + for i := range cc.Nodes { + if cc.Nodes[i].ControlPlane && cc.Nodes[i].Name == "" { + cc.Nodes[i].PrimaryControlPlane = true + break + } + } +} + // SaveProfile creates an profile out of the cfg and stores in $MINIKUBE_HOME/profiles//config.json func SaveProfile(name string, cfg *ClusterConfig, miniHome ...string) error { data, err := json.MarshalIndent(cfg, "", " ") @@ -291,7 +311,7 @@ func ProfileFolderPath(profile string, miniHome ...string) string { // MachineName returns the name of the machine, as seen by the hypervisor given the cluster and node names func MachineName(cc ClusterConfig, n Node) string { // For single node cluster, default to back to old naming - if len(cc.Nodes) == 1 || n.ControlPlane { + if len(cc.Nodes) == 1 || n.PrimaryControlPlane || (n.ControlPlane && n.Name == "") { return cc.Name } return fmt.Sprintf("%s-%s", cc.Name, n.Name) diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go index 00cc1b54d4d3..7bacd8ca6b9b 100644 --- a/pkg/minikube/config/types.go +++ b/pkg/minikube/config/types.go @@ -110,12 +110,13 @@ type KubernetesConfig struct { // Node contains information about specific nodes in a cluster type Node struct { - Name string - IP string - Port int - KubernetesVersion string - ControlPlane bool - Worker bool + Name string + IP string + Port int + KubernetesVersion string + PrimaryControlPlane bool + ControlPlane bool + Worker bool } // VersionedExtraOption holds information on flags to apply to a specific range diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index c14a97443f17..ae82f2cc7354 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -54,8 +54,8 @@ const ( DefaultServiceCIDR = "10.96.0.0/12" // HostAlias is a DNS alias to the the container/VM host IP HostAlias = "host.minikube.internal" - // ControlPlaneAlias is a DNS alias pointing to the apiserver frontend - ControlPlaneAlias = "control-plane.minikube.internal" + // APIEndpointAlias is a DNS alias pointing to the apiserver frontend + APIEndpointAlias = "control-plane.minikube.internal" // DockerHostEnv is used for docker daemon settings DockerHostEnv = "DOCKER_HOST" diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 672e52276d54..45d92d05010f 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -20,14 +20,17 @@ import ( "fmt" "os/exec" + "github.com/docker/machine/libmachine" "github.com/pkg/errors" "github.com/spf13/viper" - "k8s.io/klog/v2" "k8s.io/minikube/pkg/kapi" + "k8s.io/minikube/pkg/minikube/bootstrapper/bsutil" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/driver" + "k8s.io/minikube/pkg/minikube/exit" "k8s.io/minikube/pkg/minikube/machine" + "k8s.io/minikube/pkg/minikube/reason" ) // 
TODO: Share these between cluster and node packages @@ -42,7 +45,7 @@ func Add(cc *config.ClusterConfig, n config.Node, delOnFail bool) error { return errors.Wrap(err, "save node") } - r, p, m, h, err := Provision(cc, &n, false, delOnFail) + r, p, m, h, err := Provision(cc, &n, delOnFail) if err != nil { return err } @@ -56,7 +59,7 @@ func Add(cc *config.ClusterConfig, n config.Node, delOnFail bool) error { ExistingAddons: nil, } - _, err = Start(s, false) + _, err = Start(s) return err } @@ -84,6 +87,26 @@ func Delete(cc config.ClusterConfig, name string) (*config.Node, error) { return n, err } + // make a control-plane node leave the cluster (kubeadm reset) before it is deleted + if n.ControlPlane { + host, err := machine.LoadHost(api, m) + if err != nil { + return n, err + } + + runner, err := machine.CommandRunner(host) + if err != nil { + return n, err + } + + resetCmd := fmt.Sprintf("%s reset -f", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion)) + rc := exec.Command("/bin/bash", "-c", resetCmd) + _, err = runner.RunCmd(rc) + if err != nil { + klog.Errorf("Failed to reset kubeadm: %v", err) + } + } + // kubectl drain kubectl := kapi.KubectlBinaryPath(cc.KubernetesConfig.KubernetesVersion) cmd := exec.Command("sudo", "KUBECONFIG=/var/lib/minikube/kubeconfig", kubectl, "drain", m) @@ -155,3 +178,24 @@ func Save(cfg *config.ClusterConfig, node *config.Node) error { func Name(index int) string { return fmt.Sprintf("m%02d", index) } + +// MustReset resets a stacked control plane with kubeadm so it does not block the start of the primary control plane +// It exits minikube on failure +func MustReset(cc config.ClusterConfig, n config.Node, api libmachine.API, machineName string) { + if n.ControlPlane && !n.PrimaryControlPlane { + host, err := machine.LoadHost(api, machineName) + if err != nil { + exit.Error(reason.GuestLoadHost, "Error getting host", err) + } + runner, err := machine.CommandRunner(host) + if err != nil { + exit.Error(reason.InternalCommandRunner, "Failed to get command runner", err) + } + resetCmd := fmt.Sprintf("%s reset -f", bsutil.InvokeKubeadm(cc.KubernetesConfig.KubernetesVersion)) + rc := exec.Command("/bin/bash", "-c", resetCmd) + _, err = runner.RunCmd(rc) + if err != nil { + exit.Error(reason.GuestNodeReset, "Failed to reset kubeadm", err) + } + } +} diff --git a/pkg/minikube/node/start.go b/pkg/minikube/node/start.go index d1081fd7bb70..e2ca668b4af2 100644 --- a/pkg/minikube/node/start.go +++ b/pkg/minikube/node/start.go @@ -80,7 +80,8 @@ type Starter struct { } // Start spins up a guest and starts the Kubernetes node.
-func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { +func Start(starter Starter) (*kubeconfig.Settings, error) { + apiEndpointServer := starter.Node.PrimaryControlPlane // wait for preloaded tarball to finish downloading before configuring runtimes waitCacheRequiredImages(&cacheGroup) @@ -103,7 +104,8 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { var bs bootstrapper.Bootstrapper var kcs *kubeconfig.Settings - if apiServer { + if apiEndpointServer { + out.Step(style.Tip, "Preparing control plane node...") // Must be written before bootstrap, otherwise health checks may flake due to stale IP kcs = setupKubeconfig(starter.Host, starter.Cfg, starter.Node, starter.Cfg.Name) if err != nil { @@ -124,6 +126,7 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { return nil, errors.Wrap(err, "Failed kubeconfig update") } } else { + out.Step(style.Tip, "Preparing worker node...") bs, err = cluster.Bootstrapper(starter.MachineAPI, viper.GetString(cmdcfg.Bootstrapper), *starter.Cfg, starter.Runner) if err != nil { return nil, errors.Wrap(err, "Failed to get bootstrapper") @@ -163,24 +166,26 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { wg.Done() }() - if apiServer { - // special ops for none , like change minikube directory. + if apiEndpointServer { + // special ops for none, like change minikube directory. // multinode super doesn't work on the none driver if starter.Cfg.Driver == driver.None && len(starter.Cfg.Nodes) == 1 { prepareNone() } } else { // Make sure to use the command runner for the control plane to generate the join token + out.Step(style.Tip, "Preparing kubeadm...") cpBs, cpr, err := cluster.ControlPlaneBootstrapper(starter.MachineAPI, starter.Cfg, viper.GetString(cmdcfg.Bootstrapper)) if err != nil { return nil, errors.Wrap(err, "getting control plane bootstrapper") } - joinCmd, err := cpBs.GenerateToken(*starter.Cfg) + joinCmd, err := cpBs.GenerateToken(*starter.Cfg, starter.Node.ControlPlane) if err != nil { return nil, errors.Wrap(err, "generating join token") } + out.Step(style.Tip, "Joining cluster...") if err = bs.JoinCluster(*starter.Cfg, *starter.Node, joinCmd); err != nil { return nil, errors.Wrap(err, "joining cluster") } @@ -190,6 +195,7 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { return nil, errors.Wrap(err, "cni") } + out.Step(style.Tip, fmt.Sprintf("Applying CNI %s...", cnm.String())) if err := cnm.Apply(cpr); err != nil { return nil, errors.Wrap(err, "cni apply") } @@ -208,10 +214,10 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) { } // Provision provisions the machine/container for the node -func Provision(cc *config.ClusterConfig, n *config.Node, apiServer bool, delOnFail bool) (command.Runner, bool, libmachine.API, *host.Host, error) { +func Provision(cc *config.ClusterConfig, n *config.Node, delOnFail bool) (command.Runner, bool, libmachine.API, *host.Host, error) { register.Reg.SetStep(register.StartingNode) name := config.MachineName(*cc, *n) - if apiServer { + if n.ControlPlane { out.Step(style.ThumbsUp, "Starting control plane node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) } else { out.Step(style.ThumbsUp, "Starting node {{.name}} in cluster {{.cluster}}", out.V{"name": name, "cluster": cc.Name}) diff --git a/pkg/minikube/reason/reason.go b/pkg/minikube/reason/reason.go index ee406919d95b..92c75720e0ae 100644 --- a/pkg/minikube/reason/reason.go +++ 
b/pkg/minikube/reason/reason.go @@ -242,6 +242,7 @@ var ( GuestMount = Kind{ID: "GUEST_MOUNT", ExitCode: ExGuestError} GuestMountConflict = Kind{ID: "GUEST_MOUNT_CONFLICT", ExitCode: ExGuestConflict} GuestNodeAdd = Kind{ID: "GUEST_NODE_ADD", ExitCode: ExGuestError} + GuestNodeReset = Kind{ID: "GUEST_NODE_RESET", ExitCode: ExGuestError} GuestNodeDelete = Kind{ID: "GUEST_NODE_DELETE", ExitCode: ExGuestError} GuestNodeProvision = Kind{ID: "GUEST_NODE_PROVISION", ExitCode: ExGuestError} GuestNodeRetrieve = Kind{ID: "GUEST_NODE_RETRIEVE", ExitCode: ExGuestNotFound} diff --git a/pkg/util/utils.go b/pkg/util/utils.go index f5fe5a3b226a..a64954be48a9 100644 --- a/pkg/util/utils.go +++ b/pkg/util/utils.go @@ -109,3 +109,14 @@ func MaybeChownDirRecursiveToMinikubeUser(dir string) error { func ParseKubernetesVersion(version string) (semver.Version, error) { return semver.Make(version[1:]) } + +func CheckMultiControlPlaneVersion(version string) error { + ver, err := ParseKubernetesVersion(version) + if err != nil { + return err + } + if ver.Minor < 15 { + return errors.Errorf("Multi control plane requires Kubernetes 1.15+, current version %s is not supported.", version) + } + return nil +} diff --git a/site/content/en/docs/commands/start.md b/site/content/en/docs/commands/start.md index aac90df39d8b..4fdd96eabb66 100644 --- a/site/content/en/docs/commands/start.md +++ b/site/content/en/docs/commands/start.md @@ -30,6 +30,7 @@ minikube start [flags] --cache-images If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none. (default true) --cni string CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto) --container-runtime string The container runtime to be used (docker, cri-o, containerd). (default "docker") + --control-planes int The number of control planes to spin up. Defaults to 1. (default 1) --cpus int Number of CPUs allocated to Kubernetes. (default 2) --cri-socket string The cri socket path to be used. --delete-on-failure If set, delete the current cluster if start fails and try again. Defaults to false. @@ -47,7 +48,7 @@ minikube start [flags] --extra-config ExtraOption A set of key=value pairs that describe configuration that may be passed to different components. The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler - Valid kubeadm parameters: ignore-preflight-errors, dry-run, kubeconfig, kubeconfig-dir, node-name, cri-socket, experimental-upload-certs, certificate-key, rootfs, skip-phases, pod-network-cidr + Valid kubeadm parameters: ignore-preflight-errors, dry-run, kubeconfig, kubeconfig-dir, node-name, cri-socket, upload-certs, certificate-key, rootfs, skip-phases, pod-network-cidr --feature-gates string A set of key=value pairs that describe feature gates for alpha/experimental features. --force Force minikube to perform possibly dangerous operations --force-systemd If set, force the container runtime to use sytemd as cgroup manager. Defaults to false. 
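The CheckMultiControlPlaneVersion helper added in pkg/util/utils.go gates multi-control-plane clusters on Kubernetes 1.15+, where the kubeadm join --control-plane / upload-certs flow used above is available. A rough, standalone sketch of the same check (assumes the blang/semver package that ParseKubernetesVersion already relies on; the function name here is illustrative, not part of the patch):

package main

import (
	"fmt"

	"github.com/blang/semver"
)

// checkMultiControlPlaneVersion mirrors util.CheckMultiControlPlaneVersion:
// strip the leading "v", parse the semver, and require at least 1.15.
func checkMultiControlPlaneVersion(version string) error {
	ver, err := semver.Make(version[1:])
	if err != nil {
		return err
	}
	if ver.Minor < 15 { // as in the patch, only the minor version is checked
		return fmt.Errorf("multi control plane requires Kubernetes 1.15+, %s is not supported", version)
	}
	return nil
}

func main() {
	fmt.Println(checkMultiControlPlaneVersion("v1.14.10")) // rejected
	fmt.Println(checkMultiControlPlaneVersion("v1.19.2"))  // <nil>
}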
diff --git a/site/content/en/docs/commands/status.md b/site/content/en/docs/commands/status.md index e5c3c89e7dd5..824777edafc7 100644 --- a/site/content/en/docs/commands/status.md +++ b/site/content/en/docs/commands/status.md @@ -22,12 +22,13 @@ minikube status [flags] ### Options ``` - -f, --format string Go template format string for the status output. The format for Go templates can be found here: https://golang.org/pkg/text/template/ - For the list accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#Status (default "{{.Name}}\ntype: Control Plane\nhost: {{.Host}}\nkubelet: {{.Kubelet}}\napiserver: {{.APIServer}}\nkubeconfig: {{.Kubeconfig}}\ntimeToStop: {{.TimeToStop}}\n\n") - -l, --layout string output layout (EXPERIMENTAL, JSON only): 'nodes' or 'cluster' (default "nodes") - -n, --node string The node to check status for. Defaults to control plane. Leave blank with default format for status on all nodes. - -o, --output string minikube status --output OUTPUT. json, text (default "text") - -w, --watch duration[=1s] Continuously listing/getting the status with optional interval duration. (default 1s) + -f, --format string Go template format string for the status output. The format for Go templates can be found here: https://golang.org/pkg/text/template/ + For the list accessible variables for the template, see the struct values here: https://godoc.org/k8s.io/minikube/cmd/minikube/cmd#Status (default "{{.Name}}\ntype: Control Plane{{if .IsAPIEndpoint}} (Primary){{end}}\nhost: {{.Host}}\nkubelet: {{.Kubelet}}\napiserver: {{.APIServer}}\nkubeconfig: {{.Kubeconfig}}\ntimeToStop: {{.TimeToStop}}\nIP: {{.IP}}\n\n") + -l, --layout string output layout (EXPERIMENTAL, JSON only): 'nodes' or 'cluster' (default "nodes") + -n, --node string The node to check status for. Defaults to control plane. Leave blank with default format for status on all nodes. + -o, --output string minikube status --output OUTPUT. json, text (default "text") + --update-primary-control-plane-tag Update primary control plane tag if there is no control plane marked as API endpoint. + -w, --watch duration[=1s] Continuously listing/getting the status with optional interval duration. (default 1s) ``` ### Options inherited from parent commands diff --git a/test/integration/multinode_test.go b/test/integration/multinode_test.go index bad0bf781789..b17f1194e6d0 100644 --- a/test/integration/multinode_test.go +++ b/test/integration/multinode_test.go @@ -1,5 +1,3 @@ -// +build integration - /* Copyright 2020 The Kubernetes Authors All rights reserved. 
@@ -26,14 +24,24 @@ import ( "testing" ) +type clusterStatus struct { + running bool + totalNodes int + wantRunningNodes int + wantStoppedNodes int + + isAzure bool +} + +type validatorFunc func(context.Context, *testing.T, string, *clusterStatus) + func TestMultiNode(t *testing.T) { if NoneDriver() { t.Skip("none driver does not support multinode") } - type validatorFunc func(context.Context, *testing.T, string) profile := UniqueProfileName("multinode") - ctx, cancel := context.WithTimeout(context.Background(), Minutes(30)) + ctx, cancel := context.WithTimeout(context.Background(), Minutes(45)) defer CleanupWithLogs(t, profile, cancel) t.Run("serial", func(t *testing.T) { @@ -42,13 +50,33 @@ func TestMultiNode(t *testing.T) { validator validatorFunc }{ {"FreshStart2Nodes", validateMultiNodeStart}, + // Add worker node {"AddNode", validateAddNodeToMultiNode}, - {"StopNode", validateStopRunningNode}, - {"StartAfterStop", validateStartNodeAfterStop}, - {"DeleteNode", validateDeleteNodeFromMultiNode}, + {"StopNode", validateStopRunningNode(ThirdNodeName)}, + {"StartAfterStop", validateStartNodeAfterStop(ThirdNodeName)}, + {"DeleteNode", validateDeleteNodeFromMultiNode(ThirdNodeName, true)}, + // Add control plane node + {"AddControlPlaneNode", validateAddControlPlaneNodeToMultiNode}, + {"StopControlPlaneNode", validateStopRunningNode(ThirdNodeName)}, + {"StartControlPlaneAfterStop", validateStartNodeAfterStop(ThirdNodeName)}, + {"DeleteControlPlaneNode", validateDeleteNodeFromMultiNode(ThirdNodeName, true)}, + // Test cluster stop && start {"StopMultiNode", validateStopMultiNodeCluster}, {"RestartMultiNode", validateRestartMultiNodeCluster}, } + + s := &clusterStatus{} + + if DockerDriver() { + rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}")) + if err != nil { + t.Fatalf("docker is broken: %v", err) + } + if strings.Contains(rr.Stdout.String(), "azure") { + s.isAzure = true + } + } + for _, tc := range tests { tc := tc if ctx.Err() == context.DeadlineExceeded { @@ -56,13 +84,13 @@ func TestMultiNode(t *testing.T) { } t.Run(tc.name, func(t *testing.T) { defer PostMortemLogs(t, profile) - tc.validator(ctx, t, profile) + tc.validator(ctx, t, profile, s) }) } }) } -func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string) { +func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { // Start a 2 node cluster with the --nodes param startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory=2200", "--nodes=2", "-v=8", "--alsologtostderr"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) @@ -70,156 +98,82 @@ func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string) { t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err) } - // Make sure minikube status shows 2 nodes - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) - if err != nil { - t.Fatalf("failed to run minikube status. 
args %q : %v", rr.Command(), err) - } - - if strings.Count(rr.Stdout.String(), "host: Running") != 2 { - t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } - - if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 { - t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } - + s.startCluster() + s.addNode(2) + validateClusterStatus(ctx, t, profile, s) } -func validateAddNodeToMultiNode(ctx context.Context, t *testing.T, profile string) { +func validateAddNodeToMultiNode(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { // Add a node to the current cluster addArgs := []string{"node", "add", "-p", profile, "-v", "3", "--alsologtostderr"} rr, err := Run(t, exec.CommandContext(ctx, Target(), addArgs...)) if err != nil { t.Fatalf("failed to add node to current cluster. args %q : %v", rr.Command(), err) } - - // Make sure minikube status shows 3 nodes - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) - if err != nil { - t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) - } - - if strings.Count(rr.Stdout.String(), "host: Running") != 3 { - t.Errorf("status says all hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } - - if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 { - t.Errorf("status says all kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } + s.addNode(1) + validateClusterStatus(ctx, t, profile, s) } -func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) { - // Run minikube node stop on that node - rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", ThirdNodeName)) +func validateAddControlPlaneNodeToMultiNode(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { + // Add a node to the current cluster + addArgs := []string{"node", "add", "-p", profile, "-v", "3", "--alsologtostderr", "--control-plane"} + rr, err := Run(t, exec.CommandContext(ctx, Target(), addArgs...)) if err != nil { - t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err) - } - - // Run status again to see the stopped host - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) - // Exit code 7 means one host is stopped, which we are expecting - if err != nil && rr.ExitCode != 7 { - t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + t.Fatalf("failed to add control plane node to current cluster. args %q : %v", rr.Command(), err) } - // Make sure minikube status shows 2 running nodes and 1 stopped one - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) - if err != nil && rr.ExitCode != 7 { - t.Fatalf("failed to run minikube status. 
args %q : %v", rr.Command(), err) - } - - if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 { - t.Errorf("incorrect number of running kubelets: args %q: %v", rr.Command(), rr.Stdout.String()) - } - - if strings.Count(rr.Stdout.String(), "host: Stopped") != 1 { - t.Errorf("incorrect number of stopped hosts: args %q: %v", rr.Command(), rr.Stdout.String()) - } - - if strings.Count(rr.Stdout.String(), "kubelet: Stopped") != 1 { - t.Errorf("incorrect number of stopped kubelets: args %q: %v", rr.Command(), rr.Stdout.String()) - } + s.addNode(1) + validateClusterStatus(ctx, t, profile, s) } -func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile string) { - if DockerDriver() { - rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}")) +func validateStopRunningNode(nodeName string) validatorFunc { + return func(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { + // Run minikube node stop on that node + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", nodeName)) if err != nil { - t.Fatalf("docker is broken: %v", err) + t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err) } - if strings.Contains(rr.Stdout.String(), "azure") { - t.Skip("kic containers are not supported on docker's azure") - } - } - // Start the node back up - rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", ThirdNodeName, "--alsologtostderr")) - if err != nil { - t.Logf(rr.Stderr.String()) - t.Errorf("node start returned an error. args %q: %v", rr.Command(), err) - } - - // Make sure minikube status shows 3 running hosts - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) - if err != nil { - t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + s.stopNode() + validateClusterStatus(ctx, t, profile, s) } +} - if strings.Count(rr.Stdout.String(), "host: Running") != 3 { - t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } +func validateStartNodeAfterStop(nodeName string) validatorFunc { + return func(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { + if s.isAzure { + t.Skip("kic containers are not supported on docker's azure") + } - if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 { - t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } + // Start the node back up + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", nodeName, "--alsologtostderr")) + if err != nil { + t.Logf(rr.Stderr.String()) + t.Errorf("node start returned an error. args %q: %v", rr.Command(), err) + } - // Make sure kubectl can connect correctly - rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes")) - if err != nil { - t.Fatalf("failed to kubectl get nodes. args %q : %v", rr.Command(), err) + s.startNode() + validateClusterStatus(ctx, t, profile, s) } } -func validateStopMultiNodeCluster(ctx context.Context, t *testing.T, profile string) { +func validateStopMultiNodeCluster(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { // Run minikube stop on the cluster rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "stop")) if err != nil { t.Errorf("node stop returned an error. 
args %q: %v", rr.Command(), err) } - // Run status to see the stopped hosts - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status")) - // Exit code 7 means one host is stopped, which we are expecting - if err != nil && rr.ExitCode != 7 { - t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) - } - - // Make sure minikube status shows 2 stopped nodes - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) - if err != nil && rr.ExitCode != 7 { - t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) - } - - if strings.Count(rr.Stdout.String(), "host: Stopped") != 2 { - t.Errorf("incorrect number of stopped hosts: args %q: %v", rr.Command(), rr.Stdout.String()) - } - - if strings.Count(rr.Stdout.String(), "kubelet: Stopped") != 2 { - t.Errorf("incorrect number of stopped kubelets: args %q: %v", rr.Command(), rr.Stdout.String()) - } + s.stopCluster() + validateClusterStatus(ctx, t, profile, s) } -func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile string) { - if DockerDriver() { - rr, err := Run(t, exec.Command("docker", "version", "-f", "{{.Server.Version}}")) - if err != nil { - t.Fatalf("docker is broken: %v", err) - } - if strings.Contains(rr.Stdout.String(), "azure") { - t.Skip("kic containers are not supported on docker's azure") - } +func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { + if s.isAzure { + s.startCluster() + t.Skip("kic containers are not supported on docker's azure") } + // Restart a full cluster with minikube start startArgs := append([]string{"start", "-p", profile, "--wait=true", "-v=8", "--alsologtostderr"}, StartArgs()...) rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...)) @@ -227,19 +181,8 @@ func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err) } - // Make sure minikube status shows 2 running nodes - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) - if err != nil { - t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) - } - - if strings.Count(rr.Stdout.String(), "host: Running") != 2 { - t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Output()) - } - - if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 { - t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Output()) - } + s.startCluster() + validateClusterStatus(ctx, t, profile, s) // Make sure kubectl reports that all nodes are ready rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes")) @@ -247,64 +190,140 @@ func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err) } if strings.Count(rr.Stdout.String(), "NotReady") > 0 { - t.Errorf("expected 2 nodes to be Ready, got %v", rr.Output()) + t.Errorf("expected %v nodes to be Ready, got %v", s.wantRunningNodes, rr.Output()) } rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes", "-o", `go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'`)) if err != nil { t.Fatalf("failed to run kubectl get nodes. 
args %q : %v", rr.Command(), err) } - if strings.Count(rr.Stdout.String(), "True") != 2 { - t.Errorf("expected 2 nodes Ready status to be True, got %v", rr.Output()) + if strings.Count(rr.Stdout.String(), "True") != s.wantRunningNodes { + t.Errorf("expected %v nodes Ready status to be True, got %v", s.wantRunningNodes, rr.Output()) } } -func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile string) { - - // Start the node back up - rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "delete", ThirdNodeName)) - if err != nil { - t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err) - } +func validateDeleteNodeFromMultiNode(nodeName string, running bool) validatorFunc { + return func(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { + // Start the node back up + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "delete", nodeName)) + if err != nil { + t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err) + } - // Make sure status is back down to 2 hosts - rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) - if err != nil { - t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) - } + if running && !s.isAzure { + s.deleteRunningNode() + } else { + s.deleteStoppedNode() + } + validateClusterStatus(ctx, t, profile, s) - if strings.Count(rr.Stdout.String(), "host: Running") != 2 { - t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } + if DockerDriver() { + rr, err := Run(t, exec.Command("docker", "volume", "ls")) + if err != nil { + t.Errorf("failed to run %q : %v", rr.Command(), err) + } + if strings.Contains(rr.Stdout.String(), fmt.Sprintf("%s-%s", profile, ThirdNodeName)) { + t.Errorf("docker volume was not properly deleted: %s", rr.Stdout.String()) + } + } - if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 { - t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String()) - } + // Make sure kubectl knows the node is gone + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes")) + if err != nil { + t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err) + } + if strings.Count(rr.Stdout.String(), "NotReady") > 0 { + t.Errorf("expected %v nodes to be Ready, got %v", s.wantRunningNodes, rr.Output()) + } - if DockerDriver() { - rr, err := Run(t, exec.Command("docker", "volume", "ls")) + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes", "-o", `go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'`)) if err != nil { - t.Errorf("failed to run %q : %v", rr.Command(), err) + t.Fatalf("failed to run kubectl get nodes. 
args %q : %v", rr.Command(), err) } - if strings.Contains(rr.Stdout.String(), fmt.Sprintf("%s-%s", profile, ThirdNodeName)) { - t.Errorf("docker volume was not properly deleted: %s", rr.Stdout.String()) + if strings.Count(rr.Stdout.String(), "True") != s.wantRunningNodes { + t.Errorf("expected %v nodes Ready status to be True, got %v", s.wantRunningNodes, rr.Output()) } } +} - // Make sure kubectl knows the node is gone - rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes")) +func (s *clusterStatus) addNode(count int) { + s.totalNodes += count + s.wantRunningNodes += count +} + +func (s *clusterStatus) stopNode() { + s.wantRunningNodes-- + s.wantStoppedNodes++ +} + +func (s *clusterStatus) startNode() { + s.wantRunningNodes++ + s.wantStoppedNodes-- +} + +func (s *clusterStatus) deleteRunningNode() { + s.totalNodes-- + s.wantRunningNodes-- +} + +func (s *clusterStatus) deleteStoppedNode() { + s.totalNodes-- + s.wantStoppedNodes-- +} + +func (s *clusterStatus) stopCluster() { + s.running = false + s.wantRunningNodes = 0 + s.wantStoppedNodes = s.totalNodes +} + +func (s *clusterStatus) startCluster() { + s.running = true + s.wantRunningNodes = s.totalNodes + s.wantStoppedNodes = 0 +} + +// validateClusterStatus validates running/stopped kubelet/host count, check kubectl config and api serve connection. +func validateClusterStatus(ctx context.Context, t *testing.T, profile string, s *clusterStatus) { + // Make sure minikube status shows expected running nodes and stopped nodes + rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status", "--alsologtostderr")) if err != nil { - t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err) + if s.wantStoppedNodes > 0 || s.isAzure { // If isAzure, the start process skipped, so some hosts are stopped + // Exit code 7 means one host is stopped, which we are expecting + if rr.ExitCode != 7 { + t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + } + } else { + t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err) + } } - if strings.Count(rr.Stdout.String(), "NotReady") > 0 { - t.Errorf("expected 2 nodes to be Ready, got %v", rr.Output()) + var count int + + count = strings.Count(rr.Stdout.String(), "kubelet: Running") + if count != s.wantRunningNodes { + t.Errorf("incorrect number of running kubelets (want: %v, got %v): args %q: %v", s.wantRunningNodes, count, rr.Command(), rr.Stdout.String()) } - rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes", "-o", `go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'`)) - if err != nil { - t.Fatalf("failed to run kubectl get nodes. 
args %q : %v", rr.Command(), err) + count = strings.Count(rr.Stdout.String(), "kubelet: Stopped") + if count != s.wantStoppedNodes { + t.Errorf("incorrect number of stopped kubelets (want: %v, got %v): args %q: %v", s.wantStoppedNodes, count, rr.Command(), rr.Stdout.String()) } - if strings.Count(rr.Stdout.String(), "True") != 2 { - t.Errorf("expected 2 nodes Ready status to be True, got %v", rr.Output()) + + count = strings.Count(rr.Stdout.String(), "host: Running") + if count != s.wantRunningNodes { + t.Errorf("incorrect number of running hosts (want: %v, got %v): args %q: %v", s.wantRunningNodes, count, rr.Command(), rr.Stdout.String()) + } + + count = strings.Count(rr.Stdout.String(), "host: Stopped") + if count != s.wantStoppedNodes { + t.Errorf("incorrect number of stopped hosts (want: %v, got %v): args %q: %v", s.wantStoppedNodes, count, rr.Command(), rr.Stdout.String()) + } + + if s.running { + // Make sure kubectl can connect correctly + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes")) + if err != nil { + t.Fatalf("failed to kubectl get nodes. args %q : %v", rr.Command(), err) + } } }