From 8cd13c0af7c16bd7c3cbefd67e27b0a421bc3105 Mon Sep 17 00:00:00 2001
From: "W. Trevor King"
Date: Mon, 8 Apr 2019 10:36:36 -0700
Subject: [PATCH] cmd/openshift-install/waitfor: Rename from user-provided-infrastructure

There is no hard line between installer- and user-provided
infrastructure. Rename these commands to focus on what they'll do
instead of the work-flow into which we expect them to fit.

We're still working out how we can drop the router-CA injection to
avoid 'wait-for cluster-ready' surprising users by editing their
on-disk kubeconfig [1]. But that's mitigated somewhat by the fact
that addRouterCAToClusterCA is idempotent, because AppendCertsFromPEM
wraps AddCert [2] and AddCert checks to avoid duplicate certificates
[3].

[1]: https://github.com/openshift/installer/pull/1541
[2]: https://github.com/golang/go/blob/go1.12/src/crypto/x509/cert_pool.go#L144
[3]: https://github.com/golang/go/blob/go1.12/src/crypto/x509/cert_pool.go#L106-L109
---
 cmd/openshift-install/create.go  |  4 +-
 cmd/openshift-install/main.go    |  2 +-
 cmd/openshift-install/upi.go     | 98 --------------------------------
 cmd/openshift-install/waitfor.go | 79 +++++++++++++++++++++++++
 docs/user/aws/install_upi.md     |  4 +-
 docs/user/metal/install_upi.md   |  8 +--
 6 files changed, 88 insertions(+), 107 deletions(-)
 delete mode 100644 cmd/openshift-install/upi.go
 create mode 100644 cmd/openshift-install/waitfor.go

diff --git a/cmd/openshift-install/create.go b/cmd/openshift-install/create.go
index d6a98dbbdcd..26f843cc3a1 100644
--- a/cmd/openshift-install/create.go
+++ b/cmd/openshift-install/create.go
@@ -105,7 +105,7 @@ var (
 					logrus.Fatal(err)
 				}
 
-				err = finish(ctx, config, rootOpts.dir)
+				err = waitForClusterReady(ctx, config, rootOpts.dir)
 				if err != nil {
 					logrus.Fatal(err)
 				}
@@ -433,7 +433,7 @@ func logComplete(directory, consoleURL string) error {
 	return nil
 }
 
-func finish(ctx context.Context, config *rest.Config, directory string) error {
+func waitForClusterReady(ctx context.Context, config *rest.Config, directory string) error {
 	if err := waitForInitializedCluster(ctx, config); err != nil {
 		return err
 	}
diff --git a/cmd/openshift-install/main.go b/cmd/openshift-install/main.go
index 51b3a9fd8c8..d5056b2422c 100644
--- a/cmd/openshift-install/main.go
+++ b/cmd/openshift-install/main.go
@@ -46,7 +46,7 @@ func installerMain() {
 	for _, subCmd := range []*cobra.Command{
 		newCreateCmd(),
 		newDestroyCmd(),
-		newUPICmd(),
+		newWaitForCmd(),
 		newVersionCmd(),
 		newGraphCmd(),
 		newCompletionCmd(),
diff --git a/cmd/openshift-install/upi.go b/cmd/openshift-install/upi.go
deleted file mode 100644
index 7bdb014f3af..00000000000
--- a/cmd/openshift-install/upi.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package main
-
-import (
-	"context"
-	"path/filepath"
-
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-	"github.com/spf13/cobra"
-	"k8s.io/client-go/tools/clientcmd"
-)
-
-var (
-	upiLong = `Entry-points for user-provided infrastructure.
-
-Most users will want to use 'create cluster' to have the installer
-create the required infrastructure for their cluster. But in some
-installations the infrastructure needs to be adapted in ways that
-installer-created infrastructure does not support. This command
-provides entry points to support the following workflow:
-
-1. Call 'create ignition-configs' to create the bootstrap Ignition
-   config and admin kubeconfig.
-2. Creates all required cluster resources, after which the cluster
-   will begin bootstrapping.
-3. Call 'user-provided-infrastructure bootstrap-complete' to wait
-   until the bootstrap phase has completed.
-4. Destroy the bootstrap resources.
-5. Call 'user-provided-infrastructure finish' to wait until the
-   cluster finishes deploying its initial version. This also
-   retrieves the router certificate authority from the cluster and
-   inserts it into the admin kubeconfig.`
-)
-
-func newUPICmd() *cobra.Command {
-	cmd := &cobra.Command{
-		Use:     "user-provided-infrastructure",
-		Aliases: []string{"upi"},
-		Short:   "Entry-points for user-provided infrastructure",
-		Long:    upiLong,
-		RunE: func(cmd *cobra.Command, args []string) error {
-			return cmd.Help()
-		},
-	}
-	cmd.AddCommand(newUPIBootstrapCompleteCmd())
-	cmd.AddCommand(newUPIFinishCmd())
-	return cmd
-}
-
-func newUPIBootstrapCompleteCmd() *cobra.Command {
-	return &cobra.Command{
-		Use:   "bootstrap-complete",
-		Short: "Wait until cluster bootstrapping has completed",
-		Args:  cobra.ExactArgs(0),
-		Run: func(_ *cobra.Command, _ []string) {
-			ctx := context.Background()
-
-			cleanup := setupFileHook(rootOpts.dir)
-			defer cleanup()
-
-			config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(rootOpts.dir, "auth", "kubeconfig"))
-			if err != nil {
-				logrus.Fatal(errors.Wrap(err, "loading kubeconfig"))
-			}
-
-			err = waitForBootstrapComplete(ctx, config, rootOpts.dir)
-			if err != nil {
-				logrus.Fatal(err)
-			}
-
-			logrus.Info("It is now safe to remove the bootstrap resources")
-		},
-	}
-}
-
-func newUPIFinishCmd() *cobra.Command {
-	return &cobra.Command{
-		Use:   "finish",
-		Short: "Wait for the cluster to finish updating and update local resources",
-		Args:  cobra.ExactArgs(0),
-		Run: func(cmd *cobra.Command, args []string) {
-			ctx := context.Background()
-
-			cleanup := setupFileHook(rootOpts.dir)
-			defer cleanup()
-
-			config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(rootOpts.dir, "auth", "kubeconfig"))
-			if err != nil {
-				logrus.Fatal(errors.Wrap(err, "loading kubeconfig"))
-			}
-
-			err = finish(ctx, config, rootOpts.dir)
-			if err != nil {
-				logrus.Fatal(err)
-			}
-		},
-	}
-}
diff --git a/cmd/openshift-install/waitfor.go b/cmd/openshift-install/waitfor.go
new file mode 100644
index 00000000000..868b93e9daf
--- /dev/null
+++ b/cmd/openshift-install/waitfor.go
@@ -0,0 +1,79 @@
+package main
+
+import (
+	"context"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"github.com/spf13/cobra"
+	"k8s.io/client-go/tools/clientcmd"
+)
+
+func newWaitForCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "wait-for",
+		Short: "Wait for install-time events",
+		Long: `Wait for install-time events.
+
+'create cluster' has a few stages that wait for cluster events. But
+these waits can also be useful on their own. This subcommand exposes
+them directly.`,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			return cmd.Help()
+		},
+	}
+	cmd.AddCommand(newWaitForBootstrapCompleteCmd())
+	cmd.AddCommand(newWaitForClusterReadyCmd())
+	return cmd
+}
+
+func newWaitForBootstrapCompleteCmd() *cobra.Command {
+	return &cobra.Command{
+		Use:   "bootstrap-complete",
+		Short: "Wait until cluster bootstrapping has completed",
+		Args:  cobra.ExactArgs(0),
+		Run: func(_ *cobra.Command, _ []string) {
+			ctx := context.Background()
+
+			cleanup := setupFileHook(rootOpts.dir)
+			defer cleanup()
+
+			config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(rootOpts.dir, "auth", "kubeconfig"))
+			if err != nil {
+				logrus.Fatal(errors.Wrap(err, "loading kubeconfig"))
+			}
+
+			err = waitForBootstrapComplete(ctx, config, rootOpts.dir)
+			if err != nil {
+				logrus.Fatal(err)
+			}
+
+			logrus.Info("It is now safe to remove the bootstrap resources")
+		},
+	}
+}
+
+func newWaitForClusterReadyCmd() *cobra.Command {
+	return &cobra.Command{
+		Use:   "cluster-ready",
+		Short: "Wait until the cluster is ready",
+		Args:  cobra.ExactArgs(0),
+		Run: func(cmd *cobra.Command, args []string) {
+			ctx := context.Background()
+
+			cleanup := setupFileHook(rootOpts.dir)
+			defer cleanup()
+
+			config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(rootOpts.dir, "auth", "kubeconfig"))
+			if err != nil {
+				logrus.Fatal(errors.Wrap(err, "loading kubeconfig"))
+			}
+
+			err = waitForClusterReady(ctx, config, rootOpts.dir)
+			if err != nil {
+				logrus.Fatal(err)
+			}
+		},
+	}
+}
diff --git a/docs/user/aws/install_upi.md b/docs/user/aws/install_upi.md
index 7ce9517c0df..132510eb47c 100644
--- a/docs/user/aws/install_upi.md
+++ b/docs/user/aws/install_upi.md
@@ -84,7 +84,7 @@ and load balancer configuration.
 ## Monitor for `bootstrap-complete` and Initialization
 
 ```console
-$ bin/openshift-install user-provided-infrastructure bootstrap-complete
+$ bin/openshift-install wait-for bootstrap-complete
 INFO Waiting up to 30m0s for the Kubernetes API at https://api.test.example.com:6443...
 INFO API v1.12.4+c53f462 up
 INFO Waiting up to 30m0s for the bootstrap-complete event...
@@ -248,7 +248,7 @@ TODO: Identify changes needed to Router or Ingress for DNS `*.apps` registration
 ## Monitor for Cluster Completion
 
 ```console
-$ bin/openshift-install user-provided-infrastructure finish
+$ bin/openshift-install wait-for cluster-ready
 INFO Waiting up to 30m0s for the cluster to initialize...
 ```
diff --git a/docs/user/metal/install_upi.md b/docs/user/metal/install_upi.md
index 8f4694c63d3..1d90f0baf7b 100644
--- a/docs/user/metal/install_upi.md
+++ b/docs/user/metal/install_upi.md
@@ -202,10 +202,10 @@ TODO RHEL CoreOS does not have assets for bare-metal.
 
 ### Monitor for bootstrap-complete
 
-The administrators can use the `upi bootstrap-complete` target of the OpenShift Installer to monitor cluster bootstrapping. The command succeeds when it notices `bootstrap-complete` event from Kubernetes APIServer. This event is generated by the bootstrap machine after the Kubernetes APIServer has been bootstrapped on the control plane machines. For example,
+The administrators can use the `wait-for bootstrap-complete` target of the OpenShift Installer to monitor cluster bootstrapping. The command succeeds when it notices the `bootstrap-complete` event from the Kubernetes APIServer. This event is generated by the bootstrap machine after the Kubernetes APIServer has been bootstrapped on the control plane machines. For example,
 
 ```console
-$ openshift-install --dir test-bare-metal upi bootstrap-complete
+$ openshift-install --dir test-bare-metal wait-for bootstrap-complete
 INFO Waiting up to 30m0s for the Kubernetes API at https://api.test.example.com:6443...
 INFO API v1.12.4+c53f462 up
 INFO Waiting up to 30m0s for the bootstrap-complete event...
@@ -213,10 +213,10 @@ INFO Waiting up to 30m0s for the bootstrap-complete event...
 
 ## Monitor for cluster completion
 
-The administrators can use the `upi finish` target of the OpenShift Installer to monitor cluster completion. The command succeeds when it notices that Cluster Version Operator has completed rolling out the OpenShift cluster from Kubernetes APIServer.
+The administrators can use the `wait-for cluster-ready` target of the OpenShift Installer to monitor cluster completion. The command succeeds when it notices that the Cluster Version Operator has completed rolling out the OpenShift cluster from the Kubernetes APIServer.
 
 ```console
-$ openshift-install upi finish
+$ openshift-install wait-for cluster-ready
 INFO Waiting up to 30m0s for the cluster to initialize...
 ```
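For readers who want to check the idempotency argument in the commit message above, here is a small standalone sketch (not part of the patch). It builds a throwaway self-signed certificate purely as a stand-in for the router CA and appends the same PEM bundle to a crypto/x509 CertPool twice; in the Go 1.12 sources linked above, AppendCertsFromPEM hands each parsed certificate to AddCert, and AddCert skips certificates already in the pool, so the second append leaves the pool unchanged.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"time"
)

func main() {
	// Generate a throwaway self-signed CA; it merely stands in for the
	// router CA that addRouterCAToClusterCA injects into the kubeconfig.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "example-router-ca"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(time.Hour),
		IsCA:                  true,
		BasicConstraintsValid: true,
	}
	der, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	caPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})

	// Append the same PEM twice: the certificate is parsed both times,
	// but the pool only ever holds one copy of it.
	pool := x509.NewCertPool()
	fmt.Println(pool.AppendCertsFromPEM(caPEM)) // true
	fmt.Println(pool.AppendCertsFromPEM(caPEM)) // true (parsed again, not re-added)
	fmt.Println(len(pool.Subjects()))           // 1
}
```

Running the sketch prints `true`, `true`, `1`: the duplicate append is a no-op, which is the property the commit message relies on to make repeated 'wait-for cluster-ready' runs against an already-updated kubeconfig harmless.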