From 6c03a5f557aef1e2ca629279e29c60cc67423b8a Mon Sep 17 00:00:00 2001
From: Chuck Ha
Date: Wed, 17 Jul 2019 16:36:43 -0400
Subject: [PATCH] Fix panics

Signed-off-by: Chuck Ha
---
 Dockerfile               |   1 -
 cmd/capd-manager/main.go |  38 +++++++-----
 cmd/capdctl/main.go      | 130 +++++++++++++++++++++++++--------
 cmd/kind-test/main.go    |  17 -----
 logger/logger.go         |  33 ----------
 5 files changed, 107 insertions(+), 112 deletions(-)
 delete mode 100644 logger/logger.go

diff --git a/Dockerfile b/Dockerfile
index b4314b2..dbdcd9d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -22,7 +22,6 @@
 ADD cmd cmd
 ADD actuators actuators
 ADD kind kind
 ADD third_party third_party
-ADD logger logger
 RUN go install -v ./cmd/capd-manager
 RUN curl https://get.docker.com | sh

diff --git a/cmd/capd-manager/main.go b/cmd/capd-manager/main.go
index 4189bf2..f25c45d 100644
--- a/cmd/capd-manager/main.go
+++ b/cmd/capd-manager/main.go
@@ -18,13 +18,13 @@ package main

 import (
 	"flag"
+	"os"
 	"time"

 	"k8s.io/client-go/kubernetes"
 	"k8s.io/klog"
 	"k8s.io/klog/klogr"
 	"sigs.k8s.io/cluster-api-provider-docker/actuators"
-	"sigs.k8s.io/cluster-api-provider-docker/logger"
 	"sigs.k8s.io/cluster-api/pkg/apis"
 	"sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
 	"sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset"
@@ -36,12 +36,17 @@ import (
 )

 func main() {
+	// Must set up klog for the cluster api loggers
 	klog.InitFlags(flag.CommandLine)
 	flag.Parse()

+	log := klogr.New()
+	setupLogger := log.WithName("setup")
+
 	cfg, err := config.GetConfig()
 	if err != nil {
-		panic(err)
+		setupLogger.Error(err, "failed to get cluster config")
+		os.Exit(1)
 	}

 	// Setup a Manager
@@ -52,25 +57,26 @@ func main() {
 	mgr, err := manager.New(cfg, opts)
 	if err != nil {
-		panic(err)
+		setupLogger.Error(err, "failed to create a manager")
+		os.Exit(1)
 	}

 	k8sclientset, err := kubernetes.NewForConfig(cfg)
 	if err != nil {
-		panic(err)
+		setupLogger.Error(err, "failed to get a kubernetes clientset")
+		os.Exit(1)
 	}

 	cs, err := clientset.NewForConfig(cfg)
 	if err != nil {
-		panic(err)
+		setupLogger.Error(err, "failed to get a cluster api clientset")
+		os.Exit(1)
 	}

-	clusterLogger := logger.Log{}
-	clusterLogger.Logger = klogr.New().WithName("[cluster-actuator]")
+	clusterLogger := log.WithName("cluster-actuator")
 	clusterActuator := actuators.Cluster{
 		Log: clusterLogger,
 	}

-	machineLogger := logger.Log{}
-	machineLogger.Logger = klogr.New().WithName("[machine-actuator]")
+	machineLogger := log.WithName("machine-actuator")
 	machineActuator := actuators.Machine{
 		Core: k8sclientset.CoreV1(),
@@ -81,19 +87,23 @@ func main() {
 	// Register our cluster deployer (the interface is in clusterctl and we define the Deployer interface on the actuator)
 	common.RegisterClusterProvisioner("docker", clusterActuator)
 	if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
-		panic(err)
+		setupLogger.Error(err, "failed to apply cluster API types to our scheme")
+		os.Exit(1)
 	}

 	if err := capimachine.AddWithActuator(mgr, &machineActuator); err != nil {
-		panic(err)
+		setupLogger.Error(err, "failed to install the machine actuator")
+		os.Exit(1)
 	}

 	if err := capicluster.AddWithActuator(mgr, &clusterActuator); err != nil {
-		panic(err)
+		setupLogger.Error(err, "failed to install the cluster actuator")
+		os.Exit(1)
 	}

-	klogr.New().Info("Starting the controller")
+	setupLogger.Info("starting the manager")
 	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
-		panic(err)
+		setupLogger.Error(err, "failed to start the manager")
+		os.Exit(1)
 	}
 }
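
Note: the pattern above (a named klogr logger followed by os.Exit(1)) is what replaces each panic(err) in this file. A minimal standalone sketch of the same idea; doSetup is illustrative and not part of this patch:

package main

import (
	"flag"
	"os"

	"k8s.io/klog"
	"k8s.io/klog/klogr"
)

// doSetup is an illustrative stand-in for the real initialization steps
// (config loading, manager construction, actuator registration).
func doSetup() error { return nil }

func main() {
	// klog must be initialized before any klogr-backed logger is used.
	klog.InitFlags(flag.CommandLine)
	flag.Parse()

	setupLogger := klogr.New().WithName("setup")

	if err := doSetup(); err != nil {
		// Log the failure with context, then exit non-zero instead of panicking.
		setupLogger.Error(err, "failed to complete setup")
		os.Exit(1)
	}
	setupLogger.Info("setup complete")
}

Exiting via os.Exit(1) keeps the non-zero status that panic provided, while emitting a structured log line instead of a goroutine dump.
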
diff --git a/cmd/capdctl/main.go b/cmd/capdctl/main.go
index 6a8b282..1e2d7d2 100644
--- a/cmd/capdctl/main.go
+++ b/cmd/capdctl/main.go
@@ -23,16 +23,13 @@ import (
 	"fmt"
 	"os"

+	"github.com/pkg/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/klog"
 	"sigs.k8s.io/cluster-api-provider-docker/kind/controlplane"
 	"sigs.k8s.io/cluster-api-provider-docker/objects"
 	crclient "sigs.k8s.io/controller-runtime/pkg/client"
 )

-// TODO: Generate the RBAC stuff from somewhere instead of copy pasta
-
 const (
 	// Important to keep this consistent.
 	controlPlaneSet = "controlplane"
@@ -88,9 +85,6 @@ func main() {
 	machineDeploymentOpts := new(machineDeploymentOptions)
 	machineDeploymentOpts.initFlags(machineDeployment)

-	kflags := flag.NewFlagSet("klog", flag.ExitOnError)
-	klog.InitFlags(kflags)
-
 	if len(os.Args) < 2 {
 		fmt.Println("At least one subcommand is requied.")
 		fmt.Println(usage())
@@ -99,31 +93,69 @@ func main() {
 	switch os.Args[1] {
 	case "setup":
-		setup.Parse(os.Args[2:])
-		makeManagementCluster(*managementClusterName, *version, *capdImage, *capiImage)
+		if err := setup.Parse(os.Args[2:]); err != nil {
+			fmt.Printf("%+v\n", err)
+			os.Exit(1)
+		}
+		if err := makeManagementCluster(*managementClusterName, *version, *capdImage, *capiImage); err != nil {
+			fmt.Printf("%+v\n", err)
+			os.Exit(1)
+		}
 	case "apply":
-		kflags.Parse(os.Args[2:])
-		applyControlPlane(*managementClusterName, *version, *capiImage, *capdImage)
+		if err := applyControlPlane(*managementClusterName, *version, *capiImage, *capdImage); err != nil {
+			fmt.Printf("%+v\n", err)
+			os.Exit(1)
+		}
 	case "control-plane":
-		controlPlane.Parse(os.Args[2:])
-		fmt.Fprintf(os.Stdout, machineYAML(controlPlaneOpts))
+		if err := controlPlane.Parse(os.Args[2:]); err != nil {
+			fmt.Printf("%+v\n", err)
+			os.Exit(1)
+		}
+		m, err := machineYAML(controlPlaneOpts)
+		if err != nil {
+			fmt.Printf("%+v\n", err)
+			os.Exit(1)
+		}
+		fmt.Fprintf(os.Stdout, m)
 	case "worker":
-		worker.Parse(os.Args[2:])
-		fmt.Fprintf(os.Stdout, machineYAML(workerOpts))
+		if err := worker.Parse(os.Args[2:]); err != nil {
+			fmt.Printf("%+v\n", err)
+			os.Exit(1)
+		}
+		m, err := machineYAML(workerOpts)
+		if err != nil {
+			fmt.Printf("%+v\n", err)
+			os.Exit(1)
+		}
+		fmt.Fprintf(os.Stdout, m)
 	case "cluster":
-		cluster.Parse(os.Args[2:])
-		fmt.Fprintf(os.Stdout, clusterYAML(*clusterName, *clusterNamespace))
+		if err := cluster.Parse(os.Args[2:]); err != nil {
+			fmt.Printf("%+v\n", err)
+			os.Exit(1)
+		}
+		c, err := clusterYAML(*clusterName, *clusterNamespace)
+		if err != nil {
+			fmt.Printf("%+v", err)
+			os.Exit(1)
+		}
+		fmt.Fprintf(os.Stdout, c)
 	case "machine-deployment":
-		machineDeployment.Parse(os.Args[2:])
-		fmt.Fprint(os.Stdout, machineDeploymentYAML(machineDeploymentOpts))
+		if err := machineDeployment.Parse(os.Args[2:]); err != nil {
+			fmt.Printf("%+v\n", err)
+			os.Exit(1)
+		}
+		md, err := machineDeploymentYAML(machineDeploymentOpts)
+		if err != nil {
+			fmt.Printf("%+v\n", err)
+			os.Exit(1)
+		}
+		fmt.Fprint(os.Stdout, md)
 	case "help":
 		fmt.Println(usage())
 	default:
 		fmt.Println(usage())
 		os.Exit(1)
 	}
-
-	klog.Flush()
 }

 func usage() string {
@@ -154,32 +186,35 @@ subcommands are:
 `
 }

-func clusterYAML(clusterName, namespace string) string {
+func clusterYAML(clusterName, namespace string) (string, error) {
 	cluster := objects.GetCluster(clusterName, namespace)
-	return marshal(&cluster)
+	b, err := json.Marshal(&cluster)
+	if err != nil {
+		return "", errors.WithStack(err)
+	}
+	return string(b), nil
 }

-func machineYAML(opts *machineOptions) string {
+func machineYAML(opts *machineOptions) (string, error) {
 	machine := objects.GetMachine(*opts.name, *opts.namespace, *opts.clusterName, *opts.set, *opts.version)
-	return marshal(&machine)
+	b, err := json.Marshal(&machine)
+	if err != nil {
+		return "", errors.WithStack(err)
+	}
+	return string(b), nil
 }

-func machineDeploymentYAML(opts *machineDeploymentOptions) string {
+func machineDeploymentYAML(opts *machineDeploymentOptions) (string, error) {
 	machineDeploy := objects.GetMachineDeployment(*opts.name, *opts.namespace, *opts.clusterName, *opts.kubeletVersion, int32(*opts.replicas))
-	return marshal(&machineDeploy)
-
-}
-
-func marshal(obj runtime.Object) string {
-	b, err := json.Marshal(obj)
-	// TODO don't panic on the error
+	b, err := json.Marshal(&machineDeploy)
 	if err != nil {
-		panic(err)
+		return "", errors.WithStack(err)
 	}
-	return string(b)
+	return string(b), nil
+
 }

-func makeManagementCluster(clusterName, capiVersion, capdImage, capiImageOverride string) {
+func makeManagementCluster(clusterName, capiVersion, capdImage, capiImageOverride string) error {
 	fmt.Println("Creating a brand new cluster")
 	capiImage := fmt.Sprintf("us.gcr.io/k8s-artifacts-prod/cluster-api/cluster-api-controller:%s", capiVersion)
 	if capiImageOverride != "" {
@@ -187,40 +222,41 @@ func makeManagementCluster(clusterName, capiVersion, capdImage, capiImageOverrid
 	}

 	if err := controlplane.CreateKindCluster(capiImage, clusterName); err != nil {
-		panic(err)
+		return err
 	}
-	applyControlPlane(clusterName, capiVersion, capiImage, capdImage)
+	return applyControlPlane(clusterName, capiVersion, capiImage, capdImage)
 }

-func applyControlPlane(clusterName, capiVersion, capiImage, capdImage string) {
+func applyControlPlane(clusterName, capiVersion, capiImage, capdImage string) error {
 	fmt.Println("Downloading the latest CRDs for CAPI version", capiVersion)
-	objects, err := objects.GetManegementCluster(capiVersion, capiImage, capdImage)
+	objs, err := objects.GetManegementCluster(capiVersion, capiImage, capdImage)
 	if err != nil {
-		panic(err)
+		return err
 	}

 	fmt.Println("Applying the control plane")
 	cfg, err := controlplane.GetKubeconfig(clusterName)
 	if err != nil {
-		panic(err)
+		return err
 	}

 	client, err := crclient.New(cfg, crclient.Options{})
 	if err != nil {
-		panic(err)
+		return err
 	}

-	for _, obj := range objects {
+	for _, obj := range objs {
 		accessor, err := meta.Accessor(obj)
 		if err != nil {
-			panic(err)
+			return err
 		}
 		fmt.Printf("creating %q %q\n", obj.GetObjectKind().GroupVersionKind().String(), accessor.GetName())
-		if client.Create(context.Background(), obj); err != nil {
-			panic(err)
+		if err := client.Create(context.Background(), obj); err != nil {
+			return err
 		}
 	}
+	return nil
 }
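
Note: the YAML helpers now return errors wrapped by errors.WithStack, and main prints them with %+v, so the CLI keeps the stack traces that panic used to provide while choosing its own exit code. A minimal sketch of that round trip; marshalJSON is an illustrative stand-in for the helpers above:

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/pkg/errors"
)

// marshalJSON mirrors the helpers above: a failure comes back annotated
// with a stack trace instead of panicking.
func marshalJSON(v interface{}) (string, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return "", errors.WithStack(err)
	}
	return string(b), nil
}

func main() {
	out, err := marshalJSON(map[string]string{"kind": "Cluster"})
	if err != nil {
		// %+v asks github.com/pkg/errors to print the message plus the
		// stack recorded by WithStack, preserving what panic used to show.
		fmt.Printf("%+v\n", err)
		os.Exit(1)
	}
	fmt.Println(out)
}
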
diff --git a/cmd/kind-test/main.go b/cmd/kind-test/main.go
index 9a77fa2..4740153 100644
--- a/cmd/kind-test/main.go
+++ b/cmd/kind-test/main.go
@@ -23,8 +23,6 @@ import (
 	"strings"

 	"sigs.k8s.io/cluster-api-provider-docker/kind/actions"
-	"sigs.k8s.io/kind/pkg/cluster/constants"
-	"sigs.k8s.io/kind/pkg/cluster/nodes"
 )

 func main() {
@@ -87,18 +85,3 @@ func main() {
 		fmt.Println("Done!")
 	}
 }
-
-func getName(clusterName, role string) string {
-	ns, err := nodes.List(
-		fmt.Sprintf("label=%s=%s", constants.ClusterLabelKey, clusterName),
-		fmt.Sprintf("label=%s=%s", constants.NodeRoleKey, role))
-	if err != nil {
-		panic(err)
-	}
-	count := len(ns)
-	suffix := fmt.Sprintf("%d", count)
-	if count == 0 {
-		suffix = ""
-	}
-	return fmt.Sprintf("%s-%s%s", clusterName, role, suffix)
-}
diff --git a/logger/logger.go b/logger/logger.go
deleted file mode 100644
index eb1f123..0000000
--- a/logger/logger.go
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package logger
-
-import (
-	"fmt"
-
-	"github.com/go-logr/logr"
-)
-
-// Log is a wrapper to add a stacktrace to the Error message
-type Log struct {
-	logr.Logger
-}
-
-func (k Log) Error(err error, msg string, keysAndValues ...interface{}) {
-	keysAndValues = append(keysAndValues, "stacktrace", fmt.Sprintf("%+v", err))
-	k.Logger.Error(err, msg, keysAndValues...)
-}
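
Note: deleting this wrapper is safe because errors now carry their own stack traces via github.com/pkg/errors. If the old behavior (the stack as a structured key/value pair) is ever wanted again, it can be reproduced inline; a minimal sketch, assuming an error created by pkg/errors:

package main

import (
	"fmt"

	"github.com/pkg/errors"
	"k8s.io/klog/klogr"
)

func main() {
	log := klogr.New().WithName("example")

	// github.com/pkg/errors records a stack trace when the error is created.
	err := errors.New("boom")

	// The deleted wrapper appended the stack as an extra key/value pair;
	// the same effect is available inline because %+v renders the stack.
	log.Error(err, "something failed", "stacktrace", fmt.Sprintf("%+v", err))
}
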