diff --git a/CHANGELOG.md b/CHANGELOG.md index 421e0236c5c..5d92babf436 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -254,8 +254,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). The `openshift-install` command. This moves us to the new install-config approach with [asset -generation](docs/design/assetgeneration.md) in Go instead of in -Terraform. Terraform is still used to push the assets out to +generation](docs/user/overview.md#asset-generation) in Go instead of +in Terraform. Terraform is still used to push the assets out to resources on the backing platform (AWS, libvirt, or OpenStack), but that push happens in a single Terraform invocation instead of in multiple steps. This makes installation faster, because more diff --git a/Gopkg.lock b/Gopkg.lock index 92d7eafe15f..31579a7047b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,14 +1,6 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. -[[projects]] - branch = "go15" - digest = "1:8dab0033e231d02909f00a501a1f1ac01e63fd267dcb374a5de71b99ba1ee74f" - name = "github.com/ajeddeloh/go-json" - packages = ["."] - pruneopts = "NUT" - revision = "6a2fe990e08303c82d966297ddb29a58678a4783" - [[projects]] digest = "1:1929b21a34400d463a99336f8e2908d2a154dc525c52411a8d99bb519942dc4c" name = "github.com/apparentlymart/go-cidr" @@ -100,14 +92,12 @@ version = "v18" [[projects]] - digest = "1:1a216755b1570c329bada24deac0032f487c1a5b7c690963c96ad2da9b8c3ec9" + digest = "1:3e8408e6f735040cb09c32e4989c401c676756257986bb33c77cbac2e4084009" name = "github.com/coreos/ignition" packages = [ "config/shared/errors", "config/shared/validations", - "config/util", "config/v2_2/types", - "config/v2_3_experimental/types", "config/validate/report", ] pruneopts = "NUT" @@ -130,6 +120,17 @@ revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" version = "v1.0.0" +[[projects]] + branch = "master" + digest = 
"1:87d71aae554a7a897c98539533a950e6ad390ff7f2e99c00a478a4277a445f74" + name = "github.com/go-log/log" + packages = [ + ".", + "print", + ] + pruneopts = "NUT" + revision = "9635bc1e124b13d79978db4673fc713d4e30954c" + [[projects]] digest = "1:8679b8a64f3613e9749c5640c3535c83399b8e69f67ce54d91dc73f6d77373af" name = "github.com/gogo/protobuf" @@ -477,14 +478,6 @@ pruneopts = "NUT" revision = "9a301d65acbb728fcc3ace14f45f511a4cfeea9c" -[[projects]] - branch = "master" - digest = "1:91ed6116b126ec7fa7b9f9460356ee6b3005e5ed320b8e85510937f44d3d62d5" - name = "go4.org" - packages = ["errorutil"] - pruneopts = "NUT" - revision = "417644f6feb5ed3a356ca5d6d8e3a3fac7dfd33f" - [[projects]] branch = "master" digest = "1:3f3a05ae0b95893d90b9b3b5afdb79a9b3d96e4e36e099d841ae602e4aca0da8" @@ -806,9 +799,10 @@ "github.com/aws/aws-sdk-go/aws", "github.com/aws/aws-sdk-go/aws/session", "github.com/aws/aws-sdk-go/service/ec2", - "github.com/coreos/ignition/config/util", "github.com/coreos/ignition/config/v2_2/types", "github.com/ghodss/yaml", + "github.com/go-log/log", + "github.com/go-log/log/print", "github.com/gophercloud/gophercloud/openstack/compute/v2/servers", "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers", "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups", @@ -840,7 +834,6 @@ "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/apimachinery/pkg/runtime", "k8s.io/apimachinery/pkg/util/net", - "k8s.io/apimachinery/pkg/util/sets", "k8s.io/apimachinery/pkg/util/wait", "k8s.io/apimachinery/pkg/watch", "k8s.io/client-go/kubernetes", diff --git a/README.md b/README.md index 6edf7873cbf..b28e09ba238 100644 --- a/README.md +++ b/README.md @@ -41,11 +41,11 @@ Log in using the admin credentials you configured when creating the cluster. #### Kubeconfig -You can also use the admin kubeconfig which `openshift-install create cluster` placed under `--dir` (which defaults to `.`) in `auth/kubeconfig`. 
+You can also use the admin kubeconfig which `openshift-install create cluster` placed under `--dir` (which defaults to `.`) in `auth/kubeconfig-admin`. If you launched the cluster with `openshift-install --dir "${DIR}" create cluster`, you can use: ```sh -export KUBECONFIG="${DIR}/auth/kubeconfig" +export KUBECONFIG="${DIR}/auth/kubeconfig-admin" ``` ### Cleanup diff --git a/cmd/openshift-install/create.go b/cmd/openshift-install/create.go index 5ba6517f63a..c8b2337edb4 100644 --- a/cmd/openshift-install/create.go +++ b/cmd/openshift-install/create.go @@ -2,11 +2,14 @@ package main import ( "context" - "os/exec" + "fmt" + "io/ioutil" + "os" + "path" "path/filepath" - "strings" "time" + "github.com/go-log/log/print" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" @@ -17,88 +20,20 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/cluster" - "github.com/openshift/installer/pkg/asset/ignition/bootstrap" - "github.com/openshift/installer/pkg/asset/ignition/machine" - "github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/asset/kubeconfig" - "github.com/openshift/installer/pkg/asset/manifests" - "github.com/openshift/installer/pkg/asset/templates" + "github.com/openshift/installer/pkg/assets" destroybootstrap "github.com/openshift/installer/pkg/destroy/bootstrap" + "github.com/openshift/installer/pkg/installerassets" + _ "github.com/openshift/installer/pkg/installerassets/aws" + _ "github.com/openshift/installer/pkg/installerassets/libvirt" + _ "github.com/openshift/installer/pkg/installerassets/openstack" + _ "github.com/openshift/installer/pkg/installerassets/tls" + "github.com/openshift/installer/pkg/terraform" ) -type target struct { - name string - command *cobra.Command - assets []asset.WritableAsset -} - -// each target is a variable to preserve the order when creating subcommands 
and still -// allow other functions to directly access each target individually. var ( - installConfigTarget = target{ - name: "Install Config", - command: &cobra.Command{ - Use: "install-config", - Short: "Generates the Install Config asset", - // FIXME: add longer descriptions for our commands with examples for better UX. - // Long: "", - }, - assets: []asset.WritableAsset{&installconfig.InstallConfig{}}, - } - - manifestsTarget = target{ - name: "Manifests", - command: &cobra.Command{ - Use: "manifests", - Short: "Generates the Kubernetes manifests", - // FIXME: add longer descriptions for our commands with examples for better UX. - // Long: "", - }, - assets: []asset.WritableAsset{&manifests.Manifests{}, &manifests.Tectonic{}}, - } - - manifestTemplatesTarget = target{ - name: "Manifest templates", - command: &cobra.Command{ - Use: "manifest-templates", - Short: "Generates the unrendered Kubernetes manifest templates", - Long: "", - }, - assets: []asset.WritableAsset{&templates.Templates{}}, + createAssetsOpts struct { + prune bool } - - ignitionConfigsTarget = target{ - name: "Ignition Configs", - command: &cobra.Command{ - Use: "ignition-configs", - Short: "Generates the Ignition Config asset", - // FIXME: add longer descriptions for our commands with examples for better UX. - // Long: "", - }, - assets: []asset.WritableAsset{&bootstrap.Bootstrap{}, &machine.Master{}, &machine.Worker{}}, - } - - clusterTarget = target{ - name: "Cluster", - command: &cobra.Command{ - Use: "cluster", - Short: "Create an OpenShift cluster", - // FIXME: add longer descriptions for our commands with examples for better UX. 
- // Long: "", - PostRunE: func(_ *cobra.Command, _ []string) error { - err := destroyBootstrap(context.Background(), rootOpts.dir) - if err != nil { - return err - } - return logComplete(rootOpts.dir) - }, - }, - assets: []asset.WritableAsset{&cluster.TerraformVariables{}, &kubeconfig.Admin{}, &cluster.Cluster{}}, - } - - targets = []target{installConfigTarget, manifestTemplatesTarget, manifestsTarget, ignitionConfigsTarget, clusterTarget} ) func newCreateCmd() *cobra.Command { @@ -110,64 +45,131 @@ func newCreateCmd() *cobra.Command { }, } - for _, t := range targets { - t.command.RunE = runTargetCmd(t.assets...) - cmd.AddCommand(t.command) - } + assets := &cobra.Command{ + Use: "assets", + Short: "Generates installer assets", + Long: "Generates installer assets. Can be run multiple times on the same directory to propagate changes made to any asset through the Merkle tree.", + RunE: func(cmd *cobra.Command, args []string) error { + ctx := context.Background() - return cmd -} + cleanup, err := setupFileHook(rootOpts.dir) + if err != nil { + return errors.Wrap(err, "failed to setup logging hook") + } + defer cleanup() -func runTargetCmd(targets ...asset.WritableAsset) func(cmd *cobra.Command, args []string) error { - return func(cmd *cobra.Command, args []string) error { - cleanup, err := setupFileHook(rootOpts.dir) - if err != nil { - return errors.Wrap(err, "failed to setup logging hook") - } - defer cleanup() + _, err = syncAssets(ctx, rootOpts.dir, createAssetsOpts.prune) + return err + }, + } + assets.PersistentFlags().BoolVar(&createAssetsOpts.prune, "prune", false, "remove everything except referenced assets from the asset directory") + cmd.AddCommand(assets) - assetStore, err := asset.NewStore(rootOpts.dir) - if err != nil { - return errors.Wrapf(err, "failed to create asset store") - } + cmd.AddCommand(&cobra.Command{ + Use: "cluster", + Short: "Creates the cluster", + Long: "Generates resources based on the installer assets, launching the cluster.", + 
RunE: func(cmd *cobra.Command, args []string) (err error) { + ctx := context.Background() - for _, a := range targets { - err := assetStore.Fetch(a) + cleanup, err := setupFileHook(rootOpts.dir) if err != nil { - if exitError, ok := errors.Cause(err).(*exec.ExitError); ok && len(exitError.Stderr) > 0 { - logrus.Error(strings.Trim(string(exitError.Stderr), "\n")) - } - err = errors.Wrapf(err, "failed to fetch %s", a.Name()) + return errors.Wrap(err, "failed to setup logging hook") } + defer cleanup() - if err2 := asset.PersistToFile(a, rootOpts.dir); err2 != nil { - err2 = errors.Wrapf(err2, "failed to write asset (%s) to disk", a.Name()) - if err != nil { - logrus.Error(err2) - return err - } - return err2 + assets, err := syncAssets(ctx, rootOpts.dir, createAssetsOpts.prune) + if err != nil { + return err } + err = createCluster(ctx, assets, rootOpts.dir) if err != nil { return err } - } - return nil + + err = destroyBootstrap(ctx, assets) + if err != nil { + return err + } + + return logComplete(rootOpts.dir) + }, + }) + + return cmd +} + +func syncAssets(ctx context.Context, directory string, prune bool) (*assets.Assets, error) { + assets := installerassets.New() + err := assets.Read(ctx, directory, installerassets.GetDefault, print.New(logrus.StandardLogger())) + if err != nil { + return nil, err } + + err = assets.Write(ctx, directory, prune) + return assets, err } -// FIXME: pulling the kubeconfig and metadata out of the root -// directory is a bit cludgy when we already have them in memory. 
-func destroyBootstrap(ctx context.Context, directory string) (err error) { - cleanup, err := setupFileHook(rootOpts.dir) +func createCluster(ctx context.Context, assets *assets.Assets, directory string) error { + tmpDir, err := ioutil.TempDir("", "openshift-install-") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + platformAsset, err := assets.GetByName(ctx, "platform") + if err != nil { + return errors.Wrapf(err, `retrieve "platform" by name`) + } + platform := string(platformAsset.Data) + + for _, filename := range []string{"terraform.tfvars", fmt.Sprintf("%s-terraform.auto.tfvars", platform)} { + assetName := path.Join("terraform", filename) + tfVars, err := assets.GetByName(ctx, assetName) + if err != nil { + return errors.Wrapf(err, "retrieve %q by name", assetName) + } + + if err := ioutil.WriteFile(filepath.Join(tmpDir, filename), tfVars.Data, 0600); err != nil { + return err + } + } + logrus.Info("Using Terraform to create cluster...") + stateFile, err := terraform.Apply(tmpDir, platform) if err != nil { - return errors.Wrap(err, "failed to setup logging hook") + err = errors.Wrap(err, "run Terraform") + + err2 := terraform.Destroy(tmpDir, platform) + if err2 != nil { + logrus.Errorf("Destroying failed resources: %v", err) + } + } + + if stateFile != "" { + data, err2 := ioutil.ReadFile(stateFile) + if err2 == nil { + err2 = ioutil.WriteFile(filepath.Join(directory, "terraform", "terraform.tfstate"), data, 0666) + } + if err == nil { + err = err2 + } else { + logrus.Error(errors.Wrap(err2, "read Terraform state")) + } } - defer cleanup() + return err +} + +func destroyBootstrap(ctx context.Context, assets *assets.Assets) error { logrus.Info("Waiting for bootstrap completion...") - config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(directory, "auth", "kubeconfig")) + + kubeconfig, err := assets.GetByName(ctx, "auth/kubeconfig-admin") + if err != nil { + return errors.Wrap(err, `retrieve "auth/kubeconfig-admin" by name`) + } 
+ + config, err := clientcmd.RESTConfigFromKubeConfig(kubeconfig.Data) if err != nil { return errors.Wrap(err, "loading kubeconfig") } @@ -191,7 +193,7 @@ func destroyBootstrap(ctx context.Context, directory string) (err error) { } }, 2*time.Second, apiContext.Done()) - events := client.CoreV1().Events("kube-system") + events := client.CoreV1().Events(metav1.NamespaceSystem) eventContext, cancel := context.WithTimeout(ctx, 30*time.Minute) defer cancel() @@ -239,6 +241,8 @@ func destroyBootstrap(ctx context.Context, directory string) (err error) { } logrus.Info("Destroying the bootstrap resources...") + // FIXME: pulling the metadata out of the root directory is a bit + // cludgy when we already have it in memory. return destroybootstrap.Destroy(rootOpts.dir) } diff --git a/cmd/openshift-install/destroy.go b/cmd/openshift-install/destroy.go index 5da614b7047..9ab6997969b 100644 --- a/cmd/openshift-install/destroy.go +++ b/cmd/openshift-install/destroy.go @@ -5,7 +5,6 @@ import ( "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/openshift/installer/pkg/asset" "github.com/openshift/installer/pkg/destroy" "github.com/openshift/installer/pkg/destroy/bootstrap" _ "github.com/openshift/installer/pkg/destroy/libvirt" @@ -48,16 +47,6 @@ func runDestroyCmd(cmd *cobra.Command, args []string) error { if err := destroyer.Run(); err != nil { return errors.Wrap(err, "Failed to destroy cluster") } - - store, err := asset.NewStore(rootOpts.dir) - if err != nil { - return errors.Wrapf(err, "failed to create asset store") - } - for _, asset := range clusterTarget.assets { - if err := store.Destroy(asset); err != nil { - return errors.Wrapf(err, "failed to destroy asset %q", asset.Name()) - } - } return nil } diff --git a/cmd/openshift-install/graph.go b/cmd/openshift-install/graph.go index 56e272eb429..5427ca8ee36 100644 --- a/cmd/openshift-install/graph.go +++ b/cmd/openshift-install/graph.go @@ -1,16 +1,21 @@ package main import ( + "context" "fmt" "io" "os" - 
"reflect" + "path" + "sort" + "strings" "github.com/awalterschulze/gographviz" + "github.com/go-log/log/print" "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/openshift/installer/pkg/asset" + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" ) var ( @@ -31,22 +36,57 @@ func newGraphCmd() *cobra.Command { } func runGraphCmd(cmd *cobra.Command, args []string) error { + ctx := context.Background() g := gographviz.NewGraph() g.SetName("G") g.SetDir(true) g.SetStrict(true) + g.AddAttr("G", string(gographviz.RankDir), "LR") - tNodeAttr := map[string]string{ - string(gographviz.Shape): "box", - string(gographviz.Style): "filled", + installerAssets := installerassets.New() + err := installerAssets.Read(ctx, rootOpts.dir, installerassets.GetDefault, print.New(logrus.StandardLogger())) + if err != nil { + logrus.Fatal(err) } - for _, t := range targets { - name := fmt.Sprintf(`"Target %s"`, t.name) - g.AddNode("G", name, tNodeAttr) - for _, dep := range t.assets { - addEdge(g, name, dep) + + root, err := installerAssets.GetByHash(ctx, installerAssets.Root.Hash) + if err != nil { + logrus.Fatal(err) + } + + directories := make(map[string]float32) + seen := make(map[string]bool) + stack := []*assets.Asset{&root} + for len(stack) > 0 { + asset := stack[len(stack)-1] + stack = stack[:len(stack)-1] + if seen[asset.Name] { + continue + } + + directories[path.Dir(asset.Name)] = 1 + for _, reference := range asset.Parents { + parent, err := installerAssets.GetByHash(ctx, reference.Hash) + if err != nil { + logrus.Fatal(err) + } + stack = append(stack, &parent) } } + dirSlice := make([]string, 0, len(directories)) + for dir := range directories { + dirSlice = append(dirSlice, dir) + } + sort.Strings(dirSlice) + for i, dir := range dirSlice { + directories[dir] = float32(i) / float32(len(dirSlice)) + } + + added := make(map[string]bool) + err = addNodes(ctx, g, &root, installerAssets.GetByHash, added, directories) + 
if err != nil { + logrus.Fatal(err) + } out := os.Stdout if graphOpts.outputFile != "" { @@ -61,33 +101,64 @@ func runGraphCmd(cmd *cobra.Command, args []string) error { if _, err := io.WriteString(out, g.String()); err != nil { return err } + + var unused []string + for key := range installerassets.Defaults { + if _, ok := added[key]; !ok { + unused = append(unused, key) + } + } + for key := range installerassets.Rebuilders { + if _, ok := added[key]; !ok { + unused = append(unused, key) + } + } + sort.Strings(unused) + if unused != nil { + logrus.Warnf("potentially unused asset(s): %s", strings.Join(unused, ", ")) + } + return nil } -func addEdge(g *gographviz.Graph, parent string, asset asset.Asset) { - elem := reflect.TypeOf(asset).Elem() - name := fmt.Sprintf(`"%s"`, elem.Name()) - - if !g.IsNode(name) { - logrus.Debugf("adding node %s", name) - g.AddNode("G", name, nil) - } - if !isEdge(g, name, parent) { - logrus.Debugf("adding edge %s -> %s", name, parent) - g.AddEdge(name, parent, true, nil) +func addNodes(ctx context.Context, g *gographviz.Graph, asset *assets.Asset, getByHash assets.GetByBytes, added map[string]bool, directories map[string]float32) (err error) { + _, ok := added[asset.Name] + if ok { + return nil } + added[asset.Name] = true + + assetName := fmt.Sprintf("%q", asset.Name) + attrs := make(map[string]string) + hue, ok := directories[path.Dir(asset.Name)] + if ok { + saturation := 0.1 + if asset.Name == "tls/kubelet-client.crt" { + saturation = 0.3 + } + attrs[string(gographviz.FillColor)] = fmt.Sprintf("\"%.2f %.2f 1\"", hue, saturation) - deps := asset.Dependencies() - for _, dep := range deps { - addEdge(g, name, dep) + attrs[string(gographviz.Style)] = "filled" } -} + g.AddNode("G", assetName, attrs) -func isEdge(g *gographviz.Graph, src, dst string) bool { - for _, edge := range g.Edges.Edges { - if edge.Src == src && edge.Dst == dst { - return true + for _, parentReference := range asset.Parents { + parent, err := getByHash(ctx, 
parentReference.Hash) + if err != nil { + return err } + + err = addNodes(ctx, g, &parent, getByHash, added, directories) + if err != nil { + return err + } + + parentName := fmt.Sprintf("%q", parent.Name) + attrs := map[string]string{ + string(gographviz.Tooltip): fmt.Sprintf("\"%s -> %s\"", parent.Name, asset.Name), + } + g.AddEdge(parentName, assetName, true, attrs) } - return false + + return nil } diff --git a/cmd/openshift-install/main.go b/cmd/openshift-install/main.go index b79be626ed3..17dbc9f3896 100644 --- a/cmd/openshift-install/main.go +++ b/cmd/openshift-install/main.go @@ -43,7 +43,7 @@ func newRootCmd() *cobra.Command { SilenceErrors: true, SilenceUsage: true, } - cmd.PersistentFlags().StringVar(&rootOpts.dir, "dir", ".", "assets directory") + cmd.PersistentFlags().StringVar(&rootOpts.dir, "dir", ".", "asset directory") cmd.PersistentFlags().StringVar(&rootOpts.logLevel, "log-level", "info", "log level (e.g. \"debug | info | warn | error\")") return cmd } diff --git a/data/data/manifests/bootkube/03-openshift-web-console-namespace.yaml b/data/data/bootstrap/files/opt/tectonic/manifests/03-openshift-web-console-namespace.yaml similarity index 100% rename from data/data/manifests/bootkube/03-openshift-web-console-namespace.yaml rename to data/data/bootstrap/files/opt/tectonic/manifests/03-openshift-web-console-namespace.yaml diff --git a/data/data/manifests/bootkube/04-openshift-machine-config-operator.yaml b/data/data/bootstrap/files/opt/tectonic/manifests/04-openshift-machine-config-operator.yaml similarity index 100% rename from data/data/manifests/bootkube/04-openshift-machine-config-operator.yaml rename to data/data/bootstrap/files/opt/tectonic/manifests/04-openshift-machine-config-operator.yaml diff --git a/data/data/manifests/bootkube/05-openshift-cluster-api-namespace.yaml b/data/data/bootstrap/files/opt/tectonic/manifests/05-openshift-cluster-api-namespace.yaml similarity index 100% rename from 
data/data/manifests/bootkube/05-openshift-cluster-api-namespace.yaml rename to data/data/bootstrap/files/opt/tectonic/manifests/05-openshift-cluster-api-namespace.yaml diff --git a/data/data/manifests/bootkube/09-openshift-service-cert-signer-namespace.yaml b/data/data/bootstrap/files/opt/tectonic/manifests/09-openshift-service-cert-signer-namespace.yaml similarity index 100% rename from data/data/manifests/bootkube/09-openshift-service-cert-signer-namespace.yaml rename to data/data/bootstrap/files/opt/tectonic/manifests/09-openshift-service-cert-signer-namespace.yaml diff --git a/data/data/manifests/bootkube/cluster-ingress-01-crd.yaml b/data/data/bootstrap/files/opt/tectonic/manifests/cluster-ingress-01-crd.yaml similarity index 100% rename from data/data/manifests/bootkube/cluster-ingress-01-crd.yaml rename to data/data/bootstrap/files/opt/tectonic/manifests/cluster-ingress-01-crd.yaml diff --git a/data/data/bootstrap/files/opt/tectonic/manifests/cluster-network-01-crd.yaml b/data/data/bootstrap/files/opt/tectonic/manifests/cluster-network-01-crd.yaml new file mode 100644 index 00000000000..ec1e108bf38 --- /dev/null +++ b/data/data/bootstrap/files/opt/tectonic/manifests/cluster-network-01-crd.yaml @@ -0,0 +1,16 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networkconfigs.networkoperator.openshift.io +spec: + group: networkoperator.openshift.io + names: + kind: NetworkConfig + listKind: NetworkConfigList + plural: networkconfigs + singular: networkconfig + scope: Cluster + versions: + - name: v1 + served: true + storage: true diff --git a/data/data/manifests/bootkube/cvo-overrides.yaml.template b/data/data/bootstrap/files/opt/tectonic/manifests/cvo-overrides.yaml.template similarity index 91% rename from data/data/manifests/bootkube/cvo-overrides.yaml.template rename to data/data/bootstrap/files/opt/tectonic/manifests/cvo-overrides.yaml.template index a798ade5d4b..1b83477a507 100644 --- 
a/data/data/manifests/bootkube/cvo-overrides.yaml.template +++ b/data/data/bootstrap/files/opt/tectonic/manifests/cvo-overrides.yaml.template @@ -6,7 +6,7 @@ metadata: spec: upstream: http://localhost:8080/graph channel: fast - clusterID: {{.CVOClusterID}} + clusterID: {{.ClusterID}} overrides: - kind: APIService # packages.apps.redhat.com fails to start properly name: v1alpha1.packages.apps.redhat.com diff --git a/data/data/bootstrap/files/opt/tectonic/manifests/etcd-service-endpoints.yaml.template b/data/data/bootstrap/files/opt/tectonic/manifests/etcd-service-endpoints.yaml.template new file mode 100644 index 00000000000..5b7dfb93f91 --- /dev/null +++ b/data/data/bootstrap/files/opt/tectonic/manifests/etcd-service-endpoints.yaml.template @@ -0,0 +1,20 @@ +{{$masterCount := int .MasterCount -}} +{{$etcdURIs := etcdURIs .ClusterName .BaseDomain $masterCount -}} +{{$clusterName := .ClusterName -}} +apiVersion: v1 +kind: Endpoints +metadata: + name: etcd + namespace: kube-system + annotations: + alpha.installer.openshift.io/dns-suffix: {{.BaseDomain}} +subsets: +- addresses: +{{- range $index, $member := $etcdURIs }} + - ip: 192.0.2.{{ add $index 1 }}{{/* FIXME: this isn't right, why do we set it */}} + hostname: {{$clusterName}}-etcd-{{$index}} +{{- end }} + ports: + - name: etcd + port: 2379 + protocol: TCP diff --git a/data/data/manifests/bootkube/etcd-service.yaml b/data/data/bootstrap/files/opt/tectonic/manifests/etcd-service.yaml similarity index 100% rename from data/data/manifests/bootkube/etcd-service.yaml rename to data/data/bootstrap/files/opt/tectonic/manifests/etcd-service.yaml diff --git a/data/data/bootstrap/files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml.template b/data/data/bootstrap/files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml.template new file mode 100644 index 00000000000..8eeb8a2bd37 --- /dev/null +++ b/data/data/bootstrap/files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml.template @@ -0,0 +1,20 @@ 
+{{$masterCount := int .MasterCount -}} +{{$etcdURIs := etcdURIs .ClusterName .BaseDomain $masterCount -}} +{{$clusterName := .ClusterName -}} +apiVersion: v1 +kind: Endpoints +metadata: + name: host-etcd + namespace: kube-system + annotations: + alpha.installer.openshift.io/dns-suffix: {{.BaseDomain}} +subsets: +- addresses: +{{- range $index, $member := $etcdURIs }} + - ip: 192.0.2.{{ add $index 1 }}{{/* FIXME: this isn't right, why do we set it */}} + hostname: {{$clusterName}}-etcd-{{$index}} +{{- end }} + ports: + - name: etcd + port: 2379 + protocol: TCP diff --git a/data/data/manifests/bootkube/host-etcd-service.yaml b/data/data/bootstrap/files/opt/tectonic/manifests/host-etcd-service.yaml similarity index 100% rename from data/data/manifests/bootkube/host-etcd-service.yaml rename to data/data/bootstrap/files/opt/tectonic/manifests/host-etcd-service.yaml diff --git a/data/data/manifests/bootkube/kube-cloud-config.yaml b/data/data/bootstrap/files/opt/tectonic/manifests/kube-cloud-config.yaml similarity index 100% rename from data/data/manifests/bootkube/kube-cloud-config.yaml rename to data/data/bootstrap/files/opt/tectonic/manifests/kube-cloud-config.yaml diff --git a/data/data/manifests/bootkube/kube-system-configmap-etcd-serving-ca.yaml.template b/data/data/bootstrap/files/opt/tectonic/manifests/kube-system-configmap-etcd-serving-ca.yaml.template similarity index 78% rename from data/data/manifests/bootkube/kube-system-configmap-etcd-serving-ca.yaml.template rename to data/data/bootstrap/files/opt/tectonic/manifests/kube-system-configmap-etcd-serving-ca.yaml.template index 910c186e545..6e7acda7b57 100644 --- a/data/data/manifests/bootkube/kube-system-configmap-etcd-serving-ca.yaml.template +++ b/data/data/bootstrap/files/opt/tectonic/manifests/kube-system-configmap-etcd-serving-ca.yaml.template @@ -5,4 +5,4 @@ metadata: namespace: kube-system data: ca-bundle.crt: | - {{.EtcdCaCert | indent 4}} + {{.Cert | indent 4}} diff --git 
a/data/data/manifests/bootkube/kube-system-configmap-root-ca.yaml.template b/data/data/bootstrap/files/opt/tectonic/manifests/kube-system-configmap-root-ca.yaml.template similarity index 76% rename from data/data/manifests/bootkube/kube-system-configmap-root-ca.yaml.template rename to data/data/bootstrap/files/opt/tectonic/manifests/kube-system-configmap-root-ca.yaml.template index 0211a5e81b5..c9420f21fbb 100644 --- a/data/data/manifests/bootkube/kube-system-configmap-root-ca.yaml.template +++ b/data/data/bootstrap/files/opt/tectonic/manifests/kube-system-configmap-root-ca.yaml.template @@ -5,4 +5,4 @@ metadata: namespace: kube-system data: ca.crt: | - {{.RootCaCert | indent 4}} + {{.Cert | indent 4}} diff --git a/data/data/manifests/bootkube/kube-system-secret-etcd-client.yaml.template b/data/data/bootstrap/files/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml.template similarity index 62% rename from data/data/manifests/bootkube/kube-system-secret-etcd-client.yaml.template rename to data/data/bootstrap/files/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml.template index 2e11a2ee605..4a208549d8a 100644 --- a/data/data/manifests/bootkube/kube-system-secret-etcd-client.yaml.template +++ b/data/data/bootstrap/files/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml.template @@ -5,5 +5,5 @@ metadata: namespace: kube-system type: SecretTypeTLS data: - tls.crt: {{ .EtcdClientCert }} - tls.key: {{ .EtcdClientKey }} + tls.crt: {{.Cert | base64}} + tls.key: {{.Key | base64}} diff --git a/data/data/manifests/bootkube/legacy-cvo-overrides.yaml.template b/data/data/bootstrap/files/opt/tectonic/manifests/legacy-cvo-overrides.yaml.template similarity index 92% rename from data/data/manifests/bootkube/legacy-cvo-overrides.yaml.template rename to data/data/bootstrap/files/opt/tectonic/manifests/legacy-cvo-overrides.yaml.template index 9c1e7a06512..e1b9e974d77 100644 --- a/data/data/manifests/bootkube/legacy-cvo-overrides.yaml.template +++ 
b/data/data/bootstrap/files/opt/tectonic/manifests/legacy-cvo-overrides.yaml.template @@ -5,7 +5,7 @@ metadata: name: cluster-version-operator upstream: http://localhost:8080/graph channel: fast -clusterID: {{.CVOClusterID}} +clusterID: {{.ClusterID}} overrides: - kind: APIService # packages.apps.redhat.com fails to start properly name: v1alpha1.packages.apps.redhat.com diff --git a/data/data/manifests/bootkube/machine-config-server-tls-secret.yaml.template b/data/data/bootstrap/files/opt/tectonic/manifests/machine-config-server-tls-secret.yaml.template similarity index 70% rename from data/data/manifests/bootkube/machine-config-server-tls-secret.yaml.template rename to data/data/bootstrap/files/opt/tectonic/manifests/machine-config-server-tls-secret.yaml.template index 2e69149bd4b..7b8a8749a16 100644 --- a/data/data/manifests/bootkube/machine-config-server-tls-secret.yaml.template +++ b/data/data/bootstrap/files/opt/tectonic/manifests/machine-config-server-tls-secret.yaml.template @@ -5,5 +5,5 @@ metadata: namespace: openshift-machine-config-operator type: Opaque data: - tls.crt: {{.McsTLSCert}} - tls.key: {{.McsTLSKey}} + tls.crt: {{.Cert | base64}} + tls.key: {{.Key | base64}} diff --git a/data/data/manifests/bootkube/openshift-service-cert-signer-ca-secret.yaml.template b/data/data/bootstrap/files/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml.template similarity index 68% rename from data/data/manifests/bootkube/openshift-service-cert-signer-ca-secret.yaml.template rename to data/data/bootstrap/files/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml.template index 38399febc43..0c6eb6f0e74 100644 --- a/data/data/manifests/bootkube/openshift-service-cert-signer-ca-secret.yaml.template +++ b/data/data/bootstrap/files/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml.template @@ -5,5 +5,5 @@ metadata: namespace: openshift-service-cert-signer type: kubernetes.io/tls data: - tls.crt: {{.ServiceServingCaCert}} 
- tls.key: {{.ServiceServingCaKey}} + tls.crt: {{.Cert | base64}} + tls.key: {{.Key | base64}} diff --git a/data/data/manifests/tectonic/binding-discovery.yaml b/data/data/bootstrap/files/opt/tectonic/tectonic/99_binding-discovery.yaml similarity index 100% rename from data/data/manifests/tectonic/binding-discovery.yaml rename to data/data/bootstrap/files/opt/tectonic/tectonic/99_binding-discovery.yaml diff --git a/data/data/bootstrap/files/opt/tectonic/tectonic/aws/99_cloud-creds-secret.yaml.template b/data/data/bootstrap/files/opt/tectonic/tectonic/aws/99_cloud-creds-secret.yaml.template new file mode 100644 index 00000000000..fcf77197ded --- /dev/null +++ b/data/data/bootstrap/files/opt/tectonic/tectonic/aws/99_cloud-creds-secret.yaml.template @@ -0,0 +1,8 @@ +kind: Secret +apiVersion: v1 +metadata: + namespace: kube-system + name: aws-creds +data: + aws_access_key_id: {{.AccessKeyID | base64}} + aws_secret_access_key: {{.SecretAccessKey | base64}} diff --git a/data/data/bootstrap/files/opt/tectonic/tectonic/aws/99_role-cloud-creds-secret-reader.yaml b/data/data/bootstrap/files/opt/tectonic/tectonic/aws/99_role-cloud-creds-secret-reader.yaml new file mode 100644 index 00000000000..35d5771ef68 --- /dev/null +++ b/data/data/bootstrap/files/opt/tectonic/tectonic/aws/99_role-cloud-creds-secret-reader.yaml @@ -0,0 +1,10 @@ +kind: Role +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + namespace: kube-system + name: aws-creds-secret-reader +rules: +- apiGroups: [""] + resources: ["secrets"] + resourceNames: ["aws-creds"] + verbs: ["get"] diff --git a/data/data/bootstrap/files/opt/tectonic/tectonic/openstack/99_cloud-creds-secret.yaml.template b/data/data/bootstrap/files/opt/tectonic/tectonic/openstack/99_cloud-creds-secret.yaml.template new file mode 100644 index 00000000000..859efc970f5 --- /dev/null +++ b/data/data/bootstrap/files/opt/tectonic/tectonic/openstack/99_cloud-creds-secret.yaml.template @@ -0,0 +1,7 @@ +kind: Secret +apiVersion: v1 +metadata: + 
namespace: kube-system + name: openstack-creds +data: + clouds.yaml: {{.Creds | base64}} diff --git a/data/data/manifests/tectonic/role-cloud-creds-secret-reader.yaml.template b/data/data/bootstrap/files/opt/tectonic/tectonic/openstack/99_role-cloud-creds-secret-reader.yaml similarity index 53% rename from data/data/manifests/tectonic/role-cloud-creds-secret-reader.yaml.template rename to data/data/bootstrap/files/opt/tectonic/tectonic/openstack/99_role-cloud-creds-secret-reader.yaml index 4c7a3be9259..d330b70e21e 100644 --- a/data/data/manifests/tectonic/role-cloud-creds-secret-reader.yaml.template +++ b/data/data/bootstrap/files/opt/tectonic/tectonic/openstack/99_role-cloud-creds-secret-reader.yaml @@ -2,17 +2,9 @@ kind: Role apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: namespace: kube-system -{{- if .CloudCreds.AWS}} - name: aws-creds-secret-reader -{{- else if .CloudCreds.OpenStack}} name: openstack-creds-secret-reader -{{- end}} rules: - apiGroups: [""] resources: ["secrets"] -{{- if .CloudCreds.AWS}} - resourceNames: ["aws-creds"] -{{- else if .CloudCreds.OpenStack}} resourceNames: ["openstack-creds"] -{{- end}} verbs: ["get"] diff --git a/data/data/openstack/OWNERS b/data/data/bootstrap/files/opt/tectonic/tectonic/openstack/OWNERS similarity index 100% rename from data/data/openstack/OWNERS rename to data/data/bootstrap/files/opt/tectonic/tectonic/openstack/OWNERS diff --git a/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template b/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template index b5e146f48a2..28fb69504ac 100755 --- a/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template +++ b/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template @@ -1,4 +1,7 @@ #!/usr/bin/env bash +{{$masterCount := int .MasterCount -}} +{{$etcdURISlice := etcdURIs .ClusterName .BaseDomain $masterCount -}} +{{$etcdURIs := join "," $etcdURISlice -}} set -e mkdir --parents /etc/kubernetes/{manifests,bootstrap-configs,bootstrap-manifests} @@ 
-48,7 +51,7 @@ then "${KUBE_APISERVER_OPERATOR_IMAGE}" \ /usr/bin/cluster-kube-apiserver-operator render \ --manifest-etcd-serving-ca=etcd-client-ca.crt \ - --manifest-etcd-server-urls={{.EtcdCluster}} \ + --manifest-etcd-server-urls={{$etcdURIs}} \ --manifest-image=${OPENSHIFT_HYPERSHIFT_IMAGE} \ --asset-input-dir=/assets/tls \ --asset-output-dir=/assets/kube-apiserver-bootstrap \ @@ -173,13 +176,13 @@ until podman run \ --name etcdctl \ --env ETCDCTL_API=3 \ --volume /opt/tectonic/tls:/opt/tectonic/tls:ro,z \ - "{{.EtcdctlImage}}" \ + "{{.EtcdCtlImage}}" \ /usr/local/bin/etcdctl \ --dial-timeout=10m \ --cacert=/opt/tectonic/tls/etcd-client-ca.crt \ --cert=/opt/tectonic/tls/etcd-client.crt \ --key=/opt/tectonic/tls/etcd-client.key \ - --endpoints={{.EtcdCluster}} \ + --endpoints={{$etcdURIs}} \ endpoint health do echo "etcdctl failed. Retrying in 5 seconds..." diff --git a/data/data/bootstrap/systemd/units/kubelet.service.template b/data/data/bootstrap/systemd/units/kubelet.service similarity index 100% rename from data/data/bootstrap/systemd/units/kubelet.service.template rename to data/data/bootstrap/systemd/units/kubelet.service diff --git a/data/data/manifests/bootkube/host-etcd-service-endpoints.yaml.template b/data/data/manifests/bootkube/host-etcd-service-endpoints.yaml.template deleted file mode 100644 index a478bdee68f..00000000000 --- a/data/data/manifests/bootkube/host-etcd-service-endpoints.yaml.template +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: v1 -kind: Endpoints -metadata: - name: host-etcd - namespace: kube-system - annotations: - alpha.installer.openshift.io/dns-suffix: {{.EtcdEndpointDNSSuffix}} -subsets: -- addresses: -{{- range $idx, $member := .EtcdEndpointHostnames }} - - ip: 192.0.2.{{ add $idx 1 }} - hostname: {{ $member }} -{{- end }} - ports: - - name: etcd - port: 2379 - protocol: TCP diff --git a/data/data/manifests/bootkube/pull.yaml.template b/data/data/manifests/bootkube/pull.yaml.template deleted file mode 100644 index 
980ba3ee35a..00000000000 --- a/data/data/manifests/bootkube/pull.yaml.template +++ /dev/null @@ -1,12 +0,0 @@ -{ - "apiVersion": "v1", - "kind": "Secret", - "type": "kubernetes.io/dockerconfigjson", - "metadata": { - "namespace": "kube-system", - "name": "coreos-pull-secret" - }, - "data": { - ".dockerconfigjson": "{{.PullSecret}}" - } -} diff --git a/data/data/manifests/tectonic/cloud-creds-secret.yaml.template b/data/data/manifests/tectonic/cloud-creds-secret.yaml.template deleted file mode 100644 index 913947b9436..00000000000 --- a/data/data/manifests/tectonic/cloud-creds-secret.yaml.template +++ /dev/null @@ -1,16 +0,0 @@ -kind: Secret -apiVersion: v1 -metadata: - namespace: kube-system -{{- if .CloudCreds.AWS}} - name: aws-creds -{{- else if .CloudCreds.OpenStack}} - name: openstack-creds -{{- end}} -data: -{{- if .CloudCreds.AWS}} - aws_access_key_id: {{.CloudCreds.AWS.Base64encodeAccessKeyID}} - aws_secret_access_key: {{.CloudCreds.AWS.Base64encodeSecretAccessKey}} -{{- else if .CloudCreds.OpenStack}} - clouds.yaml: {{.CloudCreds.OpenStack.Base64encodeCloudCreds}} -{{- end}} diff --git a/data/data/aws/bootstrap/README.md b/data/data/terraform/aws/bootstrap/README.md similarity index 93% rename from data/data/aws/bootstrap/README.md rename to data/data/terraform/aws/bootstrap/README.md index c1cc005ebd5..d735906ddf2 100644 --- a/data/data/aws/bootstrap/README.md +++ b/data/data/terraform/aws/bootstrap/README.md @@ -24,7 +24,7 @@ resource "aws_subnet" "example" { } module "bootstrap" { - source = "github.com/openshift/installer//data/data/aws/bootstrap" + source = "github.com/openshift/installer//data/data/terraform/aws/bootstrap" ami = "ami-0af8953af3ec06b7c" cluster_name = "my-cluster" diff --git a/data/data/aws/bootstrap/main.tf b/data/data/terraform/aws/bootstrap/main.tf similarity index 100% rename from data/data/aws/bootstrap/main.tf rename to data/data/terraform/aws/bootstrap/main.tf diff --git a/data/data/aws/bootstrap/variables.tf 
b/data/data/terraform/aws/bootstrap/variables.tf similarity index 100% rename from data/data/aws/bootstrap/variables.tf rename to data/data/terraform/aws/bootstrap/variables.tf diff --git a/data/data/aws/iam/main.tf b/data/data/terraform/aws/iam/main.tf similarity index 100% rename from data/data/aws/iam/main.tf rename to data/data/terraform/aws/iam/main.tf diff --git a/data/data/aws/iam/variables.tf b/data/data/terraform/aws/iam/variables.tf similarity index 100% rename from data/data/aws/iam/variables.tf rename to data/data/terraform/aws/iam/variables.tf diff --git a/data/data/aws/main.tf b/data/data/terraform/aws/main.tf similarity index 100% rename from data/data/aws/main.tf rename to data/data/terraform/aws/main.tf diff --git a/data/data/aws/master/main.tf b/data/data/terraform/aws/master/main.tf similarity index 100% rename from data/data/aws/master/main.tf rename to data/data/terraform/aws/master/main.tf diff --git a/data/data/aws/master/outputs.tf b/data/data/terraform/aws/master/outputs.tf similarity index 100% rename from data/data/aws/master/outputs.tf rename to data/data/terraform/aws/master/outputs.tf diff --git a/data/data/aws/master/variables.tf b/data/data/terraform/aws/master/variables.tf similarity index 100% rename from data/data/aws/master/variables.tf rename to data/data/terraform/aws/master/variables.tf diff --git a/data/data/aws/route53/master.tf b/data/data/terraform/aws/route53/master.tf similarity index 100% rename from data/data/aws/route53/master.tf rename to data/data/terraform/aws/route53/master.tf diff --git a/data/data/aws/route53/tectonic.tf b/data/data/terraform/aws/route53/tectonic.tf similarity index 100% rename from data/data/aws/route53/tectonic.tf rename to data/data/terraform/aws/route53/tectonic.tf diff --git a/data/data/aws/route53/variables.tf b/data/data/terraform/aws/route53/variables.tf similarity index 100% rename from data/data/aws/route53/variables.tf rename to data/data/terraform/aws/route53/variables.tf diff --git 
a/data/data/aws/route53/worker.tf b/data/data/terraform/aws/route53/worker.tf similarity index 100% rename from data/data/aws/route53/worker.tf rename to data/data/terraform/aws/route53/worker.tf diff --git a/data/data/aws/variables-aws.tf b/data/data/terraform/aws/variables-aws.tf similarity index 100% rename from data/data/aws/variables-aws.tf rename to data/data/terraform/aws/variables-aws.tf diff --git a/data/data/aws/vpc/common.tf b/data/data/terraform/aws/vpc/common.tf similarity index 100% rename from data/data/aws/vpc/common.tf rename to data/data/terraform/aws/vpc/common.tf diff --git a/data/data/aws/vpc/master-elb.tf b/data/data/terraform/aws/vpc/master-elb.tf similarity index 100% rename from data/data/aws/vpc/master-elb.tf rename to data/data/terraform/aws/vpc/master-elb.tf diff --git a/data/data/aws/vpc/outputs.tf b/data/data/terraform/aws/vpc/outputs.tf similarity index 100% rename from data/data/aws/vpc/outputs.tf rename to data/data/terraform/aws/vpc/outputs.tf diff --git a/data/data/aws/vpc/sg-elb.tf b/data/data/terraform/aws/vpc/sg-elb.tf similarity index 100% rename from data/data/aws/vpc/sg-elb.tf rename to data/data/terraform/aws/vpc/sg-elb.tf diff --git a/data/data/aws/vpc/sg-etcd.tf b/data/data/terraform/aws/vpc/sg-etcd.tf similarity index 100% rename from data/data/aws/vpc/sg-etcd.tf rename to data/data/terraform/aws/vpc/sg-etcd.tf diff --git a/data/data/aws/vpc/sg-master.tf b/data/data/terraform/aws/vpc/sg-master.tf similarity index 100% rename from data/data/aws/vpc/sg-master.tf rename to data/data/terraform/aws/vpc/sg-master.tf diff --git a/data/data/aws/vpc/sg-worker.tf b/data/data/terraform/aws/vpc/sg-worker.tf similarity index 100% rename from data/data/aws/vpc/sg-worker.tf rename to data/data/terraform/aws/vpc/sg-worker.tf diff --git a/data/data/aws/vpc/variables.tf b/data/data/terraform/aws/vpc/variables.tf similarity index 100% rename from data/data/aws/vpc/variables.tf rename to data/data/terraform/aws/vpc/variables.tf diff --git 
a/data/data/aws/vpc/vpc-private.tf b/data/data/terraform/aws/vpc/vpc-private.tf similarity index 100% rename from data/data/aws/vpc/vpc-private.tf rename to data/data/terraform/aws/vpc/vpc-private.tf diff --git a/data/data/aws/vpc/vpc-public.tf b/data/data/terraform/aws/vpc/vpc-public.tf similarity index 100% rename from data/data/aws/vpc/vpc-public.tf rename to data/data/terraform/aws/vpc/vpc-public.tf diff --git a/data/data/aws/vpc/vpc.tf b/data/data/terraform/aws/vpc/vpc.tf similarity index 100% rename from data/data/aws/vpc/vpc.tf rename to data/data/terraform/aws/vpc/vpc.tf diff --git a/data/data/config.tf b/data/data/terraform/config.tf similarity index 100% rename from data/data/config.tf rename to data/data/terraform/config.tf diff --git a/data/data/libvirt/bootstrap/README.md b/data/data/terraform/libvirt/bootstrap/README.md similarity index 93% rename from data/data/libvirt/bootstrap/README.md rename to data/data/terraform/libvirt/bootstrap/README.md index f85d103faf4..925eeca75c5 100644 --- a/data/data/libvirt/bootstrap/README.md +++ b/data/data/terraform/libvirt/bootstrap/README.md @@ -25,7 +25,7 @@ resource "libvirt_volume" "example" { } module "bootstrap" { - source = "github.com/openshift/installer//data/data/libvirt/bootstrap" + source = "github.com/openshift/installer//data/data/terraform/libvirt/bootstrap" addresses = ["192.168.0.1"] base_volume_id = "${libvirt_volume.example.id}" diff --git a/data/data/libvirt/bootstrap/main.tf b/data/data/terraform/libvirt/bootstrap/main.tf similarity index 100% rename from data/data/libvirt/bootstrap/main.tf rename to data/data/terraform/libvirt/bootstrap/main.tf diff --git a/data/data/libvirt/bootstrap/variables.tf b/data/data/terraform/libvirt/bootstrap/variables.tf similarity index 100% rename from data/data/libvirt/bootstrap/variables.tf rename to data/data/terraform/libvirt/bootstrap/variables.tf diff --git a/data/data/libvirt/main.tf b/data/data/terraform/libvirt/main.tf similarity index 100% rename from 
data/data/libvirt/main.tf rename to data/data/terraform/libvirt/main.tf diff --git a/data/data/libvirt/variables-libvirt.tf b/data/data/terraform/libvirt/variables-libvirt.tf similarity index 100% rename from data/data/libvirt/variables-libvirt.tf rename to data/data/terraform/libvirt/variables-libvirt.tf diff --git a/data/data/libvirt/volume/main.tf b/data/data/terraform/libvirt/volume/main.tf similarity index 100% rename from data/data/libvirt/volume/main.tf rename to data/data/terraform/libvirt/volume/main.tf diff --git a/data/data/libvirt/volume/outputs.tf b/data/data/terraform/libvirt/volume/outputs.tf similarity index 100% rename from data/data/libvirt/volume/outputs.tf rename to data/data/terraform/libvirt/volume/outputs.tf diff --git a/data/data/libvirt/volume/variables.tf b/data/data/terraform/libvirt/volume/variables.tf similarity index 100% rename from data/data/libvirt/volume/variables.tf rename to data/data/terraform/libvirt/volume/variables.tf diff --git a/pkg/asset/cluster/openstack/OWNERS b/data/data/terraform/openstack/OWNERS similarity index 100% rename from pkg/asset/cluster/openstack/OWNERS rename to data/data/terraform/openstack/OWNERS diff --git a/data/data/openstack/bootstrap/main.tf b/data/data/terraform/openstack/bootstrap/main.tf similarity index 100% rename from data/data/openstack/bootstrap/main.tf rename to data/data/terraform/openstack/bootstrap/main.tf diff --git a/data/data/openstack/bootstrap/variables.tf b/data/data/terraform/openstack/bootstrap/variables.tf similarity index 100% rename from data/data/openstack/bootstrap/variables.tf rename to data/data/terraform/openstack/bootstrap/variables.tf diff --git a/data/data/openstack/main.tf b/data/data/terraform/openstack/main.tf similarity index 100% rename from data/data/openstack/main.tf rename to data/data/terraform/openstack/main.tf diff --git a/data/data/openstack/masters/main.tf b/data/data/terraform/openstack/masters/main.tf similarity index 100% rename from 
data/data/openstack/masters/main.tf rename to data/data/terraform/openstack/masters/main.tf diff --git a/data/data/openstack/masters/variables.tf b/data/data/terraform/openstack/masters/variables.tf similarity index 100% rename from data/data/openstack/masters/variables.tf rename to data/data/terraform/openstack/masters/variables.tf diff --git a/data/data/openstack/topology/common.tf b/data/data/terraform/openstack/topology/common.tf similarity index 100% rename from data/data/openstack/topology/common.tf rename to data/data/terraform/openstack/topology/common.tf diff --git a/data/data/openstack/topology/outputs.tf b/data/data/terraform/openstack/topology/outputs.tf similarity index 100% rename from data/data/openstack/topology/outputs.tf rename to data/data/terraform/openstack/topology/outputs.tf diff --git a/data/data/openstack/topology/private-network.tf b/data/data/terraform/openstack/topology/private-network.tf similarity index 100% rename from data/data/openstack/topology/private-network.tf rename to data/data/terraform/openstack/topology/private-network.tf diff --git a/data/data/openstack/topology/sg-lb.tf b/data/data/terraform/openstack/topology/sg-lb.tf similarity index 100% rename from data/data/openstack/topology/sg-lb.tf rename to data/data/terraform/openstack/topology/sg-lb.tf diff --git a/data/data/openstack/topology/sg-master.tf b/data/data/terraform/openstack/topology/sg-master.tf similarity index 100% rename from data/data/openstack/topology/sg-master.tf rename to data/data/terraform/openstack/topology/sg-master.tf diff --git a/data/data/openstack/topology/sg-worker.tf b/data/data/terraform/openstack/topology/sg-worker.tf similarity index 100% rename from data/data/openstack/topology/sg-worker.tf rename to data/data/terraform/openstack/topology/sg-worker.tf diff --git a/data/data/openstack/topology/variables.tf b/data/data/terraform/openstack/topology/variables.tf similarity index 100% rename from data/data/openstack/topology/variables.tf rename to 
data/data/terraform/openstack/topology/variables.tf diff --git a/data/data/openstack/variables-openstack.tf b/data/data/terraform/openstack/variables-openstack.tf similarity index 100% rename from data/data/openstack/variables-openstack.tf rename to data/data/terraform/openstack/variables-openstack.tf diff --git a/docs/design/assetgeneration.md b/docs/design/assetgeneration.md deleted file mode 100644 index aad6ce5934b..00000000000 --- a/docs/design/assetgeneration.md +++ /dev/null @@ -1,150 +0,0 @@ -# Asset Generation - -The installer internally uses a directed acyclic graph to represent all of the assets it creates as well as their dependencies. This process looks very similar to many build systems (e.g. Bazel, Make). - -## Overview - -The installer generates assets based on the [dependency graph](#dependency-graph). Each asset seperately defines how it can be generated as well as its dependencies. Targets represent a set of assets that should be generated and written to disk for the user's consumption. When a user invokes the installer for a particular target, each of the assets in the set is generated as well as any dependencies. This eventually results in the user being prompted for any missing information (e.g. administrator password, target platform). - -The installer is also able to read assets from disk if they have been provided by the user. In the event that an asset exists on disk, the install won't generate the asset, but will instead consume the asset from disk (removing the file). This allows the installer to be run multiple times, using the assets generated by the previous invocation. It also allows a user to make modifications to the generated assets before continuing to the next target. - -Each asset is individually responsible for declaring its dependencies. Each asset is also responsible resolving conflicts when combining its input from disk and its state from a previous run. 
The installer ensures all the dependencies for an asset is generated and provides the asset with latest state to generate its own output. - -## Asset - -An asset is the generic representation of work-item for installer that needs to be generated. Each asset defines all the other assets that are required for it to generate itself as dependencies. - -The asset would usually follow these steps to generate its output: - -1. Fetch its parent assets. - -2. Generate the assets either by: - * Using the parent assets - * Loading from on-disk assets - * Loading from state file - -3. If any of the parent assets are **dirty** (currently we think all on-disk assets are **dirty**), then use the parent assets to generate and return **dirty**. - -4. If none of the parent assets are **dirty**, but the asset itself is on disk, then use the on-disk asset and return **dirty**. - -5. If none of the parent assets or this asset is **dirty**, but the asset is found in the state file, then use the asset from state file and return **NOT dirty**. - -6. If none of the parent assets are **dirty**, this asset is not **dirty**, and this asset is not found in the state file, then generate the asset using its parent assets and return **NOT dirty**. - -An example of the Asset: - -```go -type Asset interface { - Dependencies() []Assets - Generate(Parents) error - Name() string -} -``` - -## Writable Asset - -A writable asset is an asset that generates files to write to disk. These files could be for the user to consume as output from installer targets, such as install-config.yml from the InstallConfig asset. Or these files could be used internally by the installer, such as the cert/key files generated by TLS assets. -A writable asset can also be loaded from disk to construct. 
- -```go -type WritableAsset interface{ - Asset - Files() []File - Load(FileFetcher) (found bool, err error) -} - -type File struct { - Filename string - Data []byte -} - -// FileFetcher is passed to every Loadable asset when implementing -// the Load() function. The FileFetcher enables the Loadable asset -// to read specific file(s) from disk. -type FileFetcher interface { - // FetchByName returns the file with the given name. - FetchByName(string) (*File, error) - // FetchByPattern returns the files whose name match the given glob. - FetchByPattern(*regexp.Regexp) ([]*File, error) -} -``` -After being loaded and consumed by a children asset, the existing on-disk asset will be purged. -E.g. - -```shell -$ openshift-install create install-config -# Generate install-config.yml - -$ openshift-install create manifests -# Generate manifests/ and tectonic/ dir, also remove install-config.yml -``` - -## Target generation - -The installer uses depth-first traversal on the dependency graph, starting at the target nodes, generating all the dependencies of the asset before generating the asset itself. After all the target assets have been generated, the installer outputs the contents of the components of the targets to disk. - -### Dirty detection - -An asset generation reports **DIRTY** when it detects that the components have been modified from previous run. For now the asset is considered dirty when it's on-disk. 
- -### Example - -```dot -digraph G { - size ="4,4"; - A1; - A2; - A3; - A4; - A5; - A6; - A5 -> {A3, A4}; - A3 -> {A1, A2}; - A6 -> {A1, A2}; -} -``` - -When generating targets **A5 and A6** - -``` -load state; - -A5: (A3, A4) - A3: (A1, A2) - A1: - A1.generate(state) - update state - A2: - A2.generate(state) - update state - set dirty if one of A1/A2 is dirty - A3.generate(state): pass dirty if set - update state - A4: - A4.generate(state) - update state - set dirty if one of A3/A4 is dirty - A5.generate(state): pass dirty if set - update state -A6: (A1, A2) - A1: - reuse - A2: - reuse - set dirty if one of A1/A2 is dirty - A6.generate(state): pass dirty if set - update state -Flush A5 and A6 to disk -``` - -## Dependency graph - -The following graph shows the relationship between the various assets that the installer generates: - -![Image depicting the resource dependency graph](resource_dep.svg) - -This graph is generated from the using the following command: - -```sh -bin/openshift-install graph | dot -Tsvg >docs/design/resource_dep.svg -``` diff --git a/docs/design/resource_dep.svg b/docs/design/resource_dep.svg deleted file mode 100644 index 4543fd3d61d..00000000000 --- a/docs/design/resource_dep.svg +++ /dev/null @@ -1,913 +0,0 @@ - - - - - - -G - - -InstallConfig - -InstallConfig - - -Target Install Config - -Target Install Config - - -InstallConfig->Target Install Config - - - - -Manifests - -Manifests - - -InstallConfig->Manifests - - - - -Networking - -Networking - - -InstallConfig->Networking - - - - -IngressCertKey - -IngressCertKey - - -InstallConfig->IngressCertKey - - - - -MCSCertKey - -MCSCertKey - - -InstallConfig->MCSCertKey - - - - -Tectonic - -Tectonic - - -InstallConfig->Tectonic - - - - -ClusterK8sIO - -ClusterK8sIO - - -InstallConfig->ClusterK8sIO - - - - -Worker - -Worker - - -InstallConfig->Worker - - - - -Master - -Master - - -InstallConfig->Master - - - - -Bootstrap - -Bootstrap - - -InstallConfig->Bootstrap - - - - -APIServerCertKey - 
-APIServerCertKey - - -InstallConfig->APIServerCertKey - - - - -Admin - -Admin - - -InstallConfig->Admin - - - - -Kubelet - -Kubelet - - -InstallConfig->Kubelet - - - - -TerraformVariables - -TerraformVariables - - -InstallConfig->TerraformVariables - - - - -Cluster - -Cluster - - -InstallConfig->Cluster - - - - -clusterID - -clusterID - - -clusterID->InstallConfig - - - - -emailAddress - -emailAddress - - -emailAddress->InstallConfig - - - - -password - -password - - -password->InstallConfig - - - - -sshPublicKey - -sshPublicKey - - -sshPublicKey->InstallConfig - - - - -baseDomain - -baseDomain - - -baseDomain->InstallConfig - - - - -clusterName - -clusterName - - -clusterName->InstallConfig - - - - -pullSecret - -pullSecret - - -pullSecret->InstallConfig - - - - -platform - -platform - - -platform->InstallConfig - - - - -Templates - -Templates - - -Target Manifest templates - -Target Manifest templates - - -Templates->Target Manifest templates - - - - -KubeCloudConfig - -KubeCloudConfig - - -KubeCloudConfig->Templates - - - - -KubeCloudConfig->Manifests - - - - -MachineConfigServerTLSSecret - -MachineConfigServerTLSSecret - - -MachineConfigServerTLSSecret->Templates - - - - -MachineConfigServerTLSSecret->Manifests - - - - -OpenshiftServiceCertSignerSecret - -OpenshiftServiceCertSignerSecret - - -OpenshiftServiceCertSignerSecret->Templates - - - - -OpenshiftServiceCertSignerSecret->Manifests - - - - -Pull - -Pull - - -Pull->Templates - - - - -Pull->Manifests - - - - -CVOOverrides - -CVOOverrides - - -CVOOverrides->Templates - - - - -CVOOverrides->Manifests - - - - -LegacyCVOOverrides - -LegacyCVOOverrides - - -LegacyCVOOverrides->Templates - - - - -LegacyCVOOverrides->Manifests - - - - -HostEtcdServiceEndpointsKubeSystem - -HostEtcdServiceEndpointsKubeSystem - - -HostEtcdServiceEndpointsKubeSystem->Templates - - - - -HostEtcdServiceEndpointsKubeSystem->Manifests - - - - -KubeSystemConfigmapEtcdServingCA - -KubeSystemConfigmapEtcdServingCA - - 
-KubeSystemConfigmapEtcdServingCA->Templates - - - - -KubeSystemConfigmapEtcdServingCA->Manifests - - - - -KubeSystemConfigmapRootCA - -KubeSystemConfigmapRootCA - - -KubeSystemConfigmapRootCA->Templates - - - - -KubeSystemConfigmapRootCA->Manifests - - - - -KubeSystemSecretEtcdClient - -KubeSystemSecretEtcdClient - - -KubeSystemSecretEtcdClient->Templates - - - - -KubeSystemSecretEtcdClient->Manifests - - - - -OpenshiftWebConsoleNamespace - -OpenshiftWebConsoleNamespace - - -OpenshiftWebConsoleNamespace->Templates - - - - -OpenshiftWebConsoleNamespace->Manifests - - - - -OpenshiftMachineConfigOperator - -OpenshiftMachineConfigOperator - - -OpenshiftMachineConfigOperator->Templates - - - - -OpenshiftMachineConfigOperator->Manifests - - - - -OpenshiftClusterAPINamespace - -OpenshiftClusterAPINamespace - - -OpenshiftClusterAPINamespace->Templates - - - - -OpenshiftClusterAPINamespace->Manifests - - - - -OpenshiftServiceCertSignerNamespace - -OpenshiftServiceCertSignerNamespace - - -OpenshiftServiceCertSignerNamespace->Templates - - - - -OpenshiftServiceCertSignerNamespace->Manifests - - - - -EtcdServiceKubeSystem - -EtcdServiceKubeSystem - - -EtcdServiceKubeSystem->Templates - - - - -EtcdServiceKubeSystem->Manifests - - - - -HostEtcdServiceKubeSystem - -HostEtcdServiceKubeSystem - - -HostEtcdServiceKubeSystem->Templates - - - - -HostEtcdServiceKubeSystem->Manifests - - - - -BindingDiscovery - -BindingDiscovery - - -BindingDiscovery->Templates - - - - -BindingDiscovery->Tectonic - - - - -CloudCredsSecret - -CloudCredsSecret - - -CloudCredsSecret->Templates - - - - -CloudCredsSecret->Tectonic - - - - -RoleCloudCredsSecretReader - -RoleCloudCredsSecretReader - - -RoleCloudCredsSecretReader->Templates - - - - -RoleCloudCredsSecretReader->Tectonic - - - - -Target Manifests - -Target Manifests - - -Manifests->Target Manifests - - - - -Manifests->Bootstrap - - - - -Networking->Manifests - - - - -Networking->ClusterK8sIO - - - - -RootCA - -RootCA - - -RootCA->Manifests - - - 
- -EtcdCA - -EtcdCA - - -RootCA->EtcdCA - - - - -KubeCA - -KubeCA - - -RootCA->KubeCA - - - - -ServiceServingCA - -ServiceServingCA - - -RootCA->ServiceServingCA - - - - -RootCA->MCSCertKey - - - - -RootCA->Worker - - - - -RootCA->Master - - - - -RootCA->Bootstrap - - - - -AggregatorCA - -AggregatorCA - - -RootCA->AggregatorCA - - - - -RootCA->Admin - - - - -RootCA->Kubelet - - - - -EtcdCA->Manifests - - - - -EtcdClientCertKey - -EtcdClientCertKey - - -EtcdCA->EtcdClientCertKey - - - - -EtcdCA->Bootstrap - - - - -IngressCertKey->Manifests - - - - -KubeCA->IngressCertKey - - - - -KubeCA->Manifests - - - - -KubeletCertKey - -KubeletCertKey - - -KubeCA->KubeletCertKey - - - - -KubeCA->Bootstrap - - - - -KubeCA->APIServerCertKey - - - - -AdminCertKey - -AdminCertKey - - -KubeCA->AdminCertKey - - - - -ServiceServingCA->Manifests - - - - -ServiceServingCA->Bootstrap - - - - -EtcdClientCertKey->Manifests - - - - -EtcdClientCertKey->Bootstrap - - - - -MCSCertKey->Manifests - - - - -MCSCertKey->Bootstrap - - - - -KubeletCertKey->Manifests - - - - -KubeletCertKey->Bootstrap - - - - -KubeletCertKey->Kubelet - - - - -Tectonic->Target Manifests - - - - -Tectonic->Bootstrap - - - - -ClusterK8sIO->Tectonic - - - - -Worker->Tectonic - - - - -Worker->Worker - - - - -Target Ignition Configs - -Target Ignition Configs - - -Worker->Target Ignition Configs - - - - -Master->Tectonic - - - - -Master->Master - - - - -Master->Target Ignition Configs - - - - -Master->TerraformVariables - - - - -Bootstrap->Target Ignition Configs - - - - -Bootstrap->TerraformVariables - - - - -AggregatorCA->Bootstrap - - - - -APIServerProxyCertKey - -APIServerProxyCertKey - - -AggregatorCA->APIServerProxyCertKey - - - - -APIServerCertKey->Bootstrap - - - - -APIServerProxyCertKey->Bootstrap - - - - -AdminCertKey->Bootstrap - - - - -AdminCertKey->Admin - - - - -ServiceAccountKeyPair - -ServiceAccountKeyPair - - -ServiceAccountKeyPair->Bootstrap - - - - -Admin->Bootstrap - - - - -Target Cluster - -Target 
Cluster - - -Admin->Target Cluster - - - - -Admin->Cluster - - - - -Kubelet->Bootstrap - - - - -TerraformVariables->Target Cluster - - - - -TerraformVariables->Cluster - - - - -Cluster->Target Cluster - - - - - diff --git a/docs/dev/libvirt-howto.md b/docs/dev/libvirt-howto.md index 5c408a0ff06..98d50d9541b 100644 --- a/docs/dev/libvirt-howto.md +++ b/docs/dev/libvirt-howto.md @@ -285,12 +285,12 @@ Here `OPENSHIFT_INSTALL_LIBVIRT_URI` is the libvirt connection URI which you [pa You'll need a `kubectl` binary on your path and [the kubeconfig from your `cluster` call](../../README.md#kubeconfig). ```sh -export KUBECONFIG="${DIR}/auth/kubeconfig" +export KUBECONFIG="${DIR}/auth/kubeconfig-admin" kubectl get --all-namespaces pods ``` Alternatively, you can run `kubectl` from the bootstrap or master nodes. -Use `scp` or similar to transfer your local `${DIR}/auth/kubeconfig`, then [SSH in](#ssh-access) and run: +Use `scp` or similar to transfer your local `${DIR}/auth/kubeconfig-admin`, then [SSH in](#ssh-access) and run: ```sh export KUBECONFIG=/where/you/put/your/kubeconfig diff --git a/docs/user/assets.svg b/docs/user/assets.svg new file mode 100644 index 00000000000..e4205b7050c --- /dev/null +++ b/docs/user/assets.svg @@ -0,0 +1,2297 @@ + + + + + + +G + + +platform + +platform + + +cluster + +cluster + + +platform->cluster + + + + + + + +metadata.json + +metadata.json + + +platform->metadata.json + + + + + + + +ignition/bootstrap.ign + +ignition/bootstrap.ign + + +platform->ignition/bootstrap.ign + + + + + + + +manifests/cluster-config.yaml + +manifests/cluster-config.yaml + + +platform->manifests/cluster-config.yaml + + + + + + + +machines/master-count + +machines/master-count + + +platform->machines/master-count + + + + + + + +network/node-cidr + +network/node-cidr + + +platform->network/node-cidr + + + + + + + +cluster-name + +cluster-name + + +cluster-name->metadata.json + + + + + + + +aws/metadata.json + +aws/metadata.json + + +cluster-name->aws/metadata.json 
+ + + + + + + +terraform/terraform.tfvars + +terraform/terraform.tfvars + + +cluster-name->terraform/terraform.tfvars + + + + + + + +auth/kubeconfig-kubelet + +auth/kubeconfig-kubelet + + +cluster-name->auth/kubeconfig-kubelet + + + + + + + +auth/kubeconfig-admin + +auth/kubeconfig-admin + + +cluster-name->auth/kubeconfig-admin + + + + + + + +cluster-name->manifests/cluster-config.yaml + + + + + + + +manifests/cluster-ingress-02-config.yaml + +manifests/cluster-ingress-02-config.yaml + + +cluster-name->manifests/cluster-ingress-02-config.yaml + + + + + + + +files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml + +files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml + + +cluster-name->files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml + + + + + + + +tls/machine-config-server.crt + +tls/machine-config-server.crt + + +cluster-name->tls/machine-config-server.crt + + + + + + + +manifests/99_openshift-cluster-api_cluster.yaml + +manifests/99_openshift-cluster-api_cluster.yaml + + +cluster-name->manifests/99_openshift-cluster-api_cluster.yaml + + + + + + + +manifests/aws/99_openshift-cluster-api_master-machines.yaml + +manifests/aws/99_openshift-cluster-api_master-machines.yaml + + +cluster-name->manifests/aws/99_openshift-cluster-api_master-machines.yaml + + + + + + + +ignition/master.ign + +ignition/master.ign + + +cluster-name->ignition/master.ign + + + + + + + +manifests/aws/99_openshift-cluster-api_worker-machinesets.yaml + +manifests/aws/99_openshift-cluster-api_worker-machinesets.yaml + + +cluster-name->manifests/aws/99_openshift-cluster-api_worker-machinesets.yaml + + + + + + + +ignition/worker.ign + +ignition/worker.ign + + +cluster-name->ignition/worker.ign + + + + + + + +tls/api-server.crt + +tls/api-server.crt + + +cluster-name->tls/api-server.crt + + + + + + + +files/usr/local/bin/bootkube.sh + +files/usr/local/bin/bootkube.sh + + +cluster-name->files/usr/local/bin/bootkube.sh + + + + + + + +metadata.json->cluster + + + + + + + 
+aws/region + +aws/region + + +aws/region->aws/metadata.json + + + + + + + +aws/ami + +aws/ami + + +aws/region->aws/ami + + + + + + + +aws/region->manifests/aws/99_openshift-cluster-api_master-machines.yaml + + + + + + + +aws/zones + +aws/zones + + +aws/region->aws/zones + + + + + + + +aws/region->manifests/aws/99_openshift-cluster-api_worker-machinesets.yaml + + + + + + + +terraform/aws-terraform.auto.tfvars + +terraform/aws-terraform.auto.tfvars + + +aws/region->terraform/aws-terraform.auto.tfvars + + + + + + + +aws/metadata.json->metadata.json + + + + + + + +cluster-id + +cluster-id + + +cluster-id->aws/metadata.json + + + + + + + +cluster-id->terraform/terraform.tfvars + + + + + + + +cluster-id->manifests/cluster-config.yaml + + + + + + + +files/opt/tectonic/manifests/cvo-overrides.yaml + +files/opt/tectonic/manifests/cvo-overrides.yaml + + +cluster-id->files/opt/tectonic/manifests/cvo-overrides.yaml + + + + + + + +files/opt/tectonic/manifests/legacy-cvo-overrides.yaml + +files/opt/tectonic/manifests/legacy-cvo-overrides.yaml + + +cluster-id->files/opt/tectonic/manifests/legacy-cvo-overrides.yaml + + + + + + + +cluster-id->manifests/aws/99_openshift-cluster-api_master-machines.yaml + + + + + + + +cluster-id->manifests/aws/99_openshift-cluster-api_worker-machinesets.yaml + + + + + + + +base-domain + +base-domain + + +base-domain->terraform/terraform.tfvars + + + + + + + +base-domain->auth/kubeconfig-kubelet + + + + + + + +base-domain->auth/kubeconfig-admin + + + + + + + +base-domain->manifests/cluster-config.yaml + + + + + + + +base-domain->manifests/cluster-ingress-02-config.yaml + + + + + + + +base-domain->files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml + + + + + + + +base-domain->tls/machine-config-server.crt + + + + + + + +base-domain->ignition/master.ign + + + + + + + +base-domain->ignition/worker.ign + + + + + + + +base-domain->tls/api-server.crt + + + + + + + +base-domain->files/usr/local/bin/bootkube.sh + + + + + + + 
+terraform/terraform.tfvars->cluster + + + + + + + +ssh.pub + +ssh.pub + + +ssh.pub->ignition/bootstrap.ign + + + + + + + +ssh.pub->manifests/cluster-config.yaml + + + + + + + +ssh.pub->ignition/master.ign + + + + + + + +ssh.pub->ignition/worker.ign + + + + + + + +ignition/bootstrap.ign->terraform/terraform.tfvars + + + + + + + +systemd/units/bootkube.service + +systemd/units/bootkube.service + + +systemd/units/bootkube.service->ignition/bootstrap.ign + + + + + + + +systemd/units/tectonic.service + +systemd/units/tectonic.service + + +systemd/units/tectonic.service->ignition/bootstrap.ign + + + + + + + +systemd/units/progress.service + +systemd/units/progress.service + + +systemd/units/progress.service->ignition/bootstrap.ign + + + + + + + +systemd/units/kubelet.service + +systemd/units/kubelet.service + + +systemd/units/kubelet.service->ignition/bootstrap.ign + + + + + + + +auth/kubeconfig-kubelet->ignition/bootstrap.ign + + + + + + + +auth/kubeconfig-kubelet->ignition/bootstrap.ign + + + + + + + +tls/kubelet-client.key + +tls/kubelet-client.key + + +tls/kubelet-client.key->auth/kubeconfig-kubelet + + + + + + + +tls/kubelet-client.crt + +tls/kubelet-client.crt + + +tls/kubelet-client.key->tls/kubelet-client.crt + + + + + + + +tls/kubelet-client.key->ignition/bootstrap.ign + + + + + + + +tls/kubelet-client.crt->auth/kubeconfig-kubelet + + + + + + + +tls/kubelet-client.crt->ignition/bootstrap.ign + + + + + + + +tls/kube-ca.key + +tls/kube-ca.key + + +tls/kube-ca.crt + +tls/kube-ca.crt + + +tls/kube-ca.key->tls/kube-ca.crt + + + + + + + +tls/kube-ca.key->tls/kubelet-client.crt + + + + + + + +tls/admin-client.crt + +tls/admin-client.crt + + +tls/kube-ca.key->tls/admin-client.crt + + + + + + + +tls/kube-ca.key->tls/api-server.crt + + + + + + + +tls/api-server-proxy.crt + +tls/api-server-proxy.crt + + +tls/kube-ca.key->tls/api-server-proxy.crt + + + + + + + +tls/kube-ca.key->ignition/bootstrap.ign + + + + + + + +tls/kube-ca.crt->tls/kubelet-client.crt + + + + + + + 
+tls/kube-ca.crt->tls/admin-client.crt + + + + + + + +tls/kube-ca.crt->tls/api-server.crt + + + + + + + +tls/api-server-chain.crt + +tls/api-server-chain.crt + + +tls/kube-ca.crt->tls/api-server-chain.crt + + + + + + + +tls/kube-ca.crt->tls/api-server-proxy.crt + + + + + + + +tls/kube-ca.crt->ignition/bootstrap.ign + + + + + + + +tls/root-ca.key + +tls/root-ca.key + + +tls/root-ca.crt + +tls/root-ca.crt + + +tls/root-ca.key->tls/root-ca.crt + + + + + + + +tls/root-ca.key->tls/kube-ca.crt + + + + + + + +tls/etcd-ca.crt + +tls/etcd-ca.crt + + +tls/root-ca.key->tls/etcd-ca.crt + + + + + + + +tls/root-ca.key->tls/machine-config-server.crt + + + + + + + +tls/service-serving-ca.crt + +tls/service-serving-ca.crt + + +tls/root-ca.key->tls/service-serving-ca.crt + + + + + + + +tls/aggregator-ca.crt + +tls/aggregator-ca.crt + + +tls/root-ca.key->tls/aggregator-ca.crt + + + + + + + +tls/root-ca.key->ignition/bootstrap.ign + + + + + + + +tls/root-ca.crt->tls/kube-ca.crt + + + + + + + +tls/root-ca.crt->auth/kubeconfig-kubelet + + + + + + + +tls/root-ca.crt->tls/etcd-ca.crt + + + + + + + +tls/root-ca.crt->auth/kubeconfig-admin + + + + + + + +files/opt/tectonic/manifests/kube-system-configmap-root-ca.yaml + +files/opt/tectonic/manifests/kube-system-configmap-root-ca.yaml + + +tls/root-ca.crt->files/opt/tectonic/manifests/kube-system-configmap-root-ca.yaml + + + + + + + +tls/root-ca.crt->tls/machine-config-server.crt + + + + + + + +tls/root-ca.crt->tls/service-serving-ca.crt + + + + + + + +tls/root-ca.crt->ignition/master.ign + + + + + + + +tls/root-ca.crt->ignition/worker.ign + + + + + + + +tls/root-ca.crt->tls/aggregator-ca.crt + + + + + + + +tls/root-ca.crt->ignition/bootstrap.ign + + + + + + + +files/etc/motd + +files/etc/motd + + +files/etc/motd->ignition/bootstrap.ign + + + + + + + +tls/etcd-client.key + +tls/etcd-client.key + + +tls/etcd-client.crt + +tls/etcd-client.crt + + +tls/etcd-client.key->tls/etcd-client.crt + + + + + + + 
+files/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml + +files/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml + + +tls/etcd-client.key->files/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml + + + + + + + +tls/etcd-client.key->ignition/bootstrap.ign + + + + + + + +tls/etcd-client.crt->ignition/bootstrap.ign + + + + + + + +tls/etcd-client.crt->files/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml + + + + + + + +tls/etcd-client.crt->ignition/bootstrap.ign + + + + + + + +tls/etcd-ca.key + +tls/etcd-ca.key + + +tls/etcd-ca.key->tls/etcd-ca.crt + + + + + + + +tls/etcd-ca.key->tls/etcd-client.crt + + + + + + + +tls/etcd-ca.key->ignition/bootstrap.ign + + + + + + + +tls/etcd-ca.crt->tls/etcd-client.crt + + + + + + + +files/opt/tectonic/manifests/kube-system-configmap-etcd-serving-ca.yaml + +files/opt/tectonic/manifests/kube-system-configmap-etcd-serving-ca.yaml + + +tls/etcd-ca.crt->files/opt/tectonic/manifests/kube-system-configmap-etcd-serving-ca.yaml + + + + + + + +tls/etcd-ca.crt->ignition/bootstrap.ign + + + + + + + +files/home/core/.bash_history + +files/home/core/.bash_history + + +files/home/core/.bash_history->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/bootkube-config-overrides/kube-apiserver-config-overrides.yaml + +files/opt/tectonic/bootkube-config-overrides/kube-apiserver-config-overrides.yaml + + +files/opt/tectonic/bootkube-config-overrides/kube-apiserver-config-overrides.yaml->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/bootkube-config-overrides/kube-controller-manager-config-overrides.yaml + +files/opt/tectonic/bootkube-config-overrides/kube-controller-manager-config-overrides.yaml + + +files/opt/tectonic/bootkube-config-overrides/kube-controller-manager-config-overrides.yaml->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/bootkube-config-overrides/kube-scheduler-config-overrides.yaml + +files/opt/tectonic/bootkube-config-overrides/kube-scheduler-config-overrides.yaml + + 
+files/opt/tectonic/bootkube-config-overrides/kube-scheduler-config-overrides.yaml->ignition/bootstrap.ign + + + + + + + +auth/kubeconfig-admin->ignition/bootstrap.ign + + + + + + + +tls/admin-client.key + +tls/admin-client.key + + +tls/admin-client.key->auth/kubeconfig-admin + + + + + + + +tls/admin-client.key->tls/admin-client.crt + + + + + + + +tls/admin-client.key->ignition/bootstrap.ign + + + + + + + +tls/admin-client.crt->auth/kubeconfig-admin + + + + + + + +tls/admin-client.crt->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/manifests/03-openshift-web-console-namespace.yaml + +files/opt/tectonic/manifests/03-openshift-web-console-namespace.yaml + + +files/opt/tectonic/manifests/03-openshift-web-console-namespace.yaml->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/manifests/04-openshift-machine-config-operator.yaml + +files/opt/tectonic/manifests/04-openshift-machine-config-operator.yaml + + +files/opt/tectonic/manifests/04-openshift-machine-config-operator.yaml->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/manifests/05-openshift-cluster-api-namespace.yaml + +files/opt/tectonic/manifests/05-openshift-cluster-api-namespace.yaml + + +files/opt/tectonic/manifests/05-openshift-cluster-api-namespace.yaml->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/manifests/09-openshift-service-cert-signer-namespace.yaml + +files/opt/tectonic/manifests/09-openshift-service-cert-signer-namespace.yaml + + +files/opt/tectonic/manifests/09-openshift-service-cert-signer-namespace.yaml->ignition/bootstrap.ign + + + + + + + +admin/email + +admin/email + + +admin/email->manifests/cluster-config.yaml + + + + + + + +manifests/cluster-config.yaml->ignition/bootstrap.ign + + + + + + + +manifests/cluster-network-02-config.yaml + +manifests/cluster-network-02-config.yaml + + +manifests/cluster-config.yaml->manifests/cluster-network-02-config.yaml + + + + + + + +admin/password + +admin/password + + +admin/password->manifests/cluster-config.yaml + + 
+ + + + + +network/cluster-cidr + +network/cluster-cidr + + +network/cluster-cidr->manifests/cluster-config.yaml + + + + + + + +network/host-subnet-length + +network/host-subnet-length + + +network/host-subnet-length->manifests/cluster-config.yaml + + + + + + + +network/host-subnet-length->manifests/cluster-network-02-config.yaml + + + + + + + +network/service-cidr + +network/service-cidr + + +network/service-cidr->manifests/cluster-config.yaml + + + + + + + +network/service-cidr->manifests/99_openshift-cluster-api_cluster.yaml + + + + + + + +network/service-cidr->tls/api-server.crt + + + + + + + +pull-secret + +pull-secret + + +pull-secret->manifests/cluster-config.yaml + + + + + + + +manifests/pull.json + +manifests/pull.json + + +pull-secret->manifests/pull.json + + + + + + + +files/opt/tectonic/manifests/cluster-ingress-01-crd.yaml + +files/opt/tectonic/manifests/cluster-ingress-01-crd.yaml + + +files/opt/tectonic/manifests/cluster-ingress-01-crd.yaml->ignition/bootstrap.ign + + + + + + + +manifests/cluster-ingress-02-config.yaml->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/manifests/cluster-network-01-crd.yaml + +files/opt/tectonic/manifests/cluster-network-01-crd.yaml + + +files/opt/tectonic/manifests/cluster-network-01-crd.yaml->ignition/bootstrap.ign + + + + + + + +manifests/cluster-network-02-config.yaml->ignition/bootstrap.ign + + + + + + + +manifests/cluster-network-02-config.yaml->manifests/99_openshift-cluster-api_cluster.yaml + + + + + + + +files/opt/tectonic/manifests/cvo-overrides.yaml.template + +files/opt/tectonic/manifests/cvo-overrides.yaml.template + + +files/opt/tectonic/manifests/cvo-overrides.yaml.template->files/opt/tectonic/manifests/cvo-overrides.yaml + + + + + + + +files/opt/tectonic/manifests/cvo-overrides.yaml->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/manifests/etcd-service.yaml + +files/opt/tectonic/manifests/etcd-service.yaml + + +files/opt/tectonic/manifests/etcd-service.yaml->ignition/bootstrap.ign + + 
+ + + + + +files/opt/tectonic/manifests/host-etcd-service.yaml + +files/opt/tectonic/manifests/host-etcd-service.yaml + + +files/opt/tectonic/manifests/host-etcd-service.yaml->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml.template + +files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml.template + + +files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml.template->files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml + + + + + + + +files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml->ignition/bootstrap.ign + + + + + + + +machines/master-count->files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml + + + + + + + +machines/master-count->manifests/aws/99_openshift-cluster-api_master-machines.yaml + + + + + + + +machines/master-count->files/usr/local/bin/bootkube.sh + + + + + + + +machines/master-count->terraform/terraform.tfvars + + + + + + + +files/opt/tectonic/manifests/legacy-cvo-overrides.yaml.template + +files/opt/tectonic/manifests/legacy-cvo-overrides.yaml.template + + +files/opt/tectonic/manifests/legacy-cvo-overrides.yaml.template->files/opt/tectonic/manifests/legacy-cvo-overrides.yaml + + + + + + + +files/opt/tectonic/manifests/legacy-cvo-overrides.yaml->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/manifests/kube-cloud-config.yaml + +files/opt/tectonic/manifests/kube-cloud-config.yaml + + +files/opt/tectonic/manifests/kube-cloud-config.yaml->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml.template + +files/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml.template + + +files/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml.template->files/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml + + + + + + + +files/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml->ignition/bootstrap.ign + + + + + + + 
+files/opt/tectonic/manifests/kube-system-configmap-etcd-serving-ca.yaml.template + +files/opt/tectonic/manifests/kube-system-configmap-etcd-serving-ca.yaml.template + + +files/opt/tectonic/manifests/kube-system-configmap-etcd-serving-ca.yaml.template->files/opt/tectonic/manifests/kube-system-configmap-etcd-serving-ca.yaml + + + + + + + +files/opt/tectonic/manifests/kube-system-configmap-etcd-serving-ca.yaml->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/manifests/kube-system-configmap-root-ca.yaml.template + +files/opt/tectonic/manifests/kube-system-configmap-root-ca.yaml.template + + +files/opt/tectonic/manifests/kube-system-configmap-root-ca.yaml.template->files/opt/tectonic/manifests/kube-system-configmap-root-ca.yaml + + + + + + + +files/opt/tectonic/manifests/kube-system-configmap-root-ca.yaml->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/manifests/machine-config-server-tls-secret.yaml.template + +files/opt/tectonic/manifests/machine-config-server-tls-secret.yaml.template + + +files/opt/tectonic/manifests/machine-config-server-tls-secret.yaml + +files/opt/tectonic/manifests/machine-config-server-tls-secret.yaml + + +files/opt/tectonic/manifests/machine-config-server-tls-secret.yaml.template->files/opt/tectonic/manifests/machine-config-server-tls-secret.yaml + + + + + + + +files/opt/tectonic/manifests/machine-config-server-tls-secret.yaml->ignition/bootstrap.ign + + + + + + + +tls/machine-config-server.key + +tls/machine-config-server.key + + +tls/machine-config-server.key->tls/machine-config-server.crt + + + + + + + +tls/machine-config-server.key->files/opt/tectonic/manifests/machine-config-server-tls-secret.yaml + + + + + + + +tls/machine-config-server.key->ignition/bootstrap.ign + + + + + + + +tls/machine-config-server.crt->files/opt/tectonic/manifests/machine-config-server-tls-secret.yaml + + + + + + + +tls/machine-config-server.crt->ignition/bootstrap.ign + + + + + + + 
+files/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml.template + +files/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml.template + + +files/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml + +files/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml + + +files/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml.template->files/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml + + + + + + + +files/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml->ignition/bootstrap.ign + + + + + + + +tls/service-serving-ca.key + +tls/service-serving-ca.key + + +tls/service-serving-ca.key->tls/service-serving-ca.crt + + + + + + + +tls/service-serving-ca.key->files/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml + + + + + + + +tls/service-serving-ca.key->ignition/bootstrap.ign + + + + + + + +tls/service-serving-ca.crt->files/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml + + + + + + + +tls/service-serving-ca.crt->ignition/bootstrap.ign + + + + + + + +manifests/pull.json->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/tectonic/99_binding-discovery.yaml + +files/opt/tectonic/tectonic/99_binding-discovery.yaml + + +files/opt/tectonic/tectonic/99_binding-discovery.yaml->ignition/bootstrap.ign + + + + + + + +files/opt/tectonic/tectonic/aws/99_cloud-creds-secret.yaml.template + +files/opt/tectonic/tectonic/aws/99_cloud-creds-secret.yaml.template + + +files/opt/tectonic/tectonic/aws/99_cloud-creds-secret.yaml + +files/opt/tectonic/tectonic/aws/99_cloud-creds-secret.yaml + + +files/opt/tectonic/tectonic/aws/99_cloud-creds-secret.yaml.template->files/opt/tectonic/tectonic/aws/99_cloud-creds-secret.yaml + + + + + + + +files/opt/tectonic/tectonic/aws/99_cloud-creds-secret.yaml->ignition/bootstrap.ign + + + + + + + +manifests/99_openshift-cluster-api_cluster.yaml->ignition/bootstrap.ign + + + + + + + 
+aws/ami->manifests/aws/99_openshift-cluster-api_master-machines.yaml + + + + + + + +aws/ami->manifests/aws/99_openshift-cluster-api_worker-machinesets.yaml + + + + + + + +aws/ami->terraform/aws-terraform.auto.tfvars + + + + + + + +manifests/aws/99_openshift-cluster-api_master-machines.yaml->ignition/bootstrap.ign + + + + + + + +aws/instance-type + +aws/instance-type + + +aws/instance-type->manifests/aws/99_openshift-cluster-api_master-machines.yaml + + + + + + + +aws/instance-type->manifests/aws/99_openshift-cluster-api_worker-machinesets.yaml + + + + + + + +aws/instance-type->terraform/aws-terraform.auto.tfvars + + + + + + + +aws/user-tags + +aws/user-tags + + +aws/user-tags->manifests/aws/99_openshift-cluster-api_master-machines.yaml + + + + + + + +aws/user-tags->manifests/aws/99_openshift-cluster-api_worker-machinesets.yaml + + + + + + + +aws/user-tags->terraform/aws-terraform.auto.tfvars + + + + + + + +aws/zones->manifests/aws/99_openshift-cluster-api_master-machines.yaml + + + + + + + +aws/zones->manifests/aws/99_openshift-cluster-api_worker-machinesets.yaml + + + + + + + +manifests/99_openshift-cluster-api_master-user-data-secret.yaml + +manifests/99_openshift-cluster-api_master-user-data-secret.yaml + + +ignition/master.ign->manifests/99_openshift-cluster-api_master-user-data-secret.yaml + + + + + + + +ignition/master.ign->terraform/terraform.tfvars + + + + + + + +manifests/99_openshift-cluster-api_master-user-data-secret.yaml->ignition/bootstrap.ign + + + + + + + +manifests/aws/99_openshift-cluster-api_worker-machinesets.yaml->ignition/bootstrap.ign + + + + + + + +manifests/99_openshift-cluster-api_worker-user-data-secret.yaml + +manifests/99_openshift-cluster-api_worker-user-data-secret.yaml + + +ignition/worker.ign->manifests/99_openshift-cluster-api_worker-user-data-secret.yaml + + + + + + + +manifests/99_openshift-cluster-api_worker-user-data-secret.yaml->ignition/bootstrap.ign + + + + + + + 
+files/opt/tectonic/tectonic/aws/99_role-cloud-creds-secret-reader.yaml + +files/opt/tectonic/tectonic/aws/99_role-cloud-creds-secret-reader.yaml + + +files/opt/tectonic/tectonic/aws/99_role-cloud-creds-secret-reader.yaml->ignition/bootstrap.ign + + + + + + + +tls/aggregator-ca.key + +tls/aggregator-ca.key + + +tls/aggregator-ca.key->tls/aggregator-ca.crt + + + + + + + +tls/aggregator-ca.key->ignition/bootstrap.ign + + + + + + + +tls/aggregator-ca.crt->ignition/bootstrap.ign + + + + + + + +tls/api-server.key + +tls/api-server.key + + +tls/api-server.key->tls/api-server.crt + + + + + + + +tls/api-server.key->ignition/bootstrap.ign + + + + + + + +tls/api-server.crt->tls/api-server-chain.crt + + + + + + + +tls/api-server-chain.crt->ignition/bootstrap.ign + + + + + + + +tls/api-server-proxy.key + +tls/api-server-proxy.key + + +tls/api-server-proxy.key->tls/api-server-proxy.crt + + + + + + + +tls/api-server-proxy.key->ignition/bootstrap.ign + + + + + + + +tls/api-server-proxy.crt->ignition/bootstrap.ign + + + + + + + +tls/service-account.key + +tls/service-account.key + + +tls/service-account.key->ignition/bootstrap.ign + + + + + + + +files/usr/local/bin/bootkube.sh.template + +files/usr/local/bin/bootkube.sh.template + + +files/usr/local/bin/bootkube.sh.template->files/usr/local/bin/bootkube.sh + + + + + + + +files/usr/local/bin/bootkube.sh->ignition/bootstrap.ign + + + + + + + +image/etcd-cert-signer + +image/etcd-cert-signer + + +image/etcd-cert-signer->files/usr/local/bin/bootkube.sh + + + + + + + +image/etcdctl + +image/etcdctl + + +image/etcdctl->files/usr/local/bin/bootkube.sh + + + + + + + +image/release + +image/release + + +image/release->files/usr/local/bin/bootkube.sh + + + + + + + +image/bootkube + +image/bootkube + + +image/bootkube->files/usr/local/bin/bootkube.sh + + + + + + + +files/usr/local/bin/report-progress.sh + +files/usr/local/bin/report-progress.sh + + +files/usr/local/bin/report-progress.sh->ignition/bootstrap.ign + + + + + + + 
+files/usr/local/bin/tectonic.sh + +files/usr/local/bin/tectonic.sh + + +files/usr/local/bin/tectonic.sh->ignition/bootstrap.ign + + + + + + + +terraform/aws-terraform.auto.tfvars->cluster + + + + + + + +aws/external-vpc-id + +aws/external-vpc-id + + +aws/external-vpc-id->terraform/aws-terraform.auto.tfvars + + + + + + + +network/node-cidr->terraform/aws-terraform.auto.tfvars + + + + + + + + diff --git a/docs/user/overview.md b/docs/user/overview.md index da51eb05bc5..adde4e459ba 100644 --- a/docs/user/overview.md +++ b/docs/user/overview.md @@ -4,7 +4,57 @@ The OpenShift Installer is designed to help users, ranging from novices to exper In [supported environments](#supported-environments), the installer is also capable of provisioning the underlying infrastructure for the cluster. It is recommended that most users make use of this functionality in order to avoid having to provision their own infrastructure. In unsupported environments or scenarios in which installer-created infrastructure would be incompatible, the installer can stop short of creating the infrastructure, and allow the user to provision their own infrastructure using the cluster assets generated by the installer. -## Cluster Installation Process +## Lifecycle + +### Asset Generation + +The first installation phase is completely local, as the installer generates the assets that will be used to create the cluster resources. +The installer maintains an asset graph tracking dependency relations between assets. + +![Asset dependencies](assets.svg) + +This graph is generated with: + +```sh +openshift-install graph | dot -Tsvg >docs/user/assets.svg +``` + +This is similar to other tools which operate on a dependency graph (e.g. `make` and `systemd`). +The root target is `cluster`, and once that target is built, there is enough information to move on to [installation](#installation). 
+ +Running: + +```sh +openshift-install --dir example create assets +``` + +will render the asset graph into the `example` directory. +The installer will pick reasonable defaults where possible, and prompt the user for assets where we cannot find a reasonable default for all users. +To avoid being prompted, you can pass your choices into the installer using [environment variables](environment-variables.md). + +Each asset is represented by two files, one in the asset directory itself, and another in its `.state` subdirectory. +The entry in the `.state` subdirectory holds the rendered JSON `Asset` structure, containing the asset's raw data along with associated metadata (parent references, etc.). +The entry in the asset directory holds the asset's raw data without the associated metadata. + +The installer uses the JSON state files to preserve asset information between invocations, but editing the JSON payload is an awkward way to make manual adjustments. +The raw data files are provided as a more convenient avenue for user adjustments. + +To adjust a particular asset, you can edit the raw data file and rerun `openshift-install ... create assets`. +For example: + +```sh +openshift-install --dir example create assets +sed -i 's|^\(OPENSHIFT_HYPERSHIFT_IMAGE\)=.*|\1=quay.io/your/hypershift:123|' example/bootkube.sh +openshift-install --dir example create assets +``` + +will rebuild the asset graph using your custom HyperShift image in `bootkube.sh`. + +You can continue to edit and run `openshift-install ... create assets` as many times as you like. +To return to the installer's built-in logic for an asset, remove its `.state/{slugged-asset-name}` file (it doesn't matter if you also remove the associated raw data file or not). +When you are satisfied with the asset store, move on to installation. + +### Installation OpenShift is unique in that its management extends all the way down to the operating system itself. 
Every machine boots with a configuration which references resources hosted in the cluster its joining. This allows the cluster to manage itself as updates are applied. A downside to this approach, however, is that new clusters have no way of starting without external help - every machine in the to-be-created cluster is waiting on the to-be-created cluster. @@ -25,31 +75,19 @@ The result of this bootstrapping process is a fully running OpenShift cluster. T [ignition]: https://github.com/coreos/ignition/blob/master/doc/getting-started.md -## Key Concepts - -While striving to remain simple and easy to use, the installer allows many aspects of the clusters it creates to be customized. It is helpful to understand certain key concepts before attempting to customize the installation. - -### Targets - -The OpenShift Installer operates on the notion of creating and destroying targets. Similar to other tools which operate on a graph of dependencies (e.g. make, systemd), each target represents a subset of the dependencies in the graph. The main target in the installer creates a cluster, but the other targets allow the user to interrupt this process and consume or modify the intermediate artifacts (e.g. the Kubernetes manifests that will be installed into the cluster). Only the immediate dependencies of a target are written to disk by the installer, but the installer can be invoked [multiple times](#multiple-invocations). - -The following targets can be created by the installer: - -- `install-config` - The install config contains the main parameters for the installation process. This configuration provides the user with more options than the interactive prompts and comes pre-populated with default values. -- `manifests` - This target outputs all of the Kubernetes manifests that will be installed on the cluster. -- `ignition-configs` - These are the three Ignition Configs for the bootstrap, master, and worker machines. 
-- `cluster` - This target provisions the cluster by invoking a locally-installed Terraform. - -The following targets can be destroyed by the installer: +### Teardown -- `cluster` - This destroys the created cluster and its associated infrastructure. -- `bootstrap` - This destroys the bootstrap infrastructure. +You can manually remove the bootstrap infrastructure with: -### Multiple Invocations +```sh +openshift-install ... destroy bootstrap +``` -In order to allow users to customize their installation, the installer can be invoked multiple times. The state is stored in a hidden file in the target directory and contains all of the intermediate artifacts. This allows the installer to pause during the installation and wait for the user to modify intermediate artifacts. +And you can remove the entire cluster with: -For example, if changes to the install config were desired (e.g. the number of worker machines to create), the user would first invoke the installer with the `install-config` target: `openshift-install create install-config`. After prompting the user for the base parameters, the installer writes the install config into the target directory. The user can then make the desired modifications to the install config and invoke the installer with the `cluster` target: `openshift-install create cluster`. The installer will consume the install config from disk, removing it from the target directory, and proceed to create a cluster using the provided configuration. +```sh +openshift-install ... 
destroy cluster +``` ## Supported Environments diff --git a/docs/user/troubleshooting.md b/docs/user/troubleshooting.md index 76bff13eeab..d71c387d41a 100644 --- a/docs/user/troubleshooting.md +++ b/docs/user/troubleshooting.md @@ -11,7 +11,7 @@ The installer doesn't provision worker nodes directly, like it does with master The status of the Machine API Operator can be checked by running the following command from the machine used to install the cluster: ```sh -oc --config=${INSTALL_DIR}/auth/kubeconfig --namespace=openshift-cluster-api get pods +oc --config=${INSTALL_DIR}/auth/kubeconfig-admin --namespace=openshift-cluster-api get pods ``` If the API is unavailable, that will need to be [investigated first](#kubernetes-api-is-unavailable). @@ -27,7 +27,7 @@ machine-api-operator-7894d8f85-lq2ts 1/1 Running 0 The logs for the machine-controller container within the `clusterapi-manager-controllers` pod need to be checked to determine why the workers haven't been created. That can be done with the following (the exact name of the pod will need to be substituted): ```sh -oc --config=${INSTALL_DIR}/auth/kubeconfig --namespace=openshift-cluster-api logs clusterapi-manager-controllers-774dc4557-nx5xq --container=machine-controller +oc --config=${INSTALL_DIR}/auth/kubeconfig-admin --namespace=openshift-cluster-api logs clusterapi-manager-controllers-774dc4557-nx5xq --container=machine-controller ``` ### Kubernetes API is Unavailable @@ -100,7 +100,7 @@ For other generic troubleshooting, see [the Kubernetes documentation][kubernetes This is the generic version of the [*No Worker Nodes Created*](#no-worker-nodes-created) troubleshooting procedure. 
```console -$ oc --config=${INSTALL_DIR}/auth/kubeconfig get pods --all-namespaces +$ oc --config=${INSTALL_DIR}/auth/kubeconfig-admin get pods --all-namespaces NAMESPACE NAME READY STATUS RESTARTS AGE kube-system etcd-member-wking-master-0 1/1 Running 0 46s openshift-cluster-api machine-api-operator-586bd5b6b9-bxq9s 0/1 Pending 0 1m @@ -111,7 +111,7 @@ openshift-cluster-dns-operator cluster-dns-operator-7f4f6866b9-kzth5 You can investigate any pods listed as `Pending` with: ```sh -oc --config=${INSTALL_DIR}/auth/kubeconfig describe -n openshift-cluster-api pod/machine-api-operator-586bd5b6b9-bxq9s +oc --config=${INSTALL_DIR}/auth/kubeconfig-admin describe -n openshift-cluster-api pod/machine-api-operator-586bd5b6b9-bxq9s ``` which may show events with warnings like: @@ -123,7 +123,7 @@ Warning FailedScheduling 1m (x10 over 1m) default-scheduler 0/1 nodes are av You can get the image used for a crashing pod with: ```console -$ oc --config=${INSTALL_DIR}/auth/kubeconfig get pod -o "jsonpath={range .status.containerStatuses[*]}{.name}{'\t'}{.state}{'\t'}{.image}{'\n'}{end}" -n openshift-cluster-api machine-api-operator-586bd5b6b9-bxq9s +$ oc --config=${INSTALL_DIR}/auth/kubeconfig-admin get pod -o "jsonpath={range .status.containerStatuses[*]}{.name}{'\t'}{.state}{'\t'}{.image}{'\n'}{end}" -n openshift-cluster-api machine-api-operator-586bd5b6b9-bxq9s machine-api-operator map[running:map[startedAt:2018-11-13T19:04:50Z]] registry.svc.ci.openshift.org/openshift/origin-v4.0-20181113175638@sha256:c97d0b53b98d07053090f3c9563cfd8277587ce94f8c2400b33e246aa08332c7 ``` diff --git a/pkg/asset/asset.go b/pkg/asset/asset.go deleted file mode 100644 index b08652b0d2a..00000000000 --- a/pkg/asset/asset.go +++ /dev/null @@ -1,97 +0,0 @@ -package asset - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// Asset used to install OpenShift. 
-type Asset interface { - // Dependencies returns the assets upon which this asset directly depends. - Dependencies() []Asset - - // Generate generates this asset given the states of its parent assets. - Generate(Parents) error - - // Name returns the human-friendly name of the asset. - Name() string -} - -// WritableAsset is an Asset that has files that can be written to disk. -// It can also be loaded from disk. -type WritableAsset interface { - Asset - - // Files returns the files to write. - Files() []*File - - // Load returns the on-disk asset if it exists. - // The asset object should be changed only when it's loaded successfully. - Load(FileFetcher) (found bool, err error) -} - -// File is a file for an Asset. -type File struct { - // Filename is the name of the file. - Filename string - // Data is the contents of the file. - Data []byte -} - -// PersistToFile writes all of the files of the specified asset into the specified -// directory. -func PersistToFile(asset WritableAsset, directory string) error { - for _, f := range asset.Files() { - path := filepath.Join(directory, f.Filename) - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return errors.Wrap(err, "failed to create dir") - } - if err := ioutil.WriteFile(path, f.Data, 0644); err != nil { - return errors.Wrap(err, "failed to write file") - } - } - return nil -} - -// deleteAssetFromDisk removes all the files for asset from disk. -// this is function is not safe for calling concurrently on the same directory. 
-func deleteAssetFromDisk(asset WritableAsset, directory string) error { - logrus.Debugf("Purging asset %q from disk", asset.Name()) - for _, f := range asset.Files() { - path := filepath.Join(directory, f.Filename) - if err := os.Remove(path); err != nil && !os.IsNotExist(err) { - return errors.Wrap(err, "failed to remove file") - } - - dir := filepath.Dir(path) - ok, err := isDirEmpty(dir) - if err != nil && !os.IsNotExist(err) { - return errors.Wrap(err, "failed to read directory") - } - if ok { - if err := os.Remove(dir); err != nil { - return errors.Wrap(err, "failed to remove directory") - } - } - } - return nil -} - -func isDirEmpty(name string) (bool, error) { - f, err := os.Open(name) - if err != nil { - return false, err - } - defer f.Close() - - _, err = f.Readdirnames(1) // Or f.Readdir(1) - if err == io.EOF { - return true, nil - } - return false, err // Either not empty or error, suits both cases -} diff --git a/pkg/asset/asset_test.go b/pkg/asset/asset_test.go deleted file mode 100644 index 18cbf499c4e..00000000000 --- a/pkg/asset/asset_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package asset - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" -) - -type persistAsset struct{} - -func (a *persistAsset) Name() string { - return "persist-asset" -} - -func (a *persistAsset) Dependencies() []Asset { - return []Asset{} -} - -func (a *persistAsset) Generate(Parents) error { - return nil -} - -type writablePersistAsset struct { - persistAsset - FileList []*File -} - -func (a *writablePersistAsset) Files() []*File { - return a.FileList -} - -func (a *writablePersistAsset) Load(FileFetcher) (bool, error) { - return false, nil -} - -func TestPersistToFile(t *testing.T) { - cases := []struct { - name string - filenames []string - }{ - { - name: "no files", - filenames: []string{}, - }, - { - name: "single file", - filenames: []string{"file1"}, - }, - { - name: "multiple files", - filenames: 
[]string{"file1", "file2"}, - }, - { - name: "new directory", - filenames: []string{"dir1/file1"}, - }, - } - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - dir, err := ioutil.TempDir("", "TestStatePersistToFile") - if err != nil { - t.Skipf("could not create temporary directory: %v", err) - } - defer os.RemoveAll(dir) - - asset := &writablePersistAsset{ - FileList: make([]*File, len(tc.filenames)), - } - expectedFiles := map[string][]byte{} - for i, filename := range tc.filenames { - data := []byte(fmt.Sprintf("data%d", i)) - asset.FileList[i] = &File{ - Filename: filename, - Data: data, - } - expectedFiles[filepath.Join(dir, filename)] = data - } - err = PersistToFile(asset, dir) - assert.NoError(t, err, "unexpected error persisting state to file") - verifyFilesCreated(t, dir, expectedFiles) - }) - } -} - -func verifyFilesCreated(t *testing.T, dir string, expectedFiles map[string][]byte) { - dirContents, err := ioutil.ReadDir(dir) - assert.NoError(t, err, "could not read contents of directory %q", dir) - for _, fileinfo := range dirContents { - fullPath := filepath.Join(dir, fileinfo.Name()) - if fileinfo.IsDir() { - verifyFilesCreated(t, fullPath, expectedFiles) - } else { - expectedData, fileExpected := expectedFiles[fullPath] - if !fileExpected { - t.Errorf("Unexpected file created: %v", fullPath) - continue - } - actualData, err := ioutil.ReadFile(fullPath) - assert.NoError(t, err, "unexpected error reading created file %q", fullPath) - assert.Equal(t, expectedData, actualData, "unexpected data in created file %q", fullPath) - delete(expectedFiles, fullPath) - } - } - for f := range expectedFiles { - t.Errorf("Expected file %q not created", f) - } -} diff --git a/pkg/asset/cluster/aws/aws.go b/pkg/asset/cluster/aws/aws.go deleted file mode 100644 index bce9d4ce71d..00000000000 --- a/pkg/asset/cluster/aws/aws.go +++ /dev/null @@ -1,24 +0,0 @@ -// Package aws extracts AWS metadata from install configurations. 
-package aws - -import ( - "fmt" - - "github.com/openshift/installer/pkg/types" - "github.com/openshift/installer/pkg/types/aws" -) - -// Metadata converts an install configuration to AWS metadata. -func Metadata(config *types.InstallConfig) *aws.Metadata { - return &aws.Metadata{ - Region: config.Platform.AWS.Region, - Identifier: []map[string]string{ - { - "tectonicClusterID": config.ClusterID, - }, - { - fmt.Sprintf("kubernetes.io/cluster/%s", config.ObjectMeta.Name): "owned", - }, - }, - } -} diff --git a/pkg/asset/cluster/cluster.go b/pkg/asset/cluster/cluster.go deleted file mode 100644 index af0d65a2782..00000000000 --- a/pkg/asset/cluster/cluster.go +++ /dev/null @@ -1,157 +0,0 @@ -package cluster - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/cluster/aws" - "github.com/openshift/installer/pkg/asset/cluster/libvirt" - "github.com/openshift/installer/pkg/asset/cluster/openstack" - "github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/asset/kubeconfig" - "github.com/openshift/installer/pkg/terraform" - "github.com/openshift/installer/pkg/types" -) - -const ( - // metadataFileName is name of the file where clustermetadata is stored. - metadataFileName = "metadata.json" -) - -// Cluster uses the terraform executable to launch a cluster -// with the given terraform tfvar and generated templates. -type Cluster struct { - FileList []*asset.File -} - -var _ asset.WritableAsset = (*Cluster)(nil) - -// Name returns the human-friendly name of the asset. -func (c *Cluster) Name() string { - return "Cluster" -} - -// Dependencies returns the direct dependency for launching -// the cluster. 
-func (c *Cluster) Dependencies() []asset.Asset { - return []asset.Asset{ - &installconfig.InstallConfig{}, - &TerraformVariables{}, - &kubeconfig.Admin{}, - } -} - -// Generate launches the cluster and generates the terraform state file on disk. -func (c *Cluster) Generate(parents asset.Parents) (err error) { - installConfig := &installconfig.InstallConfig{} - terraformVariables := &TerraformVariables{} - adminKubeconfig := &kubeconfig.Admin{} - parents.Get(installConfig, terraformVariables, adminKubeconfig) - - // Copy the terraform.tfvars to a temp directory where the terraform will be invoked within. - tmpDir, err := ioutil.TempDir("", "openshift-install-") - if err != nil { - return errors.Wrap(err, "failed to create temp dir for terraform execution") - } - defer os.RemoveAll(tmpDir) - - terraformVariablesFile := terraformVariables.Files()[0] - if err := ioutil.WriteFile(filepath.Join(tmpDir, terraformVariablesFile.Filename), terraformVariablesFile.Data, 0600); err != nil { - return errors.Wrap(err, "failed to write terraform.tfvars file") - } - - metadata := &types.ClusterMetadata{ - ClusterName: installConfig.Config.ObjectMeta.Name, - } - - defer func() { - if data, err2 := json.Marshal(metadata); err2 == nil { - c.FileList = append(c.FileList, &asset.File{ - Filename: metadataFileName, - Data: data, - }) - } else { - err2 = errors.Wrap(err2, "failed to Marshal ClusterMetadata") - if err == nil { - err = err2 - } else { - logrus.Error(err2) - } - } - // serialize metadata and stuff it into c.FileList - }() - - switch { - case installConfig.Config.Platform.AWS != nil: - metadata.ClusterPlatformMetadata.AWS = aws.Metadata(installConfig.Config) - case installConfig.Config.Platform.OpenStack != nil: - metadata.ClusterPlatformMetadata.OpenStack = openstack.Metadata(installConfig.Config) - case installConfig.Config.Platform.Libvirt != nil: - metadata.ClusterPlatformMetadata.Libvirt = libvirt.Metadata(installConfig.Config) - default: - return fmt.Errorf("no known 
platform") - } - - logrus.Infof("Using Terraform to create cluster...") - stateFile, err := terraform.Apply(tmpDir, installConfig.Config.Platform.Name()) - if err != nil { - err = errors.Wrap(err, "failed to run terraform") - } - - data, err2 := ioutil.ReadFile(stateFile) - if err2 == nil { - c.FileList = append(c.FileList, &asset.File{ - Filename: terraform.StateFileName, - Data: data, - }) - } else { - if err == nil { - err = err2 - } else { - logrus.Errorf("Failed to read tfstate: %v", err2) - } - } - - // TODO(yifan): Use the kubeconfig to verify the cluster is up. - return err -} - -// Files returns the FileList generated by the asset. -func (c *Cluster) Files() []*asset.File { - return c.FileList -} - -// Load returns error if the tfstate file is already on-disk, because we want to -// prevent user from accidentally re-launching the cluster. -func (c *Cluster) Load(f asset.FileFetcher) (found bool, err error) { - _, err = f.FetchByName(terraform.StateFileName) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - return true, fmt.Errorf("%q already exists. There may already be a running cluster", terraform.StateFileName) -} - -// LoadMetadata loads the cluster metadata from an asset directory. 
-func LoadMetadata(dir string) (cmetadata *types.ClusterMetadata, err error) { - raw, err := ioutil.ReadFile(filepath.Join(dir, metadataFileName)) - if err != nil { - return nil, errors.Wrapf(err, "failed to read %s file", metadataFileName) - } - - if err = json.Unmarshal(raw, &cmetadata); err != nil { - return nil, errors.Wrapf(err, "failed to Unmarshal data from %s file to types.ClusterMetadata", metadataFileName) - } - - return cmetadata, err -} diff --git a/pkg/asset/cluster/doc.go b/pkg/asset/cluster/doc.go deleted file mode 100644 index 15f645fdc42..00000000000 --- a/pkg/asset/cluster/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package cluster contains asset targets that generates the terraform file, -// prepare the infra, and bootstrap the cluster. -package cluster diff --git a/pkg/asset/cluster/libvirt/libvirt.go b/pkg/asset/cluster/libvirt/libvirt.go deleted file mode 100644 index be57678fb8c..00000000000 --- a/pkg/asset/cluster/libvirt/libvirt.go +++ /dev/null @@ -1,14 +0,0 @@ -// Package libvirt extracts libvirt metadata from install configurations. -package libvirt - -import ( - "github.com/openshift/installer/pkg/types" - "github.com/openshift/installer/pkg/types/libvirt" -) - -// Metadata converts an install configuration to libvirt metadata. -func Metadata(config *types.InstallConfig) *libvirt.Metadata { - return &libvirt.Metadata{ - URI: config.Platform.Libvirt.URI, - } -} diff --git a/pkg/asset/cluster/openstack/openstack.go b/pkg/asset/cluster/openstack/openstack.go deleted file mode 100644 index 6fde9dd5c2f..00000000000 --- a/pkg/asset/cluster/openstack/openstack.go +++ /dev/null @@ -1,19 +0,0 @@ -// Package openstack extracts OpenStack metadata from install -// configurations. -package openstack - -import ( - "github.com/openshift/installer/pkg/types" - "github.com/openshift/installer/pkg/types/openstack" -) - -// Metadata converts an install configuration to OpenStack metadata. 
-func Metadata(config *types.InstallConfig) *openstack.Metadata { - return &openstack.Metadata{ - Region: config.Platform.OpenStack.Region, - Cloud: config.Platform.OpenStack.Cloud, - Identifier: map[string]string{ - "tectonicClusterID": config.ClusterID, - }, - } -} diff --git a/pkg/asset/cluster/tfvars.go b/pkg/asset/cluster/tfvars.go deleted file mode 100644 index f8dd936fa8d..00000000000 --- a/pkg/asset/cluster/tfvars.go +++ /dev/null @@ -1,85 +0,0 @@ -package cluster - -import ( - "os" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/ignition/bootstrap" - "github.com/openshift/installer/pkg/asset/ignition/machine" - "github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/tfvars" - "github.com/pkg/errors" -) - -const ( - // TfVarsFileName is the filename for Terraform variables. - TfVarsFileName = "terraform.tfvars" - tfvarsAssetName = "Terraform Variables" -) - -// TerraformVariables depends on InstallConfig and -// Ignition to generate the terrafor.tfvars. -type TerraformVariables struct { - File *asset.File -} - -var _ asset.WritableAsset = (*TerraformVariables)(nil) - -// Name returns the human-friendly name of the asset. -func (t *TerraformVariables) Name() string { - return tfvarsAssetName -} - -// Dependencies returns the dependency of the TerraformVariable -func (t *TerraformVariables) Dependencies() []asset.Asset { - return []asset.Asset{ - &installconfig.InstallConfig{}, - &bootstrap.Bootstrap{}, - &machine.Master{}, - } -} - -// Generate generates the terraform.tfvars file. 
-func (t *TerraformVariables) Generate(parents asset.Parents) error { - installConfig := &installconfig.InstallConfig{} - bootstrap := &bootstrap.Bootstrap{} - master := &machine.Master{} - parents.Get(installConfig, bootstrap, master) - - bootstrapIgn := string(bootstrap.Files()[0].Data) - - masterIgn := string(master.Files()[0].Data) - - data, err := tfvars.TFVars(installConfig.Config, bootstrapIgn, masterIgn) - if err != nil { - return errors.Wrap(err, "failed to get Tfvars") - } - t.File = &asset.File{ - Filename: TfVarsFileName, - Data: data, - } - - return nil -} - -// Files returns the files generated by the asset. -func (t *TerraformVariables) Files() []*asset.File { - if t.File != nil { - return []*asset.File{t.File} - } - return []*asset.File{} -} - -// Load reads the terraform.tfvars from disk. -func (t *TerraformVariables) Load(f asset.FileFetcher) (found bool, err error) { - file, err := f.FetchByName(TfVarsFileName) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - t.File = file - return true, nil -} diff --git a/pkg/asset/doc.go b/pkg/asset/doc.go deleted file mode 100644 index 8c199afb6e5..00000000000 --- a/pkg/asset/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package asset defines the asset dependencies and implements the graph engine. -package asset diff --git a/pkg/asset/filefetcher.go b/pkg/asset/filefetcher.go deleted file mode 100644 index a8a76c2a58c..00000000000 --- a/pkg/asset/filefetcher.go +++ /dev/null @@ -1,57 +0,0 @@ -package asset - -import ( - "io/ioutil" - "path/filepath" - "sort" -) - -// FileFetcher fetches the asset files from disk. -type FileFetcher interface { - // FetchByName returns the file with the given name. - FetchByName(string) (*File, error) - // FetchByPattern returns the files whose name match the given glob. - FetchByPattern(pattern string) ([]*File, error) -} - -type fileFetcher struct { - directory string -} - -// FetchByName returns the file with the given name. 
-func (f *fileFetcher) FetchByName(name string) (*File, error) { - data, err := ioutil.ReadFile(filepath.Join(f.directory, name)) - if err != nil { - return nil, err - } - return &File{Filename: name, Data: data}, nil -} - -// FetchByPattern returns the files whose name match the given regexp. -func (f *fileFetcher) FetchByPattern(pattern string) (files []*File, err error) { - matches, err := filepath.Glob(filepath.Join(f.directory, pattern)) - if err != nil { - return nil, err - } - - files = make([]*File, 0, len(matches)) - for _, path := range matches { - data, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - - filename, err := filepath.Rel(f.directory, path) - if err != nil { - return nil, err - } - - files = append(files, &File{ - Filename: filename, - Data: data, - }) - } - - sort.Slice(files, func(i, j int) bool { return files[i].Filename < files[j].Filename }) - return files, nil -} diff --git a/pkg/asset/filefetcher_test.go b/pkg/asset/filefetcher_test.go deleted file mode 100644 index 43cb52b6c26..00000000000 --- a/pkg/asset/filefetcher_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package asset - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestFetchByName(t *testing.T) { - tests := []struct { - name string - files map[string][]byte - input string - expectFile *File - }{ - { - name: "input doesn't match", - files: map[string][]byte{"foo.bar": []byte("some data")}, - input: "bar.foo", - expectFile: nil, - }, - { - name: "with contents", - files: map[string][]byte{"foo.bar": []byte("some data")}, - input: "foo.bar", - expectFile: &File{ - Filename: "foo.bar", - Data: []byte("some data"), - }, - }, - { - name: "match one file", - files: map[string][]byte{"foo.bar": []byte("some data")}, - input: "foo.bar", - expectFile: &File{ - Filename: "foo.bar", - Data: []byte("some data"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tempDir, err 
:= ioutil.TempDir("", "openshift-install-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - for filename, data := range tt.files { - err = ioutil.WriteFile(filepath.Join(tempDir, filename), data, 0666) - if err != nil { - t.Fatal(err) - } - } - - f := &fileFetcher{directory: tempDir} - file, err := f.FetchByName(tt.input) - if err != nil { - if os.IsNotExist(err) && tt.expectFile == nil { - return - } - t.Fatal(err) - } - - assert.Equal(t, tt.expectFile, file) - }) - } -} - -func TestFetchByPattern(t *testing.T) { - tempDir, err := ioutil.TempDir("", "openshift-install-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - - files := map[string][]byte{ - "master-0.ign": []byte("some data 0"), - "master-1.ign": []byte("some data 1"), - "master-2.ign": []byte("some data 2"), - "master-10.ign": []byte("some data 3"), - "master-20.ign": []byte("some data 4"), - "master-00.ign": []byte("some data 5"), - "master-01.ign": []byte("some data 6"), - "amaster-0.ign": []byte("some data 7"), - "master-.ign": []byte("some data 8"), - "master-.igni": []byte("some data 9"), - "master-.ignign": []byte("some data 10"), - "manifests/0": []byte("some data 11"), - "manifests/some": []byte("some data 12"), - "amanifests/a": []byte("some data 13"), - } - - for path, data := range files { - dir := filepath.Dir(path) - if dir != "." 
{ - err := os.MkdirAll(filepath.Join(tempDir, dir), 0777) - if err != nil { - t.Fatal(err) - } - } - err = ioutil.WriteFile(filepath.Join(tempDir, path), data, 0666) - if err != nil { - t.Fatal(err) - } - } - tests := []struct { - input string - expectFiles []*File - }{ - { - input: "master-[0-9]*.ign", - expectFiles: []*File{ - { - Filename: "master-0.ign", - Data: []byte("some data 0"), - }, - { - Filename: "master-00.ign", - Data: []byte("some data 5"), - }, - { - Filename: "master-01.ign", - Data: []byte("some data 6"), - }, - { - Filename: "master-1.ign", - Data: []byte("some data 1"), - }, - { - Filename: "master-10.ign", - Data: []byte("some data 3"), - }, - { - Filename: "master-2.ign", - Data: []byte("some data 2"), - }, - { - Filename: "master-20.ign", - Data: []byte("some data 4"), - }, - }, - }, - { - input: filepath.Join("manifests", "*"), - expectFiles: []*File{ - { - Filename: "manifests/0", - Data: []byte("some data 11"), - }, - { - Filename: "manifests/some", - Data: []byte("some data 12"), - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.input, func(t *testing.T) { - f := &fileFetcher{directory: tempDir} - files, err := f.FetchByPattern(tt.input) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, tt.expectFiles, files) - }) - } -} diff --git a/pkg/asset/ignition/bootstrap/bootstrap.go b/pkg/asset/ignition/bootstrap/bootstrap.go deleted file mode 100644 index e685af4877e..00000000000 --- a/pkg/asset/ignition/bootstrap/bootstrap.go +++ /dev/null @@ -1,356 +0,0 @@ -package bootstrap - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "strings" - "text/template" - - "github.com/coreos/ignition/config/util" - igntypes "github.com/coreos/ignition/config/v2_2/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - - "github.com/openshift/installer/data" - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/ignition" - 
"github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/asset/kubeconfig" - "github.com/openshift/installer/pkg/asset/manifests" - "github.com/openshift/installer/pkg/asset/tls" - "github.com/openshift/installer/pkg/types" -) - -const ( - rootDir = "/opt/tectonic" - defaultReleaseImage = "registry.svc.ci.openshift.org/openshift/origin-release:v4.0" - bootstrapIgnFilename = "bootstrap.ign" -) - -// bootstrapTemplateData is the data to use to replace values in bootstrap -// template files. -type bootstrapTemplateData struct { - BootkubeImage string - EtcdCertSignerImage string - EtcdCluster string - EtcdctlImage string - ReleaseImage string - AdminKubeConfigBase64 string -} - -// Bootstrap is an asset that generates the ignition config for bootstrap nodes. -type Bootstrap struct { - Config *igntypes.Config - File *asset.File -} - -var _ asset.WritableAsset = (*Bootstrap)(nil) - -// Dependencies returns the assets on which the Bootstrap asset depends. -func (a *Bootstrap) Dependencies() []asset.Asset { - return []asset.Asset{ - &installconfig.InstallConfig{}, - &tls.RootCA{}, - &tls.EtcdCA{}, - &tls.KubeCA{}, - &tls.AggregatorCA{}, - &tls.ServiceServingCA{}, - &tls.EtcdClientCertKey{}, - &tls.APIServerCertKey{}, - &tls.APIServerProxyCertKey{}, - &tls.AdminCertKey{}, - &tls.KubeletCertKey{}, - &tls.MCSCertKey{}, - &tls.ServiceAccountKeyPair{}, - &kubeconfig.Admin{}, - &kubeconfig.Kubelet{}, - &manifests.Manifests{}, - &manifests.Tectonic{}, - } -} - -// Generate generates the ignition config for the Bootstrap asset. 
-func (a *Bootstrap) Generate(dependencies asset.Parents) error { - installConfig := &installconfig.InstallConfig{} - adminKubeConfig := &kubeconfig.Admin{} - dependencies.Get(installConfig, adminKubeConfig) - - templateData, err := a.getTemplateData(installConfig.Config, adminKubeConfig.File.Data) - if err != nil { - return errors.Wrap(err, "failed to get bootstrap templates") - } - - a.Config = &igntypes.Config{ - Ignition: igntypes.Ignition{ - Version: igntypes.MaxVersion.String(), - }, - } - - err = a.addStorageFiles("/", "bootstrap/files", templateData) - if err != nil { - return err - } - err = a.addSystemdUnits("bootstrap/systemd/units", templateData) - if err != nil { - return err - } - a.addParentFiles(dependencies) - - a.Config.Passwd.Users = append( - a.Config.Passwd.Users, - igntypes.PasswdUser{Name: "core", SSHAuthorizedKeys: []igntypes.SSHAuthorizedKey{igntypes.SSHAuthorizedKey(installConfig.Config.Admin.SSHKey)}}, - ) - - data, err := json.Marshal(a.Config) - if err != nil { - return errors.Wrap(err, "failed to Marshal Ignition config") - } - a.File = &asset.File{ - Filename: bootstrapIgnFilename, - Data: data, - } - - return nil -} - -// Name returns the human-friendly name of the asset. -func (a *Bootstrap) Name() string { - return "Bootstrap Ignition Config" -} - -// Files returns the files generated by the asset. -func (a *Bootstrap) Files() []*asset.File { - if a.File != nil { - return []*asset.File{a.File} - } - return []*asset.File{} -} - -// getTemplateData returns the data to use to execute bootstrap templates. 
-func (a *Bootstrap) getTemplateData(installConfig *types.InstallConfig, adminKubeConfig []byte) (*bootstrapTemplateData, error) { - etcdEndpoints := make([]string, installConfig.MasterCount()) - for i := range etcdEndpoints { - etcdEndpoints[i] = fmt.Sprintf("https://%s-etcd-%d.%s:2379", installConfig.ObjectMeta.Name, i, installConfig.BaseDomain) - } - - releaseImage := defaultReleaseImage - if ri, ok := os.LookupEnv("OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE"); ok && ri != "" { - logrus.Warn("Found override for ReleaseImage. Please be warned, this is not advised") - releaseImage = ri - } - - return &bootstrapTemplateData{ - EtcdCertSignerImage: "quay.io/coreos/kube-etcd-signer-server:678cc8e6841e2121ebfdb6e2db568fce290b67d6", - EtcdctlImage: "quay.io/coreos/etcd:v3.2.14", - BootkubeImage: "quay.io/coreos/bootkube:v0.14.0", - ReleaseImage: releaseImage, - EtcdCluster: strings.Join(etcdEndpoints, ","), - AdminKubeConfigBase64: base64.StdEncoding.EncodeToString(adminKubeConfig), - }, nil -} - -func (a *Bootstrap) addStorageFiles(base string, uri string, templateData *bootstrapTemplateData) (err error) { - file, err := data.Assets.Open(uri) - if err != nil { - return err - } - defer file.Close() - - info, err := file.Stat() - if err != nil { - return err - } - - if info.IsDir() { - children, err := file.Readdir(0) - if err != nil { - return err - } - file.Close() - - for _, childInfo := range children { - name := childInfo.Name() - err = a.addStorageFiles(path.Join(base, name), path.Join(uri, name), templateData) - if err != nil { - return err - } - } - return nil - } - - name := info.Name() - _, data, err := readFile(name, file, templateData) - if err != nil { - return err - } - - filename := path.Base(uri) - - var mode int - appendToFile := false - if path.Base(path.Dir(uri)) == "bin" { - mode = 0555 - } else if filename == "motd" { - mode = 0644 - appendToFile = true - } else { - mode = 0600 - } - ign := ignition.FileFromBytes(strings.TrimSuffix(base, ".template"), 
mode, data) - if filename == ".bash_history" { - ign.User = &igntypes.NodeUser{Name: "core"} - ign.Group = &igntypes.NodeGroup{Name: "core"} - } - ign.Append = appendToFile - a.Config.Storage.Files = append(a.Config.Storage.Files, ign) - - return nil -} - -func (a *Bootstrap) addSystemdUnits(uri string, templateData *bootstrapTemplateData) (err error) { - enabled := map[string]bool{ - "progress.service": true, - "kubelet.service": true, - } - - directory, err := data.Assets.Open(uri) - if err != nil { - return err - } - defer directory.Close() - - children, err := directory.Readdir(0) - if err != nil { - return err - } - - for _, childInfo := range children { - name := childInfo.Name() - file, err := data.Assets.Open(path.Join(uri, name)) - if err != nil { - return err - } - defer file.Close() - - name, data, err := readFile(name, file, templateData) - if err != nil { - return err - } - - unit := igntypes.Unit{Name: name, Contents: string(data)} - if _, ok := enabled[name]; ok { - unit.Enabled = util.BoolToPtr(true) - } - a.Config.Systemd.Units = append(a.Config.Systemd.Units, unit) - } - - return nil -} - -// Read data from the string reader, and, if the name ends with -// '.template', strip that extension from the name and render the -// template. 
-func readFile(name string, reader io.Reader, templateData interface{}) (finalName string, data []byte, err error) { - data, err = ioutil.ReadAll(reader) - if err != nil { - return name, []byte{}, err - } - - if filepath.Ext(name) == ".template" { - name = strings.TrimSuffix(name, ".template") - tmpl := template.New(name) - tmpl, err := tmpl.Parse(string(data)) - if err != nil { - return name, data, err - } - stringData := applyTemplateData(tmpl, templateData) - data = []byte(stringData) - } - - return name, data, nil -} - -func (a *Bootstrap) addParentFiles(dependencies asset.Parents) { - adminKubeconfig := &kubeconfig.Admin{} - kubeletKubeconfig := &kubeconfig.Kubelet{} - mfsts := &manifests.Manifests{} - tectonic := &manifests.Tectonic{} - dependencies.Get(adminKubeconfig, kubeletKubeconfig, mfsts, tectonic) - - a.Config.Storage.Files = append( - a.Config.Storage.Files, - ignition.FilesFromAsset(rootDir, 0600, adminKubeconfig)..., - ) - a.Config.Storage.Files = append( - a.Config.Storage.Files, - ignition.FileFromBytes("/etc/kubernetes/kubeconfig", 0600, kubeletKubeconfig.Files()[0].Data), - ignition.FileFromBytes("/var/lib/kubelet/kubeconfig", 0600, kubeletKubeconfig.Files()[0].Data), - ) - a.Config.Storage.Files = append( - a.Config.Storage.Files, - ignition.FilesFromAsset(rootDir, 0644, mfsts)..., - ) - a.Config.Storage.Files = append( - a.Config.Storage.Files, - ignition.FilesFromAsset(rootDir, 0644, tectonic)..., - ) - - for _, asset := range []asset.WritableAsset{ - &tls.RootCA{}, - &tls.KubeCA{}, - &tls.AggregatorCA{}, - &tls.ServiceServingCA{}, - &tls.EtcdCA{}, - &tls.EtcdClientCertKey{}, - &tls.APIServerCertKey{}, - &tls.APIServerProxyCertKey{}, - &tls.AdminCertKey{}, - &tls.KubeletCertKey{}, - &tls.MCSCertKey{}, - &tls.ServiceAccountKeyPair{}, - } { - dependencies.Get(asset) - a.Config.Storage.Files = append(a.Config.Storage.Files, ignition.FilesFromAsset(rootDir, 0600, asset)...) 
- } - - etcdClientCertKey := &tls.EtcdClientCertKey{} - dependencies.Get(etcdClientCertKey) - a.Config.Storage.Files = append( - a.Config.Storage.Files, - ignition.FileFromBytes("/etc/ssl/etcd/ca.crt", 0600, etcdClientCertKey.Cert()), - ) -} - -func applyTemplateData(template *template.Template, templateData interface{}) string { - buf := &bytes.Buffer{} - if err := template.Execute(buf, templateData); err != nil { - panic(err) - } - return buf.String() -} - -// Load returns the bootstrap ignition from disk. -func (a *Bootstrap) Load(f asset.FileFetcher) (found bool, err error) { - file, err := f.FetchByName(bootstrapIgnFilename) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - config := &igntypes.Config{} - if err := json.Unmarshal(file.Data, config); err != nil { - return false, errors.Wrapf(err, "failed to unmarshal") - } - - a.File, a.Config = file, config - return true, nil -} diff --git a/pkg/asset/ignition/machine/master.go b/pkg/asset/ignition/machine/master.go deleted file mode 100644 index a1b1d2b6e30..00000000000 --- a/pkg/asset/ignition/machine/master.go +++ /dev/null @@ -1,85 +0,0 @@ -package machine - -import ( - "encoding/json" - "os" - - igntypes "github.com/coreos/ignition/config/v2_2/types" - "github.com/pkg/errors" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/asset/tls" -) - -const ( - masterIgnFilename = "master.ign" -) - -// Master is an asset that generates the ignition config for master nodes. -type Master struct { - Config *igntypes.Config - File *asset.File -} - -var _ asset.WritableAsset = (*Master)(nil) - -// Dependencies returns the assets on which the Master asset depends. -func (a *Master) Dependencies() []asset.Asset { - return []asset.Asset{ - &installconfig.InstallConfig{}, - &tls.RootCA{}, - } -} - -// Generate generates the ignition config for the Master asset. 
-func (a *Master) Generate(dependencies asset.Parents) error { - installConfig := &installconfig.InstallConfig{} - rootCA := &tls.RootCA{} - dependencies.Get(installConfig, rootCA) - - a.Config = pointerIgnitionConfig(installConfig.Config, rootCA.Cert(), "master") - - data, err := json.Marshal(a.Config) - if err != nil { - return errors.Wrap(err, "failed to get InstallConfig from parents") - } - a.File = &asset.File{ - Filename: masterIgnFilename, - Data: data, - } - - return nil -} - -// Name returns the human-friendly name of the asset. -func (a *Master) Name() string { - return "Master Ignition Config" -} - -// Files returns the files generated by the asset. -func (a *Master) Files() []*asset.File { - if a.File != nil { - return []*asset.File{a.File} - } - return []*asset.File{} -} - -// Load returns the master ignitions from disk. -func (a *Master) Load(f asset.FileFetcher) (found bool, err error) { - file, err := f.FetchByName(masterIgnFilename) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - config := &igntypes.Config{} - if err := json.Unmarshal(file.Data, config); err != nil { - return false, errors.Wrapf(err, "failed to unmarshal") - } - - a.File, a.Config = file, config - return true, nil -} diff --git a/pkg/asset/ignition/machine/master_test.go b/pkg/asset/ignition/machine/master_test.go deleted file mode 100644 index f0ce4eed3e9..00000000000 --- a/pkg/asset/ignition/machine/master_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package machine - -import ( - "net" - "testing" - - "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/asset/tls" - "github.com/openshift/installer/pkg/ipnet" - "github.com/openshift/installer/pkg/types" - "github.com/openshift/installer/pkg/types/aws" -) - -// TestMasterGenerate tests generating the master asset. 
-func TestMasterGenerate(t *testing.T) { - installConfig := &installconfig.InstallConfig{ - Config: &types.InstallConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - }, - BaseDomain: "test-domain", - Networking: types.Networking{ - ServiceCIDR: ipnet.IPNet{ - IPNet: func(s string) net.IPNet { - _, cidr, _ := net.ParseCIDR(s) - return *cidr - }("10.0.1.0/24"), - }, - }, - Platform: types.Platform{ - AWS: &aws.Platform{ - Region: "us-east", - }, - }, - Machines: []types.MachinePool{ - { - Name: "master", - Replicas: func(x int64) *int64 { return &x }(3), - }, - }, - }, - } - - rootCA := &tls.RootCA{} - err := rootCA.Generate(nil) - assert.NoError(t, err, "unexpected error generating root CA") - - parents := asset.Parents{} - parents.Add(installConfig, rootCA) - - master := &Master{} - err = master.Generate(parents) - assert.NoError(t, err, "unexpected error generating master asset") - expectedIgnitionConfigNames := []string{ - "master.ign", - } - actualFiles := master.Files() - actualIgnitionConfigNames := make([]string, len(actualFiles)) - for i, f := range actualFiles { - actualIgnitionConfigNames[i] = f.Filename - } - assert.Equal(t, expectedIgnitionConfigNames, actualIgnitionConfigNames, "unexpected names for master ignition configs") -} diff --git a/pkg/asset/ignition/machine/node.go b/pkg/asset/ignition/machine/node.go deleted file mode 100644 index 0bebbcd5480..00000000000 --- a/pkg/asset/ignition/machine/node.go +++ /dev/null @@ -1,46 +0,0 @@ -package machine - -import ( - "fmt" - "net/url" - - ignition "github.com/coreos/ignition/config/v2_2/types" - "github.com/vincent-petithory/dataurl" - - "github.com/openshift/installer/pkg/types" -) - -// pointerIgnitionConfig generates a config which references the remote config -// served by the machine config server. 
-func pointerIgnitionConfig(installConfig *types.InstallConfig, rootCA []byte, role string) *ignition.Config { - return &ignition.Config{ - Ignition: ignition.Ignition{ - Version: ignition.MaxVersion.String(), - Config: ignition.IgnitionConfig{ - Append: []ignition.ConfigReference{{ - Source: func() *url.URL { - return &url.URL{ - Scheme: "https", - Host: fmt.Sprintf("%s-api.%s:49500", installConfig.ObjectMeta.Name, installConfig.BaseDomain), - Path: fmt.Sprintf("/config/%s", role), - } - }().String(), - }}, - }, - Security: ignition.Security{ - TLS: ignition.TLS{ - CertificateAuthorities: []ignition.CaReference{{ - Source: dataurl.EncodeBytes(rootCA), - }}, - }, - }, - }, - // XXX: Remove this once MCO supports injecting SSH keys. - Passwd: ignition.Passwd{ - Users: []ignition.PasswdUser{{ - Name: "core", - SSHAuthorizedKeys: []ignition.SSHAuthorizedKey{ignition.SSHAuthorizedKey(installConfig.Admin.SSHKey)}, - }}, - }, - } -} diff --git a/pkg/asset/ignition/machine/testutils_test.go b/pkg/asset/ignition/machine/testutils_test.go deleted file mode 100644 index e3cc5cb5a3c..00000000000 --- a/pkg/asset/ignition/machine/testutils_test.go +++ /dev/null @@ -1,250 +0,0 @@ -package machine - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/vincent-petithory/dataurl" -) - -type fileAssertion struct { - path string - data string - additional func(*testing.T, map[string]interface{}) bool -} - -// assertFilesInIgnitionConfig asserts that the specified ignition config -// contains exactly the files enumerated in fileAssertions. 
-func assertFilesInIgnitionConfig( - t *testing.T, - ignitionConfig []byte, - fileAssertions ...fileAssertion, -) bool { - var ic map[string]interface{} - if err := json.Unmarshal(ignitionConfig, &ic); err != nil { - return assert.NoError(t, err, "unexpected error unmarshaling ignition config") - } - storage, ok := ic["storage"] - if !assert.True(t, ok, "No storage in ignition config") { - return false - } - files, ok := storage.(map[string]interface{})["files"] - if !assert.True(t, ok, "No files in ignition config") { - return false - } - expectedFilePaths := make([]string, len(fileAssertions)) - for i, a := range fileAssertions { - expectedFilePaths[i] = a.path - } - filesList := files.([]interface{}) - actualFilePaths := make([]string, len(filesList)) - for i, f := range filesList { - path, ok := f.(map[string]interface{})["path"] - if !assert.True(t, ok, "file has no path: %+v", f) { - return false - } - actualFilePaths[i] = path.(string) - } - if !assert.Equal(t, expectedFilePaths, actualFilePaths, "Unexpected file paths") { - return false - } - for _, f := range filesList { - file := f.(map[string]interface{}) - path := file["path"] - var fa fileAssertion - for _, a := range fileAssertions { - if a.path != path { - continue - } - fa = a - } - contents, ok := file["contents"] - if !assert.True(t, ok, "file %q has no contents", path) { - return false - } - source, ok := contents.(map[string]interface{})["source"] - if !assert.True(t, ok, "file %q has no source", path) { - return false - } - url, err := dataurl.DecodeString(source.(string)) - if !assert.NoError(t, err, "unexpected error decoding dataurl in file %q", path) { - return false - } - if !assert.Equal(t, fa.data, string(url.Data), "unexpected data in file %q", path) { - return false - } - if fa.additional != nil { - if !fa.additional(t, file) { - return false - } - } - } - return true -} - -type systemdUnitAssertion struct { - name string - dropinName string - contents string - additional 
func(*testing.T, map[string]interface{}) bool -} - -// assertSystemdUnitsInIgnitionConfig asserts that the specified ignition config -// contains exactly the systemd units enumerated in unitAssertions. -func assertSystemdUnitsInIgnitionConfig( - t *testing.T, - ignitionConfig []byte, - unitAssertions ...systemdUnitAssertion, -) bool { - var ic map[string]interface{} - if err := json.Unmarshal(ignitionConfig, &ic); err != nil { - return assert.NoError(t, err, "unexpected error unmarshaling ignition config") - } - systemd, ok := ic["systemd"] - if !assert.True(t, ok, "No systemd in ignition config") { - return false - } - units, ok := systemd.(map[string]interface{})["units"] - if !assert.True(t, ok, "No units in ignition config") { - return false - } - expectedUnitNames := make([]string, len(unitAssertions)) - for i, a := range unitAssertions { - expectedUnitNames[i] = a.name - } - unitsList := units.([]interface{}) - actualUnitNames := make([]string, len(unitsList)) - for i, u := range unitsList { - name, ok := u.(map[string]interface{})["name"] - if !assert.True(t, ok, "unit has no name: %+v", u) { - return false - } - actualUnitNames[i] = name.(string) - } - if !assert.Equal(t, expectedUnitNames, actualUnitNames, "Unexpected unit names") { - return false - } - for _, u := range unitsList { - unit := u.(map[string]interface{}) - name := unit["name"] - var ua systemdUnitAssertion - for _, a := range unitAssertions { - if a.name != name { - continue - } - ua = a - } - contentsParent := unit - if ua.dropinName != "" { - dropins, ok := unit["dropins"] - if !assert.True(t, ok, "no dropins in systemd unit %q", name) { - return false - } - dropinsList := dropins.([]interface{}) - if !assert.Equal(t, 1, len(dropinsList), "unexpected number of dropins in systemd unit %q", name) { - return false - } - dropin := dropinsList[0].(map[string]interface{}) - dropinName, ok := dropin["name"] - if !assert.True(t, ok, "no name in dropin in systemd unit %q", name) { - return false - 
} - if !assert.Equal(t, ua.dropinName, dropinName.(string), "unexpected dropin name in systemd unit %q", name) { - return false - } - contentsParent = dropin - } - contents, contentsOK := contentsParent["contents"] - if ua.contents != "" { - if !assert.True(t, contentsOK, "no contents in systemd unit %q", name) { - return false - } - if !assert.Equal(t, ua.contents, contents.(string), "unexpected contents in systemd unit %q", name) { - return false - } - } else { - if !assert.False(t, contentsOK, "unexpected contents in systemd unit %q", name) { - return false - } - } - if ua.additional != nil { - if !ua.additional(t, unit) { - return false - } - } - } - return true -} - -type userAssertion struct { - name string - sshKey string - additional func(*testing.T, map[string]interface{}) bool -} - -// assertUsersInIgnitionConfig asserts that the specified ignition config -// contains exactly the users enumerated in userAssertions. -func assertUsersInIgnitionConfig( - t *testing.T, - ignitionConfig []byte, - userAssertions ...userAssertion, -) bool { - var ic map[string]interface{} - if err := json.Unmarshal(ignitionConfig, &ic); err != nil { - return assert.NoError(t, err, "unexpected error unmarshaling ignition config") - } - passwd, ok := ic["passwd"] - if !assert.True(t, ok, "No passwd in ignition config") { - return false - } - users, ok := passwd.(map[string]interface{})["users"] - if !assert.True(t, ok, "No users in ignition config") { - return false - } - expectedUserNames := make([]string, len(userAssertions)) - for i, a := range userAssertions { - expectedUserNames[i] = a.name - } - usersList := users.([]interface{}) - actualUserNames := make([]string, len(usersList)) - for i, u := range usersList { - name, ok := u.(map[string]interface{})["name"] - if !assert.True(t, ok, "user has no name: %+v", u) { - return false - } - actualUserNames[i] = name.(string) - } - if !assert.Equal(t, expectedUserNames, actualUserNames, "Unexpected user names") { - return false - } 
- for _, u := range usersList { - user := u.(map[string]interface{}) - name := user["name"] - var ua userAssertion - for _, a := range userAssertions { - if a.name != name { - continue - } - ua = a - } - sshAuthorizedKeys, ok := user["sshAuthorizedKeys"] - if !assert.True(t, ok, "no sshAuthorizedKeys in user %q", name) { - return false - } - sshAuthorizedKeysList := sshAuthorizedKeys.([]interface{}) - if !assert.Equal(t, 1, len(sshAuthorizedKeysList), "unexpected number of sshAuthorizedKeys in user %q", name) { - return false - } - sshAuthorizedKey := sshAuthorizedKeysList[0].(string) - if !assert.Equal(t, ua.sshKey, sshAuthorizedKey, "unexpected ssh key in user %q", name) { - return false - } - if ua.additional != nil { - if !ua.additional(t, user) { - return false - } - } - } - return true -} diff --git a/pkg/asset/ignition/machine/worker.go b/pkg/asset/ignition/machine/worker.go deleted file mode 100644 index ad0a7fe7e14..00000000000 --- a/pkg/asset/ignition/machine/worker.go +++ /dev/null @@ -1,85 +0,0 @@ -package machine - -import ( - "encoding/json" - "os" - - igntypes "github.com/coreos/ignition/config/v2_2/types" - "github.com/pkg/errors" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/asset/tls" -) - -const ( - workerIgnFilename = "worker.ign" -) - -// Worker is an asset that generates the ignition config for worker nodes. -type Worker struct { - Config *igntypes.Config - File *asset.File -} - -var _ asset.WritableAsset = (*Worker)(nil) - -// Dependencies returns the assets on which the Worker asset depends. -func (a *Worker) Dependencies() []asset.Asset { - return []asset.Asset{ - &installconfig.InstallConfig{}, - &tls.RootCA{}, - } -} - -// Generate generates the ignition config for the Worker asset. 
-func (a *Worker) Generate(dependencies asset.Parents) error { - installConfig := &installconfig.InstallConfig{} - rootCA := &tls.RootCA{} - dependencies.Get(installConfig, rootCA) - - a.Config = pointerIgnitionConfig(installConfig.Config, rootCA.Cert(), "worker") - - data, err := json.Marshal(a.Config) - if err != nil { - return errors.Wrap(err, "failed to get InstallConfig from parents") - } - a.File = &asset.File{ - Filename: workerIgnFilename, - Data: data, - } - - return nil -} - -// Name returns the human-friendly name of the asset. -func (a *Worker) Name() string { - return "Worker Ignition Config" -} - -// Files returns the files generated by the asset. -func (a *Worker) Files() []*asset.File { - if a.File != nil { - return []*asset.File{a.File} - } - return []*asset.File{} -} - -// Load returns the worker ignitions from disk. -func (a *Worker) Load(f asset.FileFetcher) (found bool, err error) { - file, err := f.FetchByName(workerIgnFilename) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - config := &igntypes.Config{} - if err := json.Unmarshal(file.Data, config); err != nil { - return false, errors.Wrapf(err, "failed to unmarshal") - } - - a.File, a.Config = file, config - return true, nil -} diff --git a/pkg/asset/ignition/machine/worker_test.go b/pkg/asset/ignition/machine/worker_test.go deleted file mode 100644 index 3858c035819..00000000000 --- a/pkg/asset/ignition/machine/worker_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package machine - -import ( - "net" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/asset/tls" - "github.com/openshift/installer/pkg/ipnet" - "github.com/openshift/installer/pkg/types" - "github.com/openshift/installer/pkg/types/aws" -) - -// TestWorkerGenerate tests generating the worker asset. 
-func TestWorkerGenerate(t *testing.T) { - installConfig := &installconfig.InstallConfig{ - Config: &types.InstallConfig{ - Networking: types.Networking{ - ServiceCIDR: ipnet.IPNet{ - IPNet: func(s string) net.IPNet { - _, cidr, _ := net.ParseCIDR(s) - return *cidr - }("10.0.1.0/24"), - }, - }, - Platform: types.Platform{ - AWS: &aws.Platform{ - Region: "us-east", - }, - }, - }, - } - - rootCA := &tls.RootCA{} - err := rootCA.Generate(nil) - assert.NoError(t, err, "unexpected error generating root CA") - - parents := asset.Parents{} - parents.Add(installConfig, rootCA) - - worker := &Worker{} - err = worker.Generate(parents) - assert.NoError(t, err, "unexpected error generating worker asset") - - actualFiles := worker.Files() - assert.Equal(t, 1, len(actualFiles), "unexpected number of files in worker state") - assert.Equal(t, "worker.ign", actualFiles[0].Filename, "unexpected name for worker ignition config") -} diff --git a/pkg/asset/ignition/node.go b/pkg/asset/ignition/node.go deleted file mode 100644 index 09b70eef2de..00000000000 --- a/pkg/asset/ignition/node.go +++ /dev/null @@ -1,41 +0,0 @@ -package ignition - -import ( - "path/filepath" - - ignition "github.com/coreos/ignition/config/v2_2/types" - "github.com/vincent-petithory/dataurl" - - "github.com/openshift/installer/pkg/asset" -) - -// FilesFromAsset creates ignition files for each of the files in the specified -// asset. -func FilesFromAsset(pathPrefix string, mode int, asset asset.WritableAsset) []ignition.File { - var files []ignition.File - for _, f := range asset.Files() { - files = append(files, FileFromBytes(filepath.Join(pathPrefix, f.Filename), mode, f.Data)) - } - return files -} - -// FileFromString creates an ignition-config file with the given contents. -func FileFromString(path string, mode int, contents string) ignition.File { - return FileFromBytes(path, mode, []byte(contents)) -} - -// FileFromBytes creates an ignition-config file with the given contents. 
-func FileFromBytes(path string, mode int, contents []byte) ignition.File { - return ignition.File{ - Node: ignition.Node{ - Filesystem: "root", - Path: path, - }, - FileEmbedded1: ignition.FileEmbedded1{ - Mode: &mode, - Contents: ignition.FileContents{ - Source: dataurl.EncodeBytes(contents), - }, - }, - } -} diff --git a/pkg/asset/installconfig/aws/aws.go b/pkg/asset/installconfig/aws/aws.go deleted file mode 100644 index 18d16ab4f83..00000000000 --- a/pkg/asset/installconfig/aws/aws.go +++ /dev/null @@ -1,95 +0,0 @@ -// Package aws collects AWS-specific configuration. -package aws - -import ( - "encoding/json" - "fmt" - "os" - "sort" - "strings" - - "github.com/pkg/errors" - survey "gopkg.in/AlecAivazis/survey.v1" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/types/aws" -) - -const ( - defaultVPCCIDR = "10.0.0.0/16" -) - -var ( - validAWSRegions = map[string]string{ - "ap-northeast-1": "Tokyo", - "ap-northeast-2": "Seoul", - "ap-northeast-3": "Osaka-Local", - "ap-south-1": "Mumbai", - "ap-southeast-1": "Singapore", - "ap-southeast-2": "Sydney", - "ca-central-1": "Central", - "cn-north-1": "Beijing", - "cn-northwest-1": "Ningxia", - "eu-central-1": "Frankfurt", - "eu-west-1": "Ireland", - "eu-west-2": "London", - "eu-west-3": "Paris", - "sa-east-1": "São Paulo", - "us-east-1": "N. Virginia", - "us-east-2": "Ohio", - "us-west-1": "N. California", - "us-west-2": "Oregon", - } -) - -// Platform collects AWS-specific configuration. 
-func Platform() (*aws.Platform, error) { - longRegions := make([]string, 0, len(validAWSRegions)) - shortRegions := make([]string, 0, len(validAWSRegions)) - for id, location := range validAWSRegions { - longRegions = append(longRegions, fmt.Sprintf("%s (%s)", id, location)) - shortRegions = append(shortRegions, id) - } - regionTransform := survey.TransformString(func(s string) string { - return strings.SplitN(s, " ", 2)[0] - }) - sort.Strings(longRegions) - sort.Strings(shortRegions) - region, err := asset.GenerateUserProvidedAsset( - "AWS Region", - &survey.Question{ - Prompt: &survey.Select{ - Message: "Region", - Help: "The AWS region to be used for installation.", - Default: "us-east-1 (N. Virginia)", - Options: longRegions, - }, - Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { - choice := regionTransform(ans).(string) - i := sort.SearchStrings(shortRegions, choice) - if i == len(shortRegions) || shortRegions[i] != choice { - return errors.Errorf("invalid region %q", choice) - } - return nil - }), - Transform: regionTransform, - }, - "OPENSHIFT_INSTALL_AWS_REGION", - ) - if err != nil { - return nil, err - } - - userTags := map[string]string{} - if value, ok := os.LookupEnv("_CI_ONLY_STAY_AWAY_OPENSHIFT_INSTALL_AWS_USER_TAGS"); ok { - if err := json.Unmarshal([]byte(value), &userTags); err != nil { - return nil, errors.Wrapf(err, "_CI_ONLY_STAY_AWAY_OPENSHIFT_INSTALL_AWS_USER_TAGS contains invalid JSON: %s", value) - } - } - - return &aws.Platform{ - VPCCIDRBlock: defaultVPCCIDR, - Region: region, - UserTags: userTags, - }, nil -} diff --git a/pkg/asset/installconfig/basedomain.go b/pkg/asset/installconfig/basedomain.go deleted file mode 100644 index 353748fee41..00000000000 --- a/pkg/asset/installconfig/basedomain.go +++ /dev/null @@ -1,43 +0,0 @@ -package installconfig - -import ( - survey "gopkg.in/AlecAivazis/survey.v1" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/validate" -) - 
-type baseDomain struct { - BaseDomain string -} - -var _ asset.Asset = (*baseDomain)(nil) - -// Dependencies returns no dependencies. -func (a *baseDomain) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Generate queries for the base domain from the user. -func (a *baseDomain) Generate(asset.Parents) error { - bd, err := asset.GenerateUserProvidedAsset( - a.Name(), - &survey.Question{ - Prompt: &survey.Input{ - Message: "Base Domain", - Help: "The base domain of the cluster. All DNS records will be sub-domains of this base and will also include the cluster name.\n\nFor AWS, this must be a previously-existing public Route 53 zone. You can check for any already in your account with:\n\n $ aws route53 list-hosted-zones --query 'HostedZones[? !(Config.PrivateZone)].Name' --output text", - }, - Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { - return validate.DomainName(ans.(string)) - }), - }, - "OPENSHIFT_INSTALL_BASE_DOMAIN", - ) - a.BaseDomain = bd - return err -} - -// Name returns the human-friendly name of the asset. -func (a *baseDomain) Name() string { - return "Base Domain" -} diff --git a/pkg/asset/installconfig/clusterid.go b/pkg/asset/installconfig/clusterid.go deleted file mode 100644 index f075d1f53d0..00000000000 --- a/pkg/asset/installconfig/clusterid.go +++ /dev/null @@ -1,29 +0,0 @@ -package installconfig - -import ( - "github.com/pborman/uuid" - - "github.com/openshift/installer/pkg/asset" -) - -type clusterID struct { - ClusterID string -} - -var _ asset.Asset = (*clusterID)(nil) - -// Dependencies returns no dependencies. -func (a *clusterID) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Generate generates a new UUID -func (a *clusterID) Generate(asset.Parents) error { - a.ClusterID = uuid.New() - return nil -} - -// Name returns the human-friendly name of the asset. 
-func (a *clusterID) Name() string { - return "Cluster ID" -} diff --git a/pkg/asset/installconfig/clustername.go b/pkg/asset/installconfig/clustername.go deleted file mode 100644 index ab2788dba68..00000000000 --- a/pkg/asset/installconfig/clustername.go +++ /dev/null @@ -1,43 +0,0 @@ -package installconfig - -import ( - survey "gopkg.in/AlecAivazis/survey.v1" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/validate" -) - -type clusterName struct { - ClusterName string -} - -var _ asset.Asset = (*clusterName)(nil) - -// Dependencies returns no dependencies. -func (a *clusterName) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Generate queries for the cluster name from the user. -func (a *clusterName) Generate(asset.Parents) error { - n, err := asset.GenerateUserProvidedAsset( - a.Name(), - &survey.Question{ - Prompt: &survey.Input{ - Message: "Cluster Name", - Help: "The name of the cluster. This will be used when generating sub-domains.\n\nFor libvirt, choose a name that is unique enough to be used as a prefix during cluster deletion. For example, if you use 'demo' as your cluster name, `openshift-install destroy cluster` may destroy all domains, networks, pools, and volumes that begin with 'demo'.", - }, - Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { - return validate.DomainName(ans.(string)) - }), - }, - "OPENSHIFT_INSTALL_CLUSTER_NAME", - ) - a.ClusterName = n - return err -} - -// Name returns the human-friendly name of the asset. -func (a *clusterName) Name() string { - return "Cluster Name" -} diff --git a/pkg/asset/installconfig/doc.go b/pkg/asset/installconfig/doc.go deleted file mode 100644 index fc5482f6203..00000000000 --- a/pkg/asset/installconfig/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package installconfig generates the install config assets based on its dependencies. -// The type itself is defined in ../pkg/types. 
-package installconfig diff --git a/pkg/asset/installconfig/emailaddress.go b/pkg/asset/installconfig/emailaddress.go deleted file mode 100644 index ff714b94c07..00000000000 --- a/pkg/asset/installconfig/emailaddress.go +++ /dev/null @@ -1,43 +0,0 @@ -package installconfig - -import ( - survey "gopkg.in/AlecAivazis/survey.v1" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/validate" -) - -type emailAddress struct { - EmailAddress string -} - -var _ asset.Asset = (*emailAddress)(nil) - -// Dependencies returns no dependencies. -func (a *emailAddress) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Generate queries for the email address from the user. -func (a *emailAddress) Generate(asset.Parents) error { - email, err := asset.GenerateUserProvidedAsset( - a.Name(), - &survey.Question{ - Prompt: &survey.Input{ - Message: "Email Address", - Help: "The email address of the cluster administrator. This will be used to log in to the console.", - }, - Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { - return validate.Email(ans.(string)) - }), - }, - "OPENSHIFT_INSTALL_EMAIL_ADDRESS", - ) - a.EmailAddress = email - return err -} - -// Name returns the human-friendly name of the asset. 
-func (a *emailAddress) Name() string { - return "Email Address" -} diff --git a/pkg/asset/installconfig/installconfig.go b/pkg/asset/installconfig/installconfig.go deleted file mode 100644 index de7d437c0da..00000000000 --- a/pkg/asset/installconfig/installconfig.go +++ /dev/null @@ -1,171 +0,0 @@ -package installconfig - -import ( - "net" - "os" - - "github.com/ghodss/yaml" - "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - netopv1 "github.com/openshift/cluster-network-operator/pkg/apis/networkoperator/v1" - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/ipnet" - "github.com/openshift/installer/pkg/types" -) - -const ( - installConfigFilename = "install-config.yml" -) - -var ( - defaultServiceCIDR = parseCIDR("172.30.0.0/16") - defaultClusterCIDR = "10.128.0.0/14" - defaultHostSubnetLength = 9 // equivalent to a /23 per node -) - -// InstallConfig generates the install-config.yml file. -type InstallConfig struct { - Config *types.InstallConfig `json:"config"` - File *asset.File `json:"file"` -} - -var _ asset.WritableAsset = (*InstallConfig)(nil) - -// Dependencies returns all of the dependencies directly needed by an -// InstallConfig asset. -func (a *InstallConfig) Dependencies() []asset.Asset { - return []asset.Asset{ - &clusterID{}, - &emailAddress{}, - &password{}, - &sshPublicKey{}, - &baseDomain{}, - &clusterName{}, - &pullSecret{}, - &platform{}, - } -} - -// Generate generates the install-config.yml file. 
-func (a *InstallConfig) Generate(parents asset.Parents) error { - clusterID := &clusterID{} - emailAddress := &emailAddress{} - password := &password{} - sshPublicKey := &sshPublicKey{} - baseDomain := &baseDomain{} - clusterName := &clusterName{} - pullSecret := &pullSecret{} - platform := &platform{} - parents.Get( - clusterID, - emailAddress, - password, - sshPublicKey, - baseDomain, - clusterName, - pullSecret, - platform, - ) - - a.Config = &types.InstallConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterName.ClusterName, - }, - ClusterID: clusterID.ClusterID, - Admin: types.Admin{ - Email: emailAddress.EmailAddress, - Password: password.Password, - SSHKey: sshPublicKey.Key, - }, - BaseDomain: baseDomain.BaseDomain, - Networking: types.Networking{ - Type: "OpenshiftSDN", - - ServiceCIDR: ipnet.IPNet{ - IPNet: defaultServiceCIDR, - }, - ClusterNetworks: []netopv1.ClusterNetwork{ - { - CIDR: defaultClusterCIDR, - HostSubnetLength: uint32(defaultHostSubnetLength), - }, - }, - }, - PullSecret: pullSecret.PullSecret, - } - - numberOfMasters := int64(3) - numberOfWorkers := int64(3) - switch { - case platform.AWS != nil: - a.Config.AWS = platform.AWS - case platform.OpenStack != nil: - a.Config.OpenStack = platform.OpenStack - case platform.Libvirt != nil: - a.Config.Libvirt = platform.Libvirt - numberOfMasters = 1 - numberOfWorkers = 1 - default: - panic("unknown platform type") - } - - a.Config.Machines = []types.MachinePool{ - { - Name: "master", - Replicas: func(x int64) *int64 { return &x }(numberOfMasters), - }, - { - Name: "worker", - Replicas: func(x int64) *int64 { return &x }(numberOfWorkers), - }, - } - - data, err := yaml.Marshal(a.Config) - if err != nil { - return errors.Wrap(err, "failed to Marshal InstallConfig") - } - a.File = &asset.File{ - Filename: installConfigFilename, - Data: data, - } - - return nil -} - -// Name returns the human-friendly name of the asset. 
-func (a *InstallConfig) Name() string { - return "Install Config" -} - -// Files returns the files generated by the asset. -func (a *InstallConfig) Files() []*asset.File { - if a.File != nil { - return []*asset.File{a.File} - } - return []*asset.File{} -} - -func parseCIDR(s string) net.IPNet { - _, cidr, _ := net.ParseCIDR(s) - return *cidr -} - -// Load returns the installconfig from disk. -func (a *InstallConfig) Load(f asset.FileFetcher) (found bool, err error) { - file, err := f.FetchByName(installConfigFilename) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - config := &types.InstallConfig{} - if err := yaml.Unmarshal(file.Data, config); err != nil { - return false, errors.Wrapf(err, "failed to unmarshal") - } - - a.File, a.Config = file, config - return true, nil -} diff --git a/pkg/asset/installconfig/libvirt/libvirt.go b/pkg/asset/installconfig/libvirt/libvirt.go deleted file mode 100644 index 80cc4203c85..00000000000 --- a/pkg/asset/installconfig/libvirt/libvirt.go +++ /dev/null @@ -1,82 +0,0 @@ -// Package libvirt collects libvirt-specific configuration. -package libvirt - -import ( - "context" - "fmt" - "net/url" - "os" - - "github.com/pkg/errors" - survey "gopkg.in/AlecAivazis/survey.v1" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/rhcos" - "github.com/openshift/installer/pkg/types/libvirt" -) - -const ( - defaultNetworkIfName = "tt0" - defaultNetworkIPRange = "192.168.126.0/24" -) - -// Platform collects libvirt-specific configuration. -func Platform() (*libvirt.Platform, error) { - uri, err := asset.GenerateUserProvidedAsset( - "Libvirt Connection URI", - &survey.Question{ - Prompt: &survey.Input{ - Message: "Libvirt Connection URI", - Help: "The libvirt connection URI to be used. 
This must be accessible from the running cluster.", - Default: "qemu+tcp://192.168.122.1/system", - }, - Validate: survey.ComposeValidators(survey.Required, uriValidator), - }, - "OPENSHIFT_INSTALL_LIBVIRT_URI", - ) - if err != nil { - return nil, err - } - - qcowImage, ok := os.LookupEnv("OPENSHIFT_INSTALL_LIBVIRT_IMAGE") - if ok { - err = validURI(qcowImage) - if err != nil { - return nil, errors.Wrap(err, "resolve OPENSHIFT_INSTALL_LIBVIRT_IMAGE") - } - } else { - qcowImage, err = rhcos.QEMU(context.TODO(), rhcos.DefaultChannel) - if err != nil { - return nil, errors.Wrap(err, "failed to fetch QEMU image URL") - } - } - - return &libvirt.Platform{ - Network: libvirt.Network{ - IfName: defaultNetworkIfName, - IPRange: defaultNetworkIPRange, - }, - DefaultMachinePlatform: &libvirt.MachinePool{ - Image: qcowImage, - }, - URI: uri, - }, nil -} - -// uriValidator validates if the answer provided in prompt is a valid -// url and has non-empty scheme. -func uriValidator(ans interface{}) error { - return validURI(ans.(string)) -} - -// validURI validates if the URI is a valid URI with a non-empty scheme. -func validURI(uri string) error { - parsed, err := url.Parse(uri) - if err != nil { - return err - } - if parsed.Scheme == "" { - return fmt.Errorf("invalid URI %q (no scheme)", uri) - } - return nil -} diff --git a/pkg/asset/installconfig/openstack/openstack.go b/pkg/asset/installconfig/openstack/openstack.go deleted file mode 100644 index 6d3ea877518..00000000000 --- a/pkg/asset/installconfig/openstack/openstack.go +++ /dev/null @@ -1,105 +0,0 @@ -// Package openstack collects OpenStack-specific configuration. -package openstack - -import ( - "github.com/pkg/errors" - survey "gopkg.in/AlecAivazis/survey.v1" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/types/openstack" -) - -const ( - defaultVPCCIDR = "10.0.0.0/16" -) - -// Platform collects OpenStack-specific configuration. 
-func Platform() (*openstack.Platform, error) { - region, err := asset.GenerateUserProvidedAsset( - "OpenStack Region", - &survey.Question{ - Prompt: &survey.Input{ - Message: "Region", - Help: "The OpenStack region to be used for installation.", - Default: "regionOne", - }, - Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { - //value := ans.(string) - //FIXME(shardy) add some validation here - return nil - }), - }, - "OPENSHIFT_INSTALL_OPENSTACK_REGION", - ) - if err != nil { - return nil, err - } - - image, err := asset.GenerateUserProvidedAsset( - "OpenStack Image", - &survey.Question{ - Prompt: &survey.Input{ - Message: "Image", - Help: "The OpenStack image to be used for installation.", - Default: "rhcos", - }, - Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { - //value := ans.(string) - //FIXME(shardy) add some validation here - return nil - }), - }, - "OPENSHIFT_INSTALL_OPENSTACK_IMAGE", - ) - if err != nil { - return nil, err - } - - cloud, err := asset.GenerateUserProvidedAsset( - "OpenStack Cloud", - &survey.Question{ - //TODO(russellb) - We could open clouds.yaml here and read the list of defined clouds - //and then use survey.Select to let the user choose one. 
- Prompt: &survey.Input{ - Message: "Cloud", - Help: "The OpenStack cloud name from clouds.yaml.", - }, - Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { - //value := ans.(string) - //FIXME(russellb) add some validation here - return nil - }), - }, - "OPENSHIFT_INSTALL_OPENSTACK_CLOUD", - ) - if err != nil { - return nil, err - } - - extNet, err := asset.GenerateUserProvidedAsset( - "OpenStack External Network", - &survey.Question{ - Prompt: &survey.Input{ - Message: "ExternalNetwork", - Help: "The OpenStack external network to be used for installation.", - }, - Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { - //value := ans.(string) - //FIXME(shadower) add some validation here - return nil - }), - }, - "OPENSHIFT_INSTALL_OPENSTACK_EXTERNAL_NETWORK", - ) - if err != nil { - return nil, errors.Wrapf(err, "failed to Marshal %s platform", openstack.Name) - } - - return &openstack.Platform{ - NetworkCIDRBlock: defaultVPCCIDR, - Region: region, - BaseImage: image, - Cloud: cloud, - ExternalNetwork: extNet, - }, nil -} diff --git a/pkg/asset/installconfig/password.go b/pkg/asset/installconfig/password.go deleted file mode 100644 index 34c502c2c56..00000000000 --- a/pkg/asset/installconfig/password.go +++ /dev/null @@ -1,39 +0,0 @@ -package installconfig - -import ( - survey "gopkg.in/AlecAivazis/survey.v1" - - "github.com/openshift/installer/pkg/asset" -) - -type password struct { - Password string -} - -var _ asset.Asset = (*password)(nil) - -// Dependencies returns no dependencies. -func (a *password) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Generate queries for the password from the user. -func (a *password) Generate(asset.Parents) error { - p, err := asset.GenerateUserProvidedAsset( - a.Name(), - &survey.Question{ - Prompt: &survey.Password{ - Message: "Password", - Help: "The password of the cluster administrator. 
This will be used to log in to the console.", - }, - }, - "OPENSHIFT_INSTALL_PASSWORD", - ) - a.Password = p - return err -} - -// Name returns the human-friendly name of the asset. -func (a *password) Name() string { - return "Password" -} diff --git a/pkg/asset/installconfig/platform.go b/pkg/asset/installconfig/platform.go deleted file mode 100644 index 86c93c1cfa5..00000000000 --- a/pkg/asset/installconfig/platform.go +++ /dev/null @@ -1,85 +0,0 @@ -package installconfig - -import ( - "fmt" - "sort" - - "github.com/pkg/errors" - survey "gopkg.in/AlecAivazis/survey.v1" - - "github.com/openshift/installer/pkg/asset" - awsconfig "github.com/openshift/installer/pkg/asset/installconfig/aws" - libvirtconfig "github.com/openshift/installer/pkg/asset/installconfig/libvirt" - openstackconfig "github.com/openshift/installer/pkg/asset/installconfig/openstack" - "github.com/openshift/installer/pkg/types" - "github.com/openshift/installer/pkg/types/aws" - "github.com/openshift/installer/pkg/types/libvirt" - "github.com/openshift/installer/pkg/types/openstack" -) - -// Platform is an asset that queries the user for the platform on which to install -// the cluster. -type platform types.Platform - -var _ asset.Asset = (*platform)(nil) - -// Dependencies returns no dependencies. -func (a *platform) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Generate queries for input from the user. 
-func (a *platform) Generate(asset.Parents) error { - platform, err := a.queryUserForPlatform() - if err != nil { - return err - } - - switch platform { - case aws.Name: - a.AWS, err = awsconfig.Platform() - if err != nil { - return err - } - case openstack.Name: - a.OpenStack, err = openstackconfig.Platform() - if err != nil { - return err - } - case libvirt.Name: - a.Libvirt, err = libvirtconfig.Platform() - if err != nil { - return err - } - default: - return fmt.Errorf("unknown platform type %q", platform) - } - - return nil -} - -// Name returns the human-friendly name of the asset. -func (a *platform) Name() string { - return "Platform" -} - -func (a *platform) queryUserForPlatform() (string, error) { - return asset.GenerateUserProvidedAsset( - "Platform", - &survey.Question{ - Prompt: &survey.Select{ - Message: "Platform", - Options: types.PlatformNames, - }, - Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { - choice := ans.(string) - i := sort.SearchStrings(types.PlatformNames, choice) - if i == len(types.PlatformNames) || types.PlatformNames[i] != choice { - return errors.Errorf("invalid platform %q", choice) - } - return nil - }), - }, - "OPENSHIFT_INSTALL_PLATFORM", - ) -} diff --git a/pkg/asset/installconfig/pullsecret.go b/pkg/asset/installconfig/pullsecret.go deleted file mode 100644 index 2aa85bfe916..00000000000 --- a/pkg/asset/installconfig/pullsecret.go +++ /dev/null @@ -1,44 +0,0 @@ -package installconfig - -import ( - survey "gopkg.in/AlecAivazis/survey.v1" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/validate" -) - -type pullSecret struct { - PullSecret string -} - -var _ asset.Asset = (*pullSecret)(nil) - -// Dependencies returns no dependencies. -func (a *pullSecret) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Generate queries for the pull secret from the user. 
-func (a *pullSecret) Generate(asset.Parents) error { - s, err := asset.GenerateUserProvidedAssetForPath( - a.Name(), - &survey.Question{ - Prompt: &survey.Input{ - Message: "Pull Secret", - Help: "The container registry pull secret for this cluster, as a single line of JSON (e.g. {\"auths\": {...}}).", - }, - Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { - return validate.JSON([]byte(ans.(string))) - }), - }, - "OPENSHIFT_INSTALL_PULL_SECRET", - "OPENSHIFT_INSTALL_PULL_SECRET_PATH", - ) - a.PullSecret = s - return err -} - -// Name returns the human-friendly name of the asset. -func (a *pullSecret) Name() string { - return "Pull Secret" -} diff --git a/pkg/asset/kubeconfig/admin.go b/pkg/asset/kubeconfig/admin.go deleted file mode 100644 index 4e70f019f45..00000000000 --- a/pkg/asset/kubeconfig/admin.go +++ /dev/null @@ -1,55 +0,0 @@ -package kubeconfig - -import ( - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/asset/tls" -) - -var ( - kubeconfigAdminPath = filepath.Join("auth", "kubeconfig") -) - -// Admin is the asset for the admin kubeconfig. -type Admin struct { - kubeconfig -} - -var _ asset.WritableAsset = (*Admin)(nil) - -// Dependencies returns the dependency of the kubeconfig. -func (k *Admin) Dependencies() []asset.Asset { - return []asset.Asset{ - &tls.RootCA{}, - &tls.AdminCertKey{}, - &installconfig.InstallConfig{}, - } -} - -// Generate generates the kubeconfig. -func (k *Admin) Generate(parents asset.Parents) error { - rootCA := &tls.RootCA{} - adminCertKey := &tls.AdminCertKey{} - installConfig := &installconfig.InstallConfig{} - parents.Get(rootCA, adminCertKey, installConfig) - - return k.kubeconfig.generate( - rootCA, - adminCertKey, - installConfig.Config, - "admin", - kubeconfigAdminPath, - ) -} - -// Name returns the human-friendly name of the asset. 
-func (k *Admin) Name() string { - return "Kubeconfig Admin" -} - -// Load returns the kubeconfig from disk. -func (k *Admin) Load(f asset.FileFetcher) (found bool, err error) { - return k.load(f, kubeconfigAdminPath) -} diff --git a/pkg/asset/kubeconfig/doc.go b/pkg/asset/kubeconfig/doc.go deleted file mode 100644 index 61361a8aec8..00000000000 --- a/pkg/asset/kubeconfig/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package kubeconfig defines and generates the kubeconfig assets. -package kubeconfig diff --git a/pkg/asset/kubeconfig/kubeconfig.go b/pkg/asset/kubeconfig/kubeconfig.go deleted file mode 100644 index 5ac43530dd3..00000000000 --- a/pkg/asset/kubeconfig/kubeconfig.go +++ /dev/null @@ -1,98 +0,0 @@ -package kubeconfig - -import ( - "fmt" - "os" - - "github.com/ghodss/yaml" - "github.com/pkg/errors" - clientcmd "k8s.io/client-go/tools/clientcmd/api/v1" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/tls" - "github.com/openshift/installer/pkg/types" -) - -type kubeconfig struct { - Config *clientcmd.Config - File *asset.File -} - -// generate generates the kubeconfig. 
-func (k *kubeconfig) generate( - rootCA tls.CertKeyInterface, - clientCertKey tls.CertKeyInterface, - installConfig *types.InstallConfig, - userName string, - kubeconfigPath string, -) error { - k.Config = &clientcmd.Config{ - Clusters: []clientcmd.NamedCluster{ - { - Name: installConfig.ObjectMeta.Name, - Cluster: clientcmd.Cluster{ - Server: fmt.Sprintf("https://%s-api.%s:6443", installConfig.ObjectMeta.Name, installConfig.BaseDomain), - CertificateAuthorityData: []byte(rootCA.Cert()), - }, - }, - }, - AuthInfos: []clientcmd.NamedAuthInfo{ - { - Name: userName, - AuthInfo: clientcmd.AuthInfo{ - ClientCertificateData: []byte(clientCertKey.Cert()), - ClientKeyData: []byte(clientCertKey.Key()), - }, - }, - }, - Contexts: []clientcmd.NamedContext{ - { - Name: userName, - Context: clientcmd.Context{ - Cluster: installConfig.ObjectMeta.Name, - AuthInfo: userName, - }, - }, - }, - CurrentContext: userName, - } - - data, err := yaml.Marshal(k.Config) - if err != nil { - return errors.Wrap(err, "failed to Marshal kubeconfig") - } - - k.File = &asset.File{ - Filename: kubeconfigPath, - Data: data, - } - - return nil -} - -// Files returns the files generated by the asset. -func (k *kubeconfig) Files() []*asset.File { - if k.File != nil { - return []*asset.File{k.File} - } - return []*asset.File{} -} - -// load returns the kubeconfig from disk. 
-func (k *kubeconfig) load(f asset.FileFetcher, name string) (found bool, err error) { - file, err := f.FetchByName(name) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - config := &clientcmd.Config{} - if err := yaml.Unmarshal(file.Data, config); err != nil { - return false, errors.Wrapf(err, "failed to unmarshal") - } - - k.File, k.Config = file, config - return true, nil -} diff --git a/pkg/asset/kubeconfig/kubeconfig_test.go b/pkg/asset/kubeconfig/kubeconfig_test.go deleted file mode 100644 index 44352a244c8..00000000000 --- a/pkg/asset/kubeconfig/kubeconfig_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package kubeconfig - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/openshift/installer/pkg/asset/tls" - "github.com/openshift/installer/pkg/types" -) - -type testCertKey struct { - key string - cert string -} - -func (t *testCertKey) Key() []byte { - return []byte(t.key) -} - -func (t *testCertKey) Cert() []byte { - return []byte(t.cert) -} - -func TestKubeconfigGenerate(t *testing.T) { - rootCA := &testCertKey{ - key: "THIS IS ROOT CA KEY DATA", - cert: "THIS IS ROOT CA CERT DATA", - } - - adminCert := &testCertKey{ - key: "THIS IS ADMIN KEY DATA", - cert: "THIS IS ADMIN CERT DATA", - } - - kubeletCert := &testCertKey{ - key: "THIS IS KUBELET KEY DATA", - cert: "THIS IS KUBELET CERT DATA", - } - - installConfig := &types.InstallConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster-name", - }, - BaseDomain: "test.example.com", - } - - tests := []struct { - name string - userName string - filename string - clientCert tls.CertKeyInterface - expectedData []byte - }{ - { - name: "admin kubeconfig", - userName: "admin", - filename: "auth/kubeconfig", - clientCert: adminCert, - expectedData: []byte(`clusters: -- cluster: - certificate-authority-data: VEhJUyBJUyBST09UIENBIENFUlQgREFUQQ== - server: 
https://test-cluster-name-api.test.example.com:6443 - name: test-cluster-name -contexts: -- context: - cluster: test-cluster-name - user: admin - name: admin -current-context: admin -preferences: {} -users: -- name: admin - user: - client-certificate-data: VEhJUyBJUyBBRE1JTiBDRVJUIERBVEE= - client-key-data: VEhJUyBJUyBBRE1JTiBLRVkgREFUQQ== -`), - }, - { - name: "kubelet kubeconfig", - userName: "kubelet", - filename: "auth/kubeconfig-kubelet", - clientCert: kubeletCert, - expectedData: []byte(`clusters: -- cluster: - certificate-authority-data: VEhJUyBJUyBST09UIENBIENFUlQgREFUQQ== - server: https://test-cluster-name-api.test.example.com:6443 - name: test-cluster-name -contexts: -- context: - cluster: test-cluster-name - user: kubelet - name: kubelet -current-context: kubelet -preferences: {} -users: -- name: kubelet - user: - client-certificate-data: VEhJUyBJUyBLVUJFTEVUIENFUlQgREFUQQ== - client-key-data: VEhJUyBJUyBLVUJFTEVUIEtFWSBEQVRB -`), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - kc := &kubeconfig{} - err := kc.generate(rootCA, tt.clientCert, installConfig, tt.userName, tt.filename) - assert.NoError(t, err, "unexpected error generating config") - actualFiles := kc.Files() - assert.Equal(t, 1, len(actualFiles), "unexpected number of files generated") - assert.Equal(t, tt.filename, actualFiles[0].Filename, "unexpected file name generated") - assert.Equal(t, tt.expectedData, actualFiles[0].Data, "unexpected config") - }) - } - -} diff --git a/pkg/asset/kubeconfig/kubelet.go b/pkg/asset/kubeconfig/kubelet.go deleted file mode 100644 index 17c26cbca12..00000000000 --- a/pkg/asset/kubeconfig/kubelet.go +++ /dev/null @@ -1,55 +0,0 @@ -package kubeconfig - -import ( - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/asset/tls" -) - -var ( - kubeconfigKubeletPath = filepath.Join("auth", "kubeconfig-kubelet") -) - -// 
Kubelet is the asset for the kubelet kubeconfig. -type Kubelet struct { - kubeconfig -} - -var _ asset.WritableAsset = (*Kubelet)(nil) - -// Dependencies returns the dependency of the kubeconfig. -func (k *Kubelet) Dependencies() []asset.Asset { - return []asset.Asset{ - &tls.RootCA{}, - &tls.KubeletCertKey{}, - &installconfig.InstallConfig{}, - } -} - -// Generate generates the kubeconfig. -func (k *Kubelet) Generate(parents asset.Parents) error { - rootCA := &tls.RootCA{} - kubeletCertKey := &tls.KubeletCertKey{} - installConfig := &installconfig.InstallConfig{} - parents.Get(rootCA, kubeletCertKey, installConfig) - - return k.kubeconfig.generate( - rootCA, - kubeletCertKey, - installConfig.Config, - "kubelet", - kubeconfigKubeletPath, - ) -} - -// Name returns the human-friendly name of the asset. -func (k *Kubelet) Name() string { - return "Kubeconfig Kubelet" -} - -// Load is a no-op because kubelet kubeconfig is not written to disk. -func (k *Kubelet) Load(asset.FileFetcher) (bool, error) { - return false, nil -} diff --git a/pkg/asset/machines/aws/machines.go b/pkg/asset/machines/aws/machines.go deleted file mode 100644 index e4dfc0963fa..00000000000 --- a/pkg/asset/machines/aws/machines.go +++ /dev/null @@ -1,138 +0,0 @@ -// Package aws generates Machine objects for aws. -package aws - -import ( - "fmt" - - "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/utils/pointer" - awsprovider "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsproviderconfig/v1alpha1" - clusterapi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - - "github.com/openshift/installer/pkg/types" - "github.com/openshift/installer/pkg/types/aws" -) - -// Machines returns a list of machines for a machinepool. 
-func Machines(config *types.InstallConfig, pool *types.MachinePool, role, userDataSecret string) ([]clusterapi.Machine, error) { - if configPlatform := config.Platform.Name(); configPlatform != aws.Name { - return nil, fmt.Errorf("non-AWS configuration: %q", configPlatform) - } - if poolPlatform := pool.Platform.Name(); poolPlatform != aws.Name { - return nil, fmt.Errorf("non-AWS machine-pool: %q", poolPlatform) - } - clustername := config.ObjectMeta.Name - platform := config.Platform.AWS - mpool := pool.Platform.AWS - azs := mpool.Zones - - total := int64(1) - if pool.Replicas != nil { - total = *pool.Replicas - } - var machines []clusterapi.Machine - for idx := int64(0); idx < total; idx++ { - azIndex := int(idx) % len(azs) - provider, err := provider(config.ClusterID, clustername, platform, mpool, azIndex, role, userDataSecret) - if err != nil { - return nil, errors.Wrap(err, "failed to create provider") - } - machine := clusterapi.Machine{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "cluster.k8s.io/v1alpha1", - Kind: "Machine", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: "openshift-cluster-api", - Name: fmt.Sprintf("%s-%s-%d", clustername, pool.Name, idx), - Labels: map[string]string{ - "sigs.k8s.io/cluster-api-cluster": clustername, - "sigs.k8s.io/cluster-api-machine-role": role, - "sigs.k8s.io/cluster-api-machine-type": role, - }, - }, - Spec: clusterapi.MachineSpec{ - ProviderConfig: clusterapi.ProviderConfig{ - Value: &runtime.RawExtension{Object: provider}, - }, - // we don't need to set Versions, because we control those via operators. 
- }, - } - - machines = append(machines, machine) - } - - return machines, nil -} - -func provider(clusterID, clusterName string, platform *aws.Platform, mpool *aws.MachinePool, azIdx int, role, userDataSecret string) (*awsprovider.AWSMachineProviderConfig, error) { - az := mpool.Zones[azIdx] - tags, err := tagsFromUserTags(clusterID, clusterName, platform.UserTags) - if err != nil { - return nil, errors.Wrap(err, "failed to create awsprovider.TagSpecifications from UserTags") - } - return &awsprovider.AWSMachineProviderConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "aws.cluster.k8s.io/v1alpha1", - Kind: "AWSMachineProviderConfig", - }, - InstanceType: mpool.InstanceType, - AMI: awsprovider.AWSResourceReference{ID: &mpool.AMIID}, - Tags: tags, - IAMInstanceProfile: &awsprovider.AWSResourceReference{ID: pointer.StringPtr(fmt.Sprintf("%s-%s-profile", clusterName, role))}, - UserDataSecret: &corev1.LocalObjectReference{Name: userDataSecret}, - Subnet: awsprovider.AWSResourceReference{ - Filters: []awsprovider.Filter{{ - Name: "tag:Name", - Values: []string{fmt.Sprintf("%s-%s-%s", clusterName, role, az)}, - }}, - }, - Placement: awsprovider.Placement{Region: platform.Region, AvailabilityZone: az}, - SecurityGroups: []awsprovider.AWSResourceReference{{ - Filters: []awsprovider.Filter{{ - Name: "tag:Name", - Values: []string{fmt.Sprintf("%s_%s_sg", clusterName, role)}, - }}, - }}, - }, nil -} - -func tagsFromUserTags(clusterID, clusterName string, usertags map[string]string) ([]awsprovider.TagSpecification, error) { - tags := []awsprovider.TagSpecification{ - {Name: "tectonicClusterID", Value: clusterID}, - {Name: fmt.Sprintf("kubernetes.io/cluster/%s", clusterName), Value: "owned"}, - } - forbiddenTags := sets.NewString() - for idx := range tags { - forbiddenTags.Insert(tags[idx].Name) - } - for k, v := range usertags { - if forbiddenTags.Has(k) { - return nil, fmt.Errorf("user tags may not clobber %s", k) - } - tags = append(tags, awsprovider.TagSpecification{Name: 
k, Value: v}) - } - return tags, nil -} - -// ConfigMasters sets the PublicIP flag and assigns a set of load balancers to the given machines -func ConfigMasters(machines []clusterapi.Machine, clusterName string) { - for _, machine := range machines { - providerConfig := machine.Spec.ProviderConfig.Value.Object.(*awsprovider.AWSMachineProviderConfig) - providerConfig.PublicIP = pointer.BoolPtr(true) - providerConfig.LoadBalancers = []awsprovider.LoadBalancerReference{ - { - Name: fmt.Sprintf("%s-ext", clusterName), - Type: awsprovider.NetworkLoadBalancerType, - }, - { - Name: fmt.Sprintf("%s-int", clusterName), - Type: awsprovider.NetworkLoadBalancerType, - }, - } - } -} diff --git a/pkg/asset/machines/aws/machinesets.go b/pkg/asset/machines/aws/machinesets.go deleted file mode 100644 index 65af0057c7c..00000000000 --- a/pkg/asset/machines/aws/machinesets.go +++ /dev/null @@ -1,90 +0,0 @@ -// Package aws generates Machine objects for aws. -package aws - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - clusterapi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - - "github.com/openshift/installer/pkg/types" - "github.com/openshift/installer/pkg/types/aws" - "github.com/pkg/errors" -) - -// MachineSets returns a list of machinesets for a machinepool. 
-func MachineSets(config *types.InstallConfig, pool *types.MachinePool, role, userDataSecret string) ([]clusterapi.MachineSet, error) { - if configPlatform := config.Platform.Name(); configPlatform != aws.Name { - return nil, fmt.Errorf("non-AWS configuration: %q", configPlatform) - } - if poolPlatform := pool.Platform.Name(); poolPlatform != aws.Name { - return nil, fmt.Errorf("non-AWS machine-pool: %q", poolPlatform) - } - clustername := config.ObjectMeta.Name - platform := config.Platform.AWS - mpool := pool.Platform.AWS - azs := mpool.Zones - - total := int64(0) - if pool.Replicas != nil { - total = *pool.Replicas - } - numOfAZs := int64(len(azs)) - var machinesets []clusterapi.MachineSet - for idx, az := range azs { - replicas := int32(total / numOfAZs) - if int64(idx) < total%numOfAZs { - replicas++ - } - - provider, err := provider(config.ClusterID, clustername, platform, mpool, idx, role, userDataSecret) - if err != nil { - return nil, errors.Wrap(err, "failed to create provider") - } - name := fmt.Sprintf("%s-%s-%s", clustername, pool.Name, az) - mset := clusterapi.MachineSet{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "cluster.k8s.io/v1alpha1", - Kind: "MachineSet", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: "openshift-cluster-api", - Name: name, - Labels: map[string]string{ - "sigs.k8s.io/cluster-api-cluster": clustername, - "sigs.k8s.io/cluster-api-machine-role": role, - "sigs.k8s.io/cluster-api-machine-type": role, - }, - }, - Spec: clusterapi.MachineSetSpec{ - Replicas: &replicas, - Selector: metav1.LabelSelector{ - MatchLabels: map[string]string{ - "sigs.k8s.io/cluster-api-machineset": name, - "sigs.k8s.io/cluster-api-cluster": clustername, - }, - }, - Template: clusterapi.MachineTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "sigs.k8s.io/cluster-api-machineset": name, - "sigs.k8s.io/cluster-api-cluster": clustername, - "sigs.k8s.io/cluster-api-machine-role": role, - "sigs.k8s.io/cluster-api-machine-type": role, 
- }, - }, - Spec: clusterapi.MachineSpec{ - ProviderConfig: clusterapi.ProviderConfig{ - Value: &runtime.RawExtension{Object: provider}, - }, - // we don't need to set Versions, because we control those via cluster operators. - }, - }, - }, - } - machinesets = append(machinesets, mset) - } - - return machinesets, nil -} diff --git a/pkg/asset/machines/aws/zones.go b/pkg/asset/machines/aws/zones.go deleted file mode 100644 index e0100993641..00000000000 --- a/pkg/asset/machines/aws/zones.go +++ /dev/null @@ -1,48 +0,0 @@ -package aws - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/ec2" -) - -// AvailabilityZones retrieves a list of availability zones for the given region. -func AvailabilityZones(region string) ([]string, error) { - ec2Client := ec2Client(region) - zones, err := fetchAvailabilityZones(ec2Client, region) - if err != nil { - return nil, fmt.Errorf("cannot fetch availability zones: %v", err) - } - return zones, nil -} - -func ec2Client(region string) *ec2.EC2 { - ssn := session.Must(session.NewSessionWithOptions(session.Options{ - SharedConfigState: session.SharedConfigEnable, - Config: aws.Config{ - Region: aws.String(region), - }, - })) - return ec2.New(ssn) -} - -func fetchAvailabilityZones(client *ec2.EC2, region string) ([]string, error) { - zoneFilter := &ec2.Filter{ - Name: aws.String("region-name"), - Values: []*string{aws.String(region)}, - } - req := &ec2.DescribeAvailabilityZonesInput{ - Filters: []*ec2.Filter{zoneFilter}, - } - resp, err := client.DescribeAvailabilityZones(req) - if err != nil { - return nil, err - } - zones := []string{} - for _, zone := range resp.AvailabilityZones { - zones = append(zones, *zone.ZoneName) - } - return zones, nil -} diff --git a/pkg/asset/machines/doc.go b/pkg/asset/machines/doc.go deleted file mode 100644 index 77156e1a63c..00000000000 --- a/pkg/asset/machines/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package machines is 
responsible for creating Machine objects for machinepools. -package machines diff --git a/pkg/asset/machines/libvirt/machines.go b/pkg/asset/machines/libvirt/machines.go deleted file mode 100644 index 718f5bdc741..00000000000 --- a/pkg/asset/machines/libvirt/machines.go +++ /dev/null @@ -1,84 +0,0 @@ -// Package libvirt generates Machine objects for libvirt. -package libvirt - -import ( - "fmt" - - libvirtprovider "github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - clusterapi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - - "github.com/openshift/installer/pkg/types" - "github.com/openshift/installer/pkg/types/libvirt" -) - -// Machines returns a list of machines for a machinepool. -func Machines(config *types.InstallConfig, pool *types.MachinePool, role, userDataSecret string) ([]clusterapi.Machine, error) { - if configPlatform := config.Platform.Name(); configPlatform != libvirt.Name { - return nil, fmt.Errorf("non-Libvirt configuration: %q", configPlatform) - } - // FIXME: empty is a valid case for Libvirt as we don't use it. - if poolPlatform := pool.Platform.Name(); poolPlatform != "" && poolPlatform != libvirt.Name { - return nil, fmt.Errorf("non-Libvirt machine-pool: %q", poolPlatform) - } - clustername := config.ObjectMeta.Name - platform := config.Platform.Libvirt - // FIXME: libvirt actuator does not support any options from machinepool. 
- // mpool := pool.Platform.Libvirt - - total := int64(1) - if pool.Replicas != nil { - total = *pool.Replicas - } - provider := provider(clustername, platform, userDataSecret) - var machines []clusterapi.Machine - for idx := int64(0); idx < total; idx++ { - machine := clusterapi.Machine{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "cluster.k8s.io/v1alpha1", - Kind: "Machine", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: "openshift-cluster-api", - Name: fmt.Sprintf("%s-%s-%d", clustername, pool.Name, idx), - Labels: map[string]string{ - "sigs.k8s.io/cluster-api-cluster": clustername, - "sigs.k8s.io/cluster-api-machine-role": role, - "sigs.k8s.io/cluster-api-machine-type": role, - }, - }, - Spec: clusterapi.MachineSpec{ - ProviderConfig: clusterapi.ProviderConfig{ - Value: &runtime.RawExtension{Object: provider}, - }, - // we don't need to set Versions, because we control those via cluster operators. - }, - } - machines = append(machines, machine) - } - - return machines, nil -} - -func provider(clusterName string, platform *libvirt.Platform, userDataSecret string) *libvirtprovider.LibvirtMachineProviderConfig { - return &libvirtprovider.LibvirtMachineProviderConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "libvirtproviderconfig.k8s.io/v1alpha1", - Kind: "LibvirtMachineProviderConfig", - }, - DomainMemory: 2048, - DomainVcpu: 2, - Ignition: &libvirtprovider.Ignition{ - UserDataSecret: userDataSecret, - }, - Volume: &libvirtprovider.Volume{ - PoolName: "default", - BaseVolumeID: fmt.Sprintf("/var/lib/libvirt/images/%s-base", clusterName), - }, - NetworkInterfaceName: clusterName, - NetworkInterfaceAddress: platform.Network.IPRange, - Autostart: false, - URI: platform.URI, - } -} diff --git a/pkg/asset/machines/libvirt/machinesets.go b/pkg/asset/machines/libvirt/machinesets.go deleted file mode 100644 index 0b4469d9f51..00000000000 --- a/pkg/asset/machines/libvirt/machinesets.go +++ /dev/null @@ -1,79 +0,0 @@ -// Package libvirt generates Machine objects for 
libvirt. -package libvirt - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/pointer" - clusterapi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - - "github.com/openshift/installer/pkg/types" - "github.com/openshift/installer/pkg/types/libvirt" -) - -// MachineSets returns a list of machinesets for a machinepool. -func MachineSets(config *types.InstallConfig, pool *types.MachinePool, role, userDataSecret string) ([]clusterapi.MachineSet, error) { - if configPlatform := config.Platform.Name(); configPlatform != libvirt.Name { - return nil, fmt.Errorf("non-Libvirt configuration: %q", configPlatform) - } - // FIXME: empty is a valid case for Libvirt as we don't use it. - if poolPlatform := pool.Platform.Name(); poolPlatform != "" && poolPlatform != libvirt.Name { - return nil, fmt.Errorf("non-Libvirt machine-pool: %q", poolPlatform) - } - clustername := config.ObjectMeta.Name - platform := config.Platform.Libvirt - // FIXME: libvirt actuator does not support any options from machinepool. 
- // mpool := pool.Platform.Libvirt - - total := int64(0) - if pool.Replicas != nil { - total = *pool.Replicas - } - - provider := provider(clustername, platform, userDataSecret) - name := fmt.Sprintf("%s-%s-%d", clustername, pool.Name, 0) - mset := clusterapi.MachineSet{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "cluster.k8s.io/v1alpha1", - Kind: "MachineSet", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: "openshift-cluster-api", - Name: name, - Labels: map[string]string{ - "sigs.k8s.io/cluster-api-cluster": clustername, - "sigs.k8s.io/cluster-api-machine-role": role, - "sigs.k8s.io/cluster-api-machine-type": role, - }, - }, - Spec: clusterapi.MachineSetSpec{ - Replicas: pointer.Int32Ptr(int32(total)), - Selector: metav1.LabelSelector{ - MatchLabels: map[string]string{ - "sigs.k8s.io/cluster-api-machineset": name, - "sigs.k8s.io/cluster-api-cluster": clustername, - }, - }, - Template: clusterapi.MachineTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "sigs.k8s.io/cluster-api-machineset": name, - "sigs.k8s.io/cluster-api-cluster": clustername, - "sigs.k8s.io/cluster-api-machine-role": role, - "sigs.k8s.io/cluster-api-machine-type": role, - }, - }, - Spec: clusterapi.MachineSpec{ - ProviderConfig: clusterapi.ProviderConfig{ - Value: &runtime.RawExtension{Object: provider}, - }, - // we don't need to set Versions, because we control those via cluster operators. 
- }, - }, - }, - } - - return []clusterapi.MachineSet{mset}, nil -} diff --git a/pkg/asset/machines/master.go b/pkg/asset/machines/master.go deleted file mode 100644 index e898871b048..00000000000 --- a/pkg/asset/machines/master.go +++ /dev/null @@ -1,159 +0,0 @@ -package machines - -import ( - "context" - "fmt" - "time" - - "github.com/ghodss/yaml" - "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - clusterapi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/ignition/machine" - "github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/asset/machines/aws" - "github.com/openshift/installer/pkg/asset/machines/libvirt" - "github.com/openshift/installer/pkg/asset/machines/openstack" - "github.com/openshift/installer/pkg/rhcos" - "github.com/openshift/installer/pkg/types" -) - -// Master generates the machines for the `master` machine pool. -type Master struct { - MachinesRaw []byte - UserDataSecretRaw []byte -} - -var _ asset.Asset = (*Master)(nil) - -// Name returns a human friendly name for the Master Asset. -func (m *Master) Name() string { - return "Master Machines" -} - -// Dependencies returns all of the dependencies directly needed by the -// Master asset -func (m *Master) Dependencies() []asset.Asset { - return []asset.Asset{ - &installconfig.InstallConfig{}, - &machine.Master{}, - } -} - -// Generate generates the Master asset. 
-func (m *Master) Generate(dependencies asset.Parents) error { - installconfig := &installconfig.InstallConfig{} - mign := &machine.Master{} - dependencies.Get(installconfig, mign) - - var err error - userDataMap := map[string][]byte{"master-user-data": mign.File.Data} - m.UserDataSecretRaw, err = userDataList(userDataMap) - if err != nil { - return errors.Wrap(err, "failed to create user-data secret for worker machines") - } - - ic := installconfig.Config - pool := masterPool(ic.Machines) - switch ic.Platform.Name() { - case "aws": - mpool := defaultAWSMachinePoolPlatform() - mpool.Set(ic.Platform.AWS.DefaultMachinePlatform) - mpool.Set(pool.Platform.AWS) - if mpool.AMIID == "" { - ctx, cancel := context.WithTimeout(context.TODO(), 60*time.Second) - defer cancel() - ami, err := rhcos.AMI(ctx, rhcos.DefaultChannel, ic.Platform.AWS.Region) - if err != nil { - return errors.Wrap(err, "failed to determine default AMI") - } - mpool.AMIID = ami - } - if len(mpool.Zones) == 0 { - azs, err := aws.AvailabilityZones(ic.Platform.AWS.Region) - if err != nil { - return errors.Wrap(err, "failed to fetch availability zones") - } - mpool.Zones = azs - } - pool.Platform.AWS = &mpool - machines, err := aws.Machines(ic, &pool, "master", "master-user-data") - if err != nil { - return errors.Wrap(err, "failed to create master machine objects") - } - aws.ConfigMasters(machines, ic.ObjectMeta.Name) - - list := listFromMachines(machines) - raw, err := yaml.Marshal(list) - if err != nil { - return errors.Wrap(err, "failed to marshal") - } - m.MachinesRaw = raw - case "libvirt": - machines, err := libvirt.Machines(ic, &pool, "master", "master-user-data") - if err != nil { - return errors.Wrap(err, "failed to create master machine objects") - } - - list := listFromMachines(machines) - raw, err := yaml.Marshal(list) - if err != nil { - return errors.Wrap(err, "failed to marshal") - } - m.MachinesRaw = raw - case "openstack": - numOfMasters := int64(0) - if pool.Replicas != nil { - 
numOfMasters = *pool.Replicas - } - instances := []string{} - for i := 0; i < int(numOfMasters); i++ { - instances = append(instances, fmt.Sprintf("master-%d", i)) - } - config := openstack.MasterConfig{ - ClusterName: ic.ObjectMeta.Name, - Instances: instances, - Image: ic.Platform.OpenStack.BaseImage, - Region: ic.Platform.OpenStack.Region, - Machine: defaultOpenStackMachinePoolPlatform(), - } - - tags := map[string]string{ - "tectonicClusterID": ic.ClusterID, - } - config.Tags = tags - - config.Machine.Set(ic.Platform.OpenStack.DefaultMachinePlatform) - config.Machine.Set(pool.Platform.OpenStack) - - m.MachinesRaw = applyTemplateData(openstack.MasterMachinesTmpl, config) - default: - return fmt.Errorf("invalid Platform") - } - return nil -} - -func masterPool(pools []types.MachinePool) types.MachinePool { - for idx, pool := range pools { - if pool.Name == "master" { - return pools[idx] - } - } - return types.MachinePool{} -} - -func listFromMachines(objs []clusterapi.Machine) *metav1.List { - list := &metav1.List{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "List", - }, - } - for idx := range objs { - list.Items = append(list.Items, runtime.RawExtension{Object: &objs[idx]}) - } - return list -} diff --git a/pkg/asset/machines/openstack/OWNERS b/pkg/asset/machines/openstack/OWNERS deleted file mode 100644 index ea6fcb46def..00000000000 --- a/pkg/asset/machines/openstack/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md -# This file just uses aliases defined in OWNERS_ALIASES. - -approvers: - - openstack-approvers diff --git a/pkg/asset/machines/openstack/master.go b/pkg/asset/machines/openstack/master.go deleted file mode 100644 index f84bc27db0b..00000000000 --- a/pkg/asset/machines/openstack/master.go +++ /dev/null @@ -1,70 +0,0 @@ -// Package openstack generates Machine objects for openstack. 
-package openstack - -import ( - "text/template" - - "github.com/openshift/installer/pkg/types/openstack" -) - -// MasterConfig is used to generate the machine. -type MasterConfig struct { - ClusterName string - Instances []string - Image string - Tags map[string]string - Region string - Machine openstack.MachinePool -} - -// MasterMachinesTmpl is the template for master machines. -var MasterMachinesTmpl = template.Must(template.New("openstack-master-machines").Parse(` -{{- $c := . -}} -kind: List -apiVersion: v1 -metadata: - resourceVersion: "" - selfLink: "" -items: -{{- range $index,$instance := .Instances}} -- apiVersion: cluster.k8s.io/v1alpha1 - kind: Machine - metadata: - name: {{$c.ClusterName}}-master-{{$index}} - namespace: openshift-cluster-api - labels: - sigs.k8s.io/cluster-api-cluster: {{$c.ClusterName}} - sigs.k8s.io/cluster-api-machine-role: master - sigs.k8s.io/cluster-api-machine-type: master - spec: - providerConfig: - value: - apiVersion: openstack.cluster.k8s.io/v1alpha1 - kind: OpenStackMachineProviderConfig - image: - id: {{$c.Image}} - flavor: {{$c.Machine.FlavorName}} - placement: - region: {{$c.Region}} - subnet: - filters: - - name: "tag:Name" - values: - - "{{$c.ClusterName}}-master-*" - tags: -{{- range $key,$value := $c.Tags}} - - name: "{{$key}}" - value: "{{$value}}" -{{- end}} - securityGroups: - - filters: - - name: "tag:Name" - values: - - "{{$c.ClusterName}}_master_sg" - userDataSecret: - name: master-user-data - versions: - kubelet: "" - controlPlane: "" -{{- end -}} -`)) diff --git a/pkg/asset/machines/openstack/worker.go b/pkg/asset/machines/openstack/worker.go deleted file mode 100644 index 6a1844f0405..00000000000 --- a/pkg/asset/machines/openstack/worker.go +++ /dev/null @@ -1,74 +0,0 @@ -// Package openstack generates Machine objects for openstack. -package openstack - -import ( - "text/template" - - "github.com/openshift/installer/pkg/types/openstack" -) - -// Config is used to generate the machine. 
-type Config struct { - ClusterName string - Replicas int64 - Image string - Tags map[string]string - Region string - Machine openstack.MachinePool -} - -// WorkerMachineSetTmpl is template for worker machineset. -var WorkerMachineSetTmpl = template.Must(template.New("openstack-worker-machineset").Parse(` -apiVersion: cluster.k8s.io/v1alpha1 -kind: MachineSet -metadata: - name: {{.ClusterName}}-worker-0 - namespace: openshift-cluster-api - labels: - sigs.k8s.io/cluster-api-cluster: {{.ClusterName}} - sigs.k8s.io/cluster-api-machine-role: worker - sigs.k8s.io/cluster-api-machine-type: worker -spec: - replicas: {{.Replicas}} - selector: - matchLabels: - sigs.k8s.io/cluster-api-machineset: {{.ClusterName}}-worker-0 - sigs.k8s.io/cluster-api-cluster: {{.ClusterName}} - template: - metadata: - labels: - sigs.k8s.io/cluster-api-machineset: {{.ClusterName}}-worker-0 - sigs.k8s.io/cluster-api-cluster: {{.ClusterName}} - sigs.k8s.io/cluster-api-machine-role: worker - sigs.k8s.io/cluster-api-machine-type: worker - spec: - providerConfig: - value: - apiVersion: openstack.cluster.k8s.io/v1alpha1 - kind: OpenStackMachineProviderConfig - image: - id: {{.Image}} - flavor: {{.Machine.FlavorName}} - placement: - region: {{.Region}} - subnet: - filters: - - name: "tag:Name" - values: - - "{{.ClusterName}}-worker-*" - tags: -{{- range $key,$value := .Tags}} - - name: "{{$key}}" - value: "{{$value}}" -{{- end}} - securityGroups: - - filters: - - name: "tag:Name" - values: - - "{{.ClusterName}}_worker_sg" - userDataSecret: - name: worker-user-data - versions: - kubelet: "" - controlPlane: "" -`)) diff --git a/pkg/asset/machines/userdata.go b/pkg/asset/machines/userdata.go deleted file mode 100644 index c1687eda340..00000000000 --- a/pkg/asset/machines/userdata.go +++ /dev/null @@ -1,40 +0,0 @@ -package machines - -import ( - "bytes" - "encoding/base64" - "text/template" - - "github.com/pkg/errors" -) - -var userDataListTmpl = template.Must(template.New("user-data-list").Parse(` -kind: 
List -apiVersion: v1 -metadata: - resourceVersion: "" - selfLink: "" -items: -{{- range $name, $content := . }} -- apiVersion: v1 - kind: Secret - metadata: - name: {{$name}} - namespace: openshift-cluster-api - type: Opaque - data: - userData: {{$content}} -{{- end}} -`)) - -func userDataList(data map[string][]byte) ([]byte, error) { - encodedData := map[string]string{} - for name, content := range data { - encodedData[name] = base64.StdEncoding.EncodeToString(content) - } - buf := &bytes.Buffer{} - if err := userDataListTmpl.Execute(buf, encodedData); err != nil { - return nil, errors.Wrap(err, "failed to execute content.UserDataListTmpl") - } - return buf.Bytes(), nil -} diff --git a/pkg/asset/machines/worker.go b/pkg/asset/machines/worker.go deleted file mode 100644 index bff38c09643..00000000000 --- a/pkg/asset/machines/worker.go +++ /dev/null @@ -1,178 +0,0 @@ -package machines - -import ( - "bytes" - "context" - "fmt" - "text/template" - "time" - - "github.com/ghodss/yaml" - "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - clusterapi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/ignition/machine" - "github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/asset/machines/aws" - "github.com/openshift/installer/pkg/asset/machines/libvirt" - "github.com/openshift/installer/pkg/asset/machines/openstack" - "github.com/openshift/installer/pkg/rhcos" - "github.com/openshift/installer/pkg/types" - awstypes "github.com/openshift/installer/pkg/types/aws" - openstacktypes "github.com/openshift/installer/pkg/types/openstack" -) - -func defaultAWSMachinePoolPlatform() awstypes.MachinePool { - return awstypes.MachinePool{ - InstanceType: "m4.large", - } -} - -func defaultOpenStackMachinePoolPlatform() openstacktypes.MachinePool { - return openstacktypes.MachinePool{ - FlavorName: 
"m1.medium", - } -} - -// Worker generates the machinesets for `worker` machine pool. -type Worker struct { - MachineSetRaw []byte - UserDataSecretRaw []byte -} - -var _ asset.Asset = (*Worker)(nil) - -// Name returns a human friendly name for the Worker Asset. -func (w *Worker) Name() string { - return "Worker Machines" -} - -// Dependencies returns all of the dependencies directly needed by the -// Worker asset -func (w *Worker) Dependencies() []asset.Asset { - return []asset.Asset{ - &installconfig.InstallConfig{}, - &machine.Worker{}, - } -} - -// Generate generates the Worker asset. -func (w *Worker) Generate(dependencies asset.Parents) error { - installconfig := &installconfig.InstallConfig{} - wign := &machine.Worker{} - dependencies.Get(installconfig, wign) - - var err error - userDataMap := map[string][]byte{"worker-user-data": wign.File.Data} - w.UserDataSecretRaw, err = userDataList(userDataMap) - if err != nil { - return errors.Wrap(err, "failed to create user-data secret for worker machines") - } - - ic := installconfig.Config - pool := workerPool(ic.Machines) - switch ic.Platform.Name() { - case "aws": - mpool := defaultAWSMachinePoolPlatform() - mpool.Set(ic.Platform.AWS.DefaultMachinePlatform) - mpool.Set(pool.Platform.AWS) - if mpool.AMIID == "" { - ctx, cancel := context.WithTimeout(context.TODO(), 60*time.Second) - defer cancel() - ami, err := rhcos.AMI(ctx, rhcos.DefaultChannel, ic.Platform.AWS.Region) - if err != nil { - return errors.Wrap(err, "failed to determine default AMI") - } - mpool.AMIID = ami - } - if len(mpool.Zones) == 0 { - azs, err := aws.AvailabilityZones(ic.Platform.AWS.Region) - if err != nil { - return errors.Wrap(err, "failed to fetch availability zones") - } - mpool.Zones = azs - } - pool.Platform.AWS = &mpool - sets, err := aws.MachineSets(ic, &pool, "worker", "worker-user-data") - if err != nil { - return errors.Wrap(err, "failed to create worker machine objects") - } - - list := listFromMachineSets(sets) - raw, err := 
yaml.Marshal(list) - if err != nil { - return errors.Wrap(err, "failed to marshal") - } - w.MachineSetRaw = raw - case "libvirt": - sets, err := libvirt.MachineSets(ic, &pool, "worker", "worker-user-data") - if err != nil { - return errors.Wrap(err, "failed to create worker machine objects") - } - - list := listFromMachineSets(sets) - raw, err := yaml.Marshal(list) - if err != nil { - return errors.Wrap(err, "failed to marshal") - } - w.MachineSetRaw = raw - case "openstack": - numOfWorkers := int64(0) - if pool.Replicas != nil { - numOfWorkers = *pool.Replicas - } - config := openstack.Config{ - ClusterName: ic.ObjectMeta.Name, - Replicas: numOfWorkers, - Image: ic.Platform.OpenStack.BaseImage, - Region: ic.Platform.OpenStack.Region, - Machine: defaultOpenStackMachinePoolPlatform(), - } - - tags := map[string]string{ - "tectonicClusterID": ic.ClusterID, - } - config.Tags = tags - - config.Machine.Set(ic.Platform.OpenStack.DefaultMachinePlatform) - config.Machine.Set(pool.Platform.OpenStack) - - w.MachineSetRaw = applyTemplateData(openstack.WorkerMachineSetTmpl, config) - default: - return fmt.Errorf("invalid Platform") - } - return nil -} - -func workerPool(pools []types.MachinePool) types.MachinePool { - for idx, pool := range pools { - if pool.Name == "worker" { - return pools[idx] - } - } - return types.MachinePool{} -} - -func applyTemplateData(template *template.Template, templateData interface{}) []byte { - buf := &bytes.Buffer{} - if err := template.Execute(buf, templateData); err != nil { - panic(err) - } - return buf.Bytes() -} - -func listFromMachineSets(objs []clusterapi.MachineSet) *metav1.List { - list := &metav1.List{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "List", - }, - } - for idx := range objs { - list.Items = append(list.Items, runtime.RawExtension{Object: &objs[idx]}) - } - return list -} diff --git a/pkg/asset/manifests/cluster_k8s_io.go b/pkg/asset/manifests/cluster_k8s_io.go deleted file mode 100644 index 
009bcfb8e05..00000000000 --- a/pkg/asset/manifests/cluster_k8s_io.go +++ /dev/null @@ -1,65 +0,0 @@ -package manifests - -import ( - "github.com/ghodss/yaml" - "github.com/pkg/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/installconfig" - clusterv1a1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -// This file was originally in pkg/assets/machines, but is now in -// /manifests due to an import loop. - -// ClusterK8sIO generates the `Cluster.cluster.k8s.io/v1alpha1` object. -type ClusterK8sIO struct { - Raw []byte -} - -var _ asset.Asset = (*ClusterK8sIO)(nil) - -// Name returns a human friendly name for the ClusterK8sIO Asset. -func (c *ClusterK8sIO) Name() string { - return "Cluster.cluster.k8s.io/v1alpha1" -} - -// Dependencies returns all of the dependencies directly needed by the -// ClusterK8sIO asset -func (c *ClusterK8sIO) Dependencies() []asset.Asset { - return []asset.Asset{ - &installconfig.InstallConfig{}, - &Networking{}, - } -} - -// Generate generates the Worker asset. 
-func (c *ClusterK8sIO) Generate(dependencies asset.Parents) error { - installconfig := &installconfig.InstallConfig{} - dependencies.Get(installconfig) - - net := &Networking{} - dependencies.Get(net) - clusterNet, err := net.ClusterNetwork() - if err != nil { - return errors.Wrapf(err, "Could not generate ClusterNetworkingConfig") - } - - cluster := clusterv1a1.Cluster{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "cluster.k8s.io/v1alpha1", - Kind: "Cluster", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: installconfig.Config.ObjectMeta.Name, - Namespace: "openshift-cluster-api", - }, - Spec: clusterv1a1.ClusterSpec{ - ClusterNetwork: *clusterNet, - }, - } - - c.Raw, err = yaml.Marshal(cluster) - return err -} diff --git a/pkg/asset/manifests/ingress.go b/pkg/asset/manifests/ingress.go deleted file mode 100644 index 0a17fba6682..00000000000 --- a/pkg/asset/manifests/ingress.go +++ /dev/null @@ -1,122 +0,0 @@ -package manifests - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/ghodss/yaml" - "github.com/pkg/errors" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/asset/templates/content" - - configv1 "github.com/openshift/api/config/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var ( - ingCrdFilename = "cluster-ingress-01-crd.yaml" - ingCfgFilename = filepath.Join(manifestDir, "cluster-ingress-02-config.yml") -) - -// Ingress generates the cluster-ingress-*.yml files. -type Ingress struct { - config *configv1.Ingress - FileList []*asset.File -} - -var _ asset.WritableAsset = (*Ingress)(nil) - -// Name returns a human friendly name for the asset. -func (*Ingress) Name() string { - return "Ingress Config" -} - -// Dependencies returns all of the dependencies directly needed to generate -// the asset. 
-func (*Ingress) Dependencies() []asset.Asset { - return []asset.Asset{ - &installconfig.InstallConfig{}, - } -} - -// Generate generates the ingress config and its CRD. -func (ing *Ingress) Generate(dependencies asset.Parents) error { - installConfig := &installconfig.InstallConfig{} - dependencies.Get(installConfig) - - ing.config = &configv1.Ingress{ - TypeMeta: metav1.TypeMeta{ - APIVersion: configv1.SchemeGroupVersion.String(), - Kind: "Ingress", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster", - // not namespaced - }, - Spec: configv1.IngressSpec{ - Domain: fmt.Sprintf("apps.%s.%s", installConfig.Config.ObjectMeta.Name, installConfig.Config.BaseDomain), - }, - } - - configData, err := yaml.Marshal(ing.config) - if err != nil { - return errors.Wrapf(err, "failed to create %s manifests from InstallConfig", ing.Name()) - } - - crdData, err := content.GetBootkubeTemplate(ingCrdFilename) - if err != nil { - return err - } - - ing.FileList = []*asset.File{ - { - Filename: filepath.Join(manifestDir, ingCrdFilename), - Data: []byte(crdData), - }, - { - Filename: ingCfgFilename, - Data: configData, - }, - } - - return nil -} - -// Files returns the files generated by the asset. -func (ing *Ingress) Files() []*asset.File { - return ing.FileList -} - -// Load loads the already-rendered files back from disk. 
-func (ing *Ingress) Load(f asset.FileFetcher) (bool, error) { - crdFile, err := f.FetchByName(filepath.Join(manifestDir, ingCrdFilename)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - cfgFile, err := f.FetchByName(ingCfgFilename) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - - return false, err - } - - ingressConfig := &configv1.Ingress{} - if err := yaml.Unmarshal(cfgFile.Data, ingressConfig); err != nil { - return false, errors.Wrapf(err, "failed to unmarshal %s", ingCfgFilename) - } - - fileList := []*asset.File{crdFile, cfgFile} - - ing.FileList, ing.config = fileList, ingressConfig - - return true, nil -} diff --git a/pkg/asset/manifests/network.go b/pkg/asset/manifests/network.go deleted file mode 100644 index cfd4f21ba20..00000000000 --- a/pkg/asset/manifests/network.go +++ /dev/null @@ -1,202 +0,0 @@ -package manifests - -import ( - "os" - "path/filepath" - - "github.com/ghodss/yaml" - "github.com/pkg/errors" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/installconfig" - - netopv1 "github.com/openshift/cluster-network-operator/pkg/apis/networkoperator/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1a1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" -) - -var ( - noCrdFilename = filepath.Join(manifestDir, "cluster-network-01-crd.yml") - noCfgFilename = filepath.Join(manifestDir, "cluster-network-02-config.yml") -) - -const ( - - // We need to manually create our CRD first, so we can create the - // configuration instance of it. - // Other operators have their CRD created by the CVO, but we manually - // create our operator's configuration in the installer. 
- netConfigCRD = ` -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: networkconfigs.networkoperator.openshift.io -spec: - group: networkoperator.openshift.io - names: - kind: NetworkConfig - listKind: NetworkConfigList - plural: networkconfigs - singular: networkconfig - scope: Cluster - versions: - - name: v1 - served: true - storage: true -` -) - -// Networking generates the cluster-network-*.yml files. -type Networking struct { - config *netopv1.NetworkConfig - FileList []*asset.File -} - -var _ asset.WritableAsset = (*Networking)(nil) - -// Name returns a human friendly name for the operator. -func (no *Networking) Name() string { - return "Network Config" -} - -// Dependencies returns all of the dependencies directly needed to generate -// network configuration. -func (no *Networking) Dependencies() []asset.Asset { - return []asset.Asset{ - &installconfig.InstallConfig{}, - } -} - -// Generate generates the network operator config and its CRD. -func (no *Networking) Generate(dependencies asset.Parents) error { - installConfig := &installconfig.InstallConfig{} - dependencies.Get(installConfig) - - netConfig := installConfig.Config.Networking - - // determine pod address space. - // This can go away when we get rid of PodCIDR - // entirely in favor of ClusterNetworks - var clusterNets []netopv1.ClusterNetwork - if len(netConfig.ClusterNetworks) > 0 { - clusterNets = netConfig.ClusterNetworks - } else if !netConfig.PodCIDR.IPNet.IP.IsUnspecified() { - clusterNets = []netopv1.ClusterNetwork{ - { - CIDR: netConfig.PodCIDR.String(), - HostSubnetLength: 9, - }, - } - } else { - return errors.Errorf("Either PodCIDR or ClusterNetworks must be specified") - } - - defaultNet := netopv1.DefaultNetworkDefinition{ - Type: netConfig.Type, - } - - // Add any network-specific configuration defaults here. 
- switch netConfig.Type { - case netopv1.NetworkTypeOpenshiftSDN: - defaultNet.OpenshiftSDNConfig = &netopv1.OpenshiftSDNConfig{ - // Default to network policy, operator provides all other defaults. - Mode: netopv1.SDNModePolicy, - } - } - - no.config = &netopv1.NetworkConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: netopv1.SchemeGroupVersion.String(), - Kind: "NetworkConfig", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "default", - // not namespaced - }, - - Spec: netopv1.NetworkConfigSpec{ - ServiceNetwork: netConfig.ServiceCIDR.String(), - ClusterNetworks: clusterNets, - DefaultNetwork: defaultNet, - }, - } - - configData, err := yaml.Marshal(no.config) - if err != nil { - return errors.Wrapf(err, "failed to create %s manifests from InstallConfig", no.Name()) - } - - no.FileList = []*asset.File{ - { - Filename: noCrdFilename, - Data: []byte(netConfigCRD), - }, - { - Filename: noCfgFilename, - Data: configData, - }, - } - - return nil -} - -// Files returns the files generated by the asset. -func (no *Networking) Files() []*asset.File { - return no.FileList -} - -// ClusterNetwork returns the ClusterNetworkingConfig for the ClusterConfig -// object. This is called by ClusterK8sIO, which captures generalized cluster -// state but shouldn't need to be fully networking aware. -func (no *Networking) ClusterNetwork() (*clusterv1a1.ClusterNetworkingConfig, error) { - if no.config == nil { - // should be unreachable. - return nil, errors.Errorf("ClusterNetwork called before initialization") - } - - pods := []string{} - for _, cn := range no.config.Spec.ClusterNetworks { - pods = append(pods, cn.CIDR) - } - - cn := &clusterv1a1.ClusterNetworkingConfig{ - Services: clusterv1a1.NetworkRanges{ - CIDRBlocks: []string{no.config.Spec.ServiceNetwork}, - }, - Pods: clusterv1a1.NetworkRanges{ - CIDRBlocks: pods, - }, - } - return cn, nil -} - -// Load loads the already-rendered files back from disk. 
-func (no *Networking) Load(f asset.FileFetcher) (bool, error) { - crdFile, err := f.FetchByName(noCrdFilename) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - - cfgFile, err := f.FetchByName(noCfgFilename) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - - return false, err - } - - netConfig := &netopv1.NetworkConfig{} - if err := yaml.Unmarshal(cfgFile.Data, netConfig); err != nil { - return false, errors.Wrapf(err, "failed to unmarshal %s", noCfgFilename) - } - - fileList := []*asset.File{crdFile, cfgFile} - - no.FileList, no.config = fileList, netConfig - - return true, nil -} diff --git a/pkg/asset/manifests/operators.go b/pkg/asset/manifests/operators.go deleted file mode 100644 index b2c70dcfcb0..00000000000 --- a/pkg/asset/manifests/operators.go +++ /dev/null @@ -1,272 +0,0 @@ -// Package manifests deals with creating manifests for all manifests to be installed for the cluster -package manifests - -import ( - "bytes" - "encoding/base64" - "fmt" - "path/filepath" - "strings" - "text/template" - - "github.com/ghodss/yaml" - "github.com/pkg/errors" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/asset/templates/content/bootkube" - "github.com/openshift/installer/pkg/asset/tls" -) - -const ( - manifestDir = "manifests" -) - -var ( - kubeSysConfigPath = filepath.Join(manifestDir, "cluster-config.yaml") - - _ asset.WritableAsset = (*Manifests)(nil) - - customTmplFuncs = template.FuncMap{ - "indent": indent, - "add": func(i, j int) int { - return i + j - }, - } -) - -// Manifests generates the dependent operator config.yaml files -type Manifests struct { - KubeSysConfig *configurationObject - FileList []*asset.File -} - -type genericData map[string]string - -// Name returns a human friendly name for the operator -func (m *Manifests) Name() string { - return "Common Manifests" -} - -// Dependencies 
returns all of the dependencies directly needed by a -// Manifests asset. -func (m *Manifests) Dependencies() []asset.Asset { - return []asset.Asset{ - &installconfig.InstallConfig{}, - &Ingress{}, - &Networking{}, - &tls.RootCA{}, - &tls.EtcdCA{}, - &tls.IngressCertKey{}, - &tls.KubeCA{}, - &tls.ServiceServingCA{}, - &tls.EtcdClientCertKey{}, - &tls.MCSCertKey{}, - &tls.KubeletCertKey{}, - - &bootkube.KubeCloudConfig{}, - &bootkube.MachineConfigServerTLSSecret{}, - &bootkube.OpenshiftServiceCertSignerSecret{}, - &bootkube.Pull{}, - &bootkube.CVOOverrides{}, - &bootkube.LegacyCVOOverrides{}, - &bootkube.HostEtcdServiceEndpointsKubeSystem{}, - &bootkube.KubeSystemConfigmapEtcdServingCA{}, - &bootkube.KubeSystemConfigmapRootCA{}, - &bootkube.KubeSystemSecretEtcdClient{}, - - &bootkube.OpenshiftWebConsoleNamespace{}, - &bootkube.OpenshiftMachineConfigOperator{}, - &bootkube.OpenshiftClusterAPINamespace{}, - &bootkube.OpenshiftServiceCertSignerNamespace{}, - &bootkube.EtcdServiceKubeSystem{}, - &bootkube.HostEtcdServiceKubeSystem{}, - } -} - -// Generate generates the respective operator config.yml files -func (m *Manifests) Generate(dependencies asset.Parents) error { - ingress := &Ingress{} - network := &Networking{} - installConfig := &installconfig.InstallConfig{} - dependencies.Get(installConfig, ingress, network) - - // mao go to kube-system config map - m.KubeSysConfig = configMap("kube-system", "cluster-config-v1", genericData{ - "install-config": string(installConfig.Files()[0].Data), - }) - kubeSysConfigData, err := yaml.Marshal(m.KubeSysConfig) - if err != nil { - return errors.Wrap(err, "failed to create kube-system/cluster-config-v1 configmap") - } - - m.FileList = []*asset.File{ - { - Filename: kubeSysConfigPath, - Data: kubeSysConfigData, - }, - } - m.FileList = append(m.FileList, m.generateBootKubeManifests(dependencies)...) - - m.FileList = append(m.FileList, ingress.Files()...) - m.FileList = append(m.FileList, network.Files()...) 
- - return nil -} - -// Files returns the files generated by the asset. -func (m *Manifests) Files() []*asset.File { - return m.FileList -} - -func (m *Manifests) generateBootKubeManifests(dependencies asset.Parents) []*asset.File { - installConfig := &installconfig.InstallConfig{} - etcdCA := &tls.EtcdCA{} - kubeCA := &tls.KubeCA{} - mcsCertKey := &tls.MCSCertKey{} - etcdClientCertKey := &tls.EtcdClientCertKey{} - rootCA := &tls.RootCA{} - serviceServingCA := &tls.ServiceServingCA{} - dependencies.Get( - installConfig, - etcdCA, - etcdClientCertKey, - kubeCA, - mcsCertKey, - rootCA, - serviceServingCA, - ) - - etcdEndpointHostnames := make([]string, installConfig.Config.MasterCount()) - for i := range etcdEndpointHostnames { - etcdEndpointHostnames[i] = fmt.Sprintf("%s-etcd-%d", installConfig.Config.ObjectMeta.Name, i) - } - - templateData := &bootkubeTemplateData{ - Base64encodeCloudProviderConfig: "", // FIXME - EtcdCaCert: string(etcdCA.Cert()), - EtcdClientCert: base64.StdEncoding.EncodeToString(etcdClientCertKey.Cert()), - EtcdClientKey: base64.StdEncoding.EncodeToString(etcdClientCertKey.Key()), - KubeCaCert: base64.StdEncoding.EncodeToString(kubeCA.Cert()), - KubeCaKey: base64.StdEncoding.EncodeToString(kubeCA.Key()), - McsTLSCert: base64.StdEncoding.EncodeToString(mcsCertKey.Cert()), - McsTLSKey: base64.StdEncoding.EncodeToString(mcsCertKey.Key()), - PullSecret: base64.StdEncoding.EncodeToString([]byte(installConfig.Config.PullSecret)), - RootCaCert: string(rootCA.Cert()), - ServiceServingCaCert: base64.StdEncoding.EncodeToString(serviceServingCA.Cert()), - ServiceServingCaKey: base64.StdEncoding.EncodeToString(serviceServingCA.Key()), - CVOClusterID: installConfig.Config.ClusterID, - EtcdEndpointHostnames: etcdEndpointHostnames, - EtcdEndpointDNSSuffix: installConfig.Config.BaseDomain, - } - - kubeCloudConfig := &bootkube.KubeCloudConfig{} - machineConfigServerTLSSecret := &bootkube.MachineConfigServerTLSSecret{} - openshiftServiceCertSignerSecret := 
&bootkube.OpenshiftServiceCertSignerSecret{} - pull := &bootkube.Pull{} - cVOOverrides := &bootkube.CVOOverrides{} - legacyCVOOverrides := &bootkube.LegacyCVOOverrides{} - hostEtcdServiceEndpointsKubeSystem := &bootkube.HostEtcdServiceEndpointsKubeSystem{} - kubeSystemConfigmapEtcdServingCA := &bootkube.KubeSystemConfigmapEtcdServingCA{} - kubeSystemConfigmapRootCA := &bootkube.KubeSystemConfigmapRootCA{} - kubeSystemSecretEtcdClient := &bootkube.KubeSystemSecretEtcdClient{} - - openshiftWebConsoleNamespace := &bootkube.OpenshiftWebConsoleNamespace{} - openshiftMachineConfigOperator := &bootkube.OpenshiftMachineConfigOperator{} - openshiftClusterAPINamespace := &bootkube.OpenshiftClusterAPINamespace{} - openshiftServiceCertSignerNamespace := &bootkube.OpenshiftServiceCertSignerNamespace{} - etcdServiceKubeSystem := &bootkube.EtcdServiceKubeSystem{} - hostEtcdServiceKubeSystem := &bootkube.HostEtcdServiceKubeSystem{} - dependencies.Get( - kubeCloudConfig, - machineConfigServerTLSSecret, - openshiftServiceCertSignerSecret, - pull, - cVOOverrides, - legacyCVOOverrides, - hostEtcdServiceEndpointsKubeSystem, - kubeSystemConfigmapEtcdServingCA, - kubeSystemConfigmapRootCA, - kubeSystemSecretEtcdClient, - openshiftWebConsoleNamespace, - openshiftMachineConfigOperator, - openshiftClusterAPINamespace, - openshiftServiceCertSignerNamespace, - etcdServiceKubeSystem, - hostEtcdServiceKubeSystem, - ) - assetData := map[string][]byte{ - "kube-cloud-config.yaml": applyTemplateData(kubeCloudConfig.Files()[0].Data, templateData), - "machine-config-server-tls-secret.yaml": applyTemplateData(machineConfigServerTLSSecret.Files()[0].Data, templateData), - "openshift-service-signer-secret.yaml": applyTemplateData(openshiftServiceCertSignerSecret.Files()[0].Data, templateData), - "pull.json": applyTemplateData(pull.Files()[0].Data, templateData), - "cvo-overrides.yaml": applyTemplateData(cVOOverrides.Files()[0].Data, templateData), - "legacy-cvo-overrides.yaml": 
applyTemplateData(legacyCVOOverrides.Files()[0].Data, templateData), - "host-etcd-service-endpoints.yaml": applyTemplateData(hostEtcdServiceEndpointsKubeSystem.Files()[0].Data, templateData), - "kube-system-configmap-etcd-serving-ca.yaml": applyTemplateData(kubeSystemConfigmapEtcdServingCA.Files()[0].Data, templateData), - "kube-system-configmap-root-ca.yaml": applyTemplateData(kubeSystemConfigmapRootCA.Files()[0].Data, templateData), - "kube-system-secret-etcd-client.yaml": applyTemplateData(kubeSystemSecretEtcdClient.Files()[0].Data, templateData), - - "03-openshift-web-console-namespace.yaml": []byte(openshiftWebConsoleNamespace.Files()[0].Data), - "04-openshift-machine-config-operator.yaml": []byte(openshiftMachineConfigOperator.Files()[0].Data), - "05-openshift-cluster-api-namespace.yaml": []byte(openshiftClusterAPINamespace.Files()[0].Data), - "09-openshift-service-signer-namespace.yaml": []byte(openshiftServiceCertSignerNamespace.Files()[0].Data), - "etcd-service.yaml": []byte(etcdServiceKubeSystem.Files()[0].Data), - "host-etcd-service.yaml": []byte(hostEtcdServiceKubeSystem.Files()[0].Data), - } - - files := make([]*asset.File, 0, len(assetData)) - for name, data := range assetData { - files = append(files, &asset.File{ - Filename: filepath.Join(manifestDir, name), - Data: data, - }) - } - - return files -} - -func applyTemplateData(data []byte, templateData interface{}) []byte { - template := template.Must(template.New("template").Funcs(customTmplFuncs).Parse(string(data))) - buf := &bytes.Buffer{} - if err := template.Execute(buf, templateData); err != nil { - panic(err) - } - return buf.Bytes() -} - -// Load returns the manifests asset from disk. 
-func (m *Manifests) Load(f asset.FileFetcher) (bool, error) { - fileList, err := f.FetchByPattern(filepath.Join(manifestDir, "*")) - if err != nil { - return false, err - } - if len(fileList) == 0 { - return false, nil - } - - kubeSysConfig := &configurationObject{} - var found bool - for _, file := range fileList { - if file.Filename == kubeSysConfigPath { - if err := yaml.Unmarshal(file.Data, kubeSysConfig); err != nil { - return false, errors.Wrapf(err, "failed to unmarshal cluster-config.yaml") - } - found = true - } - } - - if !found { - return false, nil - - } - - m.FileList, m.KubeSysConfig = fileList, kubeSysConfig - - return true, nil -} - -func indent(indention int, v string) string { - newline := "\n" + strings.Repeat(" ", indention) - return strings.Replace(v, "\n", newline, -1) -} diff --git a/pkg/asset/manifests/tectonic.go b/pkg/asset/manifests/tectonic.go deleted file mode 100644 index 98d30ae7670..00000000000 --- a/pkg/asset/manifests/tectonic.go +++ /dev/null @@ -1,142 +0,0 @@ -package manifests - -import ( - "encoding/base64" - "path/filepath" - - "github.com/aws/aws-sdk-go/aws/session" - "github.com/ghodss/yaml" - - "github.com/gophercloud/utils/openstack/clientconfig" - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/installconfig" - "github.com/openshift/installer/pkg/asset/machines" - "github.com/openshift/installer/pkg/asset/templates/content/tectonic" -) - -const ( - tectonicManifestDir = "tectonic" -) - -var ( - _ asset.WritableAsset = (*Tectonic)(nil) -) - -// Tectonic generates the dependent resource manifests for tectonic (as against bootkube) -type Tectonic struct { - FileList []*asset.File -} - -// Name returns a human friendly name for the operator -func (t *Tectonic) Name() string { - return "Tectonic Manifests" -} - -// Dependencies returns all of the dependencies directly needed by the -// Tectonic asset -func (t *Tectonic) Dependencies() []asset.Asset { - return []asset.Asset{ - 
&installconfig.InstallConfig{}, - &ClusterK8sIO{}, - &machines.Worker{}, - &machines.Master{}, - - &tectonic.BindingDiscovery{}, - &tectonic.CloudCredsSecret{}, - &tectonic.RoleCloudCredsSecretReader{}, - } -} - -// Generate generates the respective operator config.yml files -func (t *Tectonic) Generate(dependencies asset.Parents) error { - installConfig := &installconfig.InstallConfig{} - clusterk8sio := &ClusterK8sIO{} - worker := &machines.Worker{} - master := &machines.Master{} - dependencies.Get(installConfig, clusterk8sio, worker, master) - var cloudCreds cloudCredsSecretData - platform := installConfig.Config.Platform.Name() - switch platform { - case "aws": - ssn := session.Must(session.NewSessionWithOptions(session.Options{ - SharedConfigState: session.SharedConfigEnable, - })) - creds, err := ssn.Config.Credentials.Get() - if err != nil { - return err - } - cloudCreds = cloudCredsSecretData{ - AWS: &AwsCredsSecretData{ - Base64encodeAccessKeyID: base64.StdEncoding.EncodeToString([]byte(creds.AccessKeyID)), - Base64encodeSecretAccessKey: base64.StdEncoding.EncodeToString([]byte(creds.SecretAccessKey)), - }, - } - case "openstack": - clouds, err := clientconfig.LoadCloudsYAML() - if err != nil { - return err - } - - marshalled, err := yaml.Marshal(clouds) - if err != nil { - return err - } - - credsEncoded := base64.StdEncoding.EncodeToString(marshalled) - cloudCreds = cloudCredsSecretData{ - OpenStack: &OpenStackCredsSecretData{ - Base64encodeCloudCreds: credsEncoded, - }, - } - } - - templateData := &tectonicTemplateData{ - CloudCreds: cloudCreds, - } - - bindingDiscovery := &tectonic.BindingDiscovery{} - cloudCredsSecret := &tectonic.CloudCredsSecret{} - roleCloudCredsSecretReader := &tectonic.RoleCloudCredsSecretReader{} - dependencies.Get( - bindingDiscovery, - cloudCredsSecret, - roleCloudCredsSecretReader) - assetData := map[string][]byte{ - "99_binding-discovery.yaml": []byte(bindingDiscovery.Files()[0].Data), - 
"99_openshift-cluster-api_cluster.yaml": clusterk8sio.Raw, - "99_openshift-cluster-api_master-machines.yaml": master.MachinesRaw, - "99_openshift-cluster-api_master-user-data-secret.yaml": master.UserDataSecretRaw, - "99_openshift-cluster-api_worker-machineset.yaml": worker.MachineSetRaw, - "99_openshift-cluster-api_worker-user-data-secret.yaml": worker.UserDataSecretRaw, - } - - switch platform { - case "aws", "openstack": - assetData["99_cloud-creds-secret.yaml"] = applyTemplateData(cloudCredsSecret.Files()[0].Data, templateData) - assetData["99_role-cloud-creds-secret-reader.yaml"] = applyTemplateData(roleCloudCredsSecretReader.Files()[0].Data, templateData) - } - - t.FileList = []*asset.File{} - for name, data := range assetData { - t.FileList = append(t.FileList, &asset.File{ - Filename: filepath.Join(tectonicManifestDir, name), - Data: data, - }) - } - - return nil -} - -// Files returns the files generated by the asset. -func (t *Tectonic) Files() []*asset.File { - return t.FileList -} - -// Load returns the tectonic asset from disk. 
-func (t *Tectonic) Load(f asset.FileFetcher) (bool, error) { - fileList, err := f.FetchByPattern(filepath.Join(tectonicManifestDir, "*")) - if err != nil { - return false, err - } - return len(fileList) > 0, nil -} diff --git a/pkg/asset/manifests/template.go b/pkg/asset/manifests/template.go deleted file mode 100644 index 71dbdefecd8..00000000000 --- a/pkg/asset/manifests/template.go +++ /dev/null @@ -1,40 +0,0 @@ -package manifests - -// AwsCredsSecretData holds encoded credentials and is used to generate cloud-creds secret -type AwsCredsSecretData struct { - Base64encodeAccessKeyID string - Base64encodeSecretAccessKey string -} - -// OpenStackCredsSecretData holds encoded credentials and is used to generate cloud-creds secret -type OpenStackCredsSecretData struct { - Base64encodeCloudCreds string -} - -type cloudCredsSecretData struct { - AWS *AwsCredsSecretData - OpenStack *OpenStackCredsSecretData -} - -type bootkubeTemplateData struct { - Base64encodeCloudProviderConfig string - EtcdCaCert string - EtcdClientCert string - EtcdClientKey string - KubeCaCert string - KubeCaKey string - McsTLSCert string - McsTLSKey string - PullSecret string - RootCaCert string - ServiceServingCaCert string - ServiceServingCaKey string - WorkerIgnConfig string - CVOClusterID string - EtcdEndpointHostnames []string - EtcdEndpointDNSSuffix string -} - -type tectonicTemplateData struct { - CloudCreds cloudCredsSecretData -} diff --git a/pkg/asset/manifests/utils.go b/pkg/asset/manifests/utils.go deleted file mode 100644 index d277efa1ab6..00000000000 --- a/pkg/asset/manifests/utils.go +++ /dev/null @@ -1,49 +0,0 @@ -package manifests - -import ( - "fmt" - - "github.com/openshift/installer/pkg/types" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type configurationObject struct { - metav1.TypeMeta - - Metadata metadata `json:"metadata,omitempty"` - Data genericData `json:"data,omitempty"` -} - -type metadata struct { - Name string `json:"name,omitempty"` - Namespace string 
`json:"namespace,omitempty"` -} - -func configMap(namespace, name string, data genericData) *configurationObject { - return &configurationObject{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ConfigMap", - }, - Metadata: metadata{ - Name: name, - Namespace: namespace, - }, - Data: data, - } -} - -// Converts a platform to the cloudProvider that k8s understands -func tectonicCloudProvider(platform types.Platform) string { - if platform.AWS != nil { - return "aws" - } - if platform.Libvirt != nil { - return "libvirt" - } - return "" -} - -func getAPIServerURL(ic *types.InstallConfig) string { - return fmt.Sprintf("https://%s-api.%s:6443", ic.ObjectMeta.Name, ic.BaseDomain) -} diff --git a/pkg/asset/parents.go b/pkg/asset/parents.go deleted file mode 100644 index 12d45062116..00000000000 --- a/pkg/asset/parents.go +++ /dev/null @@ -1,24 +0,0 @@ -package asset - -import ( - "reflect" -) - -// Parents is the collection of assets upon which another asset is directly -// dependent. -type Parents map[reflect.Type]Asset - -// Add adds the specified assets to the parents collection. -func (p Parents) Add(assets ...Asset) { - for _, a := range assets { - p[reflect.TypeOf(a)] = a - } -} - -// Get populates the state of the specified assets with the state stored in the -// parents collection. 
-func (p Parents) Get(assets ...Asset) { - for _, a := range assets { - reflect.ValueOf(a).Elem().Set(reflect.ValueOf(p[reflect.TypeOf(a)]).Elem()) - } -} diff --git a/pkg/asset/parents_test.go b/pkg/asset/parents_test.go deleted file mode 100644 index 58606fb5592..00000000000 --- a/pkg/asset/parents_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package asset - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -type parentsAsset struct { - x int -} - -func (a *parentsAsset) Name() string { - return "parents-asset" -} - -func (a *parentsAsset) Dependencies() []Asset { - return []Asset{} -} - -func (a *parentsAsset) Generate(Parents) error { - return nil -} - -func TestParentsGetPointer(t *testing.T) { - origAsset := &parentsAsset{x: 1} - parents := Parents{} - parents.Add(origAsset) - - retrievedAsset := &parentsAsset{} - parents.Get(retrievedAsset) - assert.Equal(t, 1, retrievedAsset.x) -} diff --git a/pkg/asset/state.go b/pkg/asset/state.go deleted file mode 100644 index bd92151627c..00000000000 --- a/pkg/asset/state.go +++ /dev/null @@ -1,42 +0,0 @@ -package asset - -import ( - "io/ioutil" - "os" - "path/filepath" - - "github.com/pkg/errors" -) - -// State is the state of an Asset. -type State struct { - Contents []Content -} - -// Content is a generated portion of an Asset. -type Content struct { - Name string // the path on disk for this content. - Data []byte -} - -// PersistToFile persists the data in the State to files. Each Content entry that -// has a non-empty Name will be persisted to a file with that name. 
-func (s *State) PersistToFile(directory string) error { - if s == nil { - return nil - } - - for _, c := range s.Contents { - if c.Name == "" { - continue - } - path := filepath.Join(directory, c.Name) - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return errors.Wrap(err, "failed to create dir") - } - if err := ioutil.WriteFile(path, c.Data, 0644); err != nil { - return errors.Wrap(err, "failed to write file") - } - } - return nil -} diff --git a/pkg/asset/store.go b/pkg/asset/store.go deleted file mode 100644 index e8c1eaf4fcf..00000000000 --- a/pkg/asset/store.go +++ /dev/null @@ -1,348 +0,0 @@ -package asset - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - "reflect" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - stateFileName = ".openshift_install_state.json" -) - -// Store is a store for the states of assets. -type Store interface { - // Fetch retrieves the state of the given asset, generating it and its - // dependencies if necessary. - Fetch(Asset) error - - // Destroy removes the asset from all its internal state and also from - // disk if possible. - Destroy(Asset) error -} - -// assetSource indicates from where the asset was fetched -type assetSource int - -const ( - // unsourced indicates that the asset has not been fetched - unfetched assetSource = iota - // generatedSource indicates that the asset was generated - generatedSource - // onDiskSource indicates that the asset was fetched from disk - onDiskSource - // stateFileSource indicates that the asset was fetched from the state file - stateFileSource -) - -type assetState struct { - // asset is the asset. - // If the asset has not been fetched, then this will be nil. - asset Asset - // source is the source from which the asset was fetched - source assetSource - // anyParentsDirty is true if any of the parents of the asset are dirty - anyParentsDirty bool - // presentOnDisk is true if the asset in on-disk. 
This is set whether the - // asset is sourced from on-disk or not. It is used in purging consumed assets. - presentOnDisk bool -} - -// StoreImpl is the implementation of Store. -type StoreImpl struct { - directory string - assets map[reflect.Type]*assetState - stateFileAssets map[string]json.RawMessage - fileFetcher FileFetcher -} - -// NewStore returns an asset store that implements the Store interface. -func NewStore(dir string) (Store, error) { - store := &StoreImpl{ - directory: dir, - fileFetcher: &fileFetcher{directory: dir}, - assets: map[reflect.Type]*assetState{}, - } - - if err := store.loadStateFile(); err != nil { - return nil, err - } - return store, nil -} - -// Fetch retrieves the state of the given asset, generating it and its -// dependencies if necessary. -func (s *StoreImpl) Fetch(asset Asset) error { - if err := s.fetch(asset, ""); err != nil { - return err - } - if err := s.saveStateFile(); err != nil { - return errors.Wrapf(err, "failed to save state") - } - if wa, ok := asset.(WritableAsset); ok { - return errors.Wrapf(s.purge(wa), "failed to purge asset") - } - return nil -} - -// Destroy removes the asset from all its internal state and also from -// disk if possible. 
-func (s *StoreImpl) Destroy(asset Asset) error { - if sa, ok := s.assets[reflect.TypeOf(asset)]; ok { - reflect.ValueOf(asset).Elem().Set(reflect.ValueOf(sa.asset).Elem()) - } else if s.isAssetInState(asset) { - if err := s.loadAssetFromState(asset); err != nil { - return err - } - } else { - // nothing to do - return nil - } - - if wa, ok := asset.(WritableAsset); ok { - if err := deleteAssetFromDisk(wa, s.directory); err != nil { - return err - } - } - - delete(s.assets, reflect.TypeOf(asset)) - delete(s.stateFileAssets, reflect.TypeOf(asset).String()) - return s.saveStateFile() -} - -// loadStateFile retrieves the state from the state file present in the given directory -// and returns the assets map -func (s *StoreImpl) loadStateFile() error { - path := filepath.Join(s.directory, stateFileName) - assets := map[string]json.RawMessage{} - data, err := ioutil.ReadFile(path) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - err = json.Unmarshal(data, &assets) - if err != nil { - return errors.Wrapf(err, "failed to unmarshal state file %q", path) - } - s.stateFileAssets = assets - return nil -} - -// loadAssetFromState renders the asset object arguments from the state file contents. -func (s *StoreImpl) loadAssetFromState(asset Asset) error { - bytes, ok := s.stateFileAssets[reflect.TypeOf(asset).String()] - if !ok { - return errors.Errorf("asset %q is not found in the state file", asset.Name()) - } - return json.Unmarshal(bytes, asset) -} - -// isAssetInState tests whether the asset is in the state file. 
-func (s *StoreImpl) isAssetInState(asset Asset) bool { - _, ok := s.stateFileAssets[reflect.TypeOf(asset).String()] - return ok -} - -// saveStateFile dumps the entire state map into a file -func (s *StoreImpl) saveStateFile() error { - if s.stateFileAssets == nil { - s.stateFileAssets = map[string]json.RawMessage{} - } - for k, v := range s.assets { - if v.source == unfetched { - continue - } - data, err := json.MarshalIndent(v.asset, "", " ") - if err != nil { - return err - } - s.stateFileAssets[k.String()] = json.RawMessage(data) - } - data, err := json.MarshalIndent(s.stateFileAssets, "", " ") - if err != nil { - return err - } - - path := filepath.Join(s.directory, stateFileName) - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - if err := ioutil.WriteFile(path, data, 0644); err != nil { - return err - } - return nil -} - -// fetch populates the given asset, generating it and its dependencies if -// necessary, and returns whether or not the asset had to be regenerated and -// any errors. -func (s *StoreImpl) fetch(asset Asset, indent string) error { - logrus.Debugf("%sFetching %q...", indent, asset.Name()) - - assetState, ok := s.assets[reflect.TypeOf(asset)] - if !ok { - if _, err := s.load(asset, ""); err != nil { - return err - } - assetState = s.assets[reflect.TypeOf(asset)] - } - - // Return immediately if the asset has been fetched before, - // this is because we are doing a depth-first-search, it's guaranteed - // that we always fetch the parent before children, so we don't need - // to worry about invalidating anything in the cache. 
- if assetState.source != unfetched { - logrus.Debugf("%sReusing previously-fetched %q", indent, asset.Name()) - reflect.ValueOf(asset).Elem().Set(reflect.ValueOf(assetState.asset).Elem()) - return nil - } - - // Re-generate the asset - dependencies := asset.Dependencies() - parents := make(Parents, len(dependencies)) - for _, d := range dependencies { - if err := s.fetch(d, increaseIndent(indent)); err != nil { - return errors.Wrapf(err, "failed to fetch dependency of %q", asset.Name()) - } - parents.Add(d) - } - logrus.Debugf("%sGenerating %q...", indent, asset.Name()) - if err := asset.Generate(parents); err != nil { - return errors.Wrapf(err, "failed to generate asset %q", asset.Name()) - } - assetState.asset = asset - assetState.source = generatedSource - return nil -} - -// load loads the asset and all of its ancestors from on-disk and the state file. -func (s *StoreImpl) load(asset Asset, indent string) (*assetState, error) { - logrus.Debugf("%sLoading %q...", indent, asset.Name()) - - // Stop descent if the asset has already been loaded. - if state, ok := s.assets[reflect.TypeOf(asset)]; ok { - return state, nil - } - - // Load dependencies from on-disk. - anyParentsDirty := false - for _, d := range asset.Dependencies() { - state, err := s.load(d, increaseIndent(indent)) - if err != nil { - return nil, err - } - if state.anyParentsDirty || state.source == onDiskSource { - anyParentsDirty = true - } - } - - // Try to load from on-disk. - var ( - onDiskAsset WritableAsset - foundOnDisk bool - ) - if _, isWritable := asset.(WritableAsset); isWritable { - onDiskAsset = reflect.New(reflect.TypeOf(asset).Elem()).Interface().(WritableAsset) - var err error - foundOnDisk, err = onDiskAsset.Load(s.fileFetcher) - if err != nil { - return nil, errors.Wrapf(err, "failed to load asset %q", asset.Name()) - } - } - - // Try to load from state file. 
- var ( - stateFileAsset Asset - foundInStateFile bool - onDiskMatchesStateFile bool - ) - // Do not need to bother with loading from state file if any of the parents - // are dirty because the asset must be re-generated in this case. - if !anyParentsDirty { - foundInStateFile = s.isAssetInState(asset) - if foundInStateFile { - stateFileAsset = reflect.New(reflect.TypeOf(asset).Elem()).Interface().(Asset) - if err := s.loadAssetFromState(stateFileAsset); err != nil { - return nil, errors.Wrapf(err, "failed to load asset %q from state file", asset.Name()) - } - } - - if foundOnDisk && foundInStateFile { - logrus.Debugf("%sLoading %q from both state file and target directory", indent, asset.Name()) - - // If the on-disk asset is the same as the one in the state file, there - // is no need to consider the one on disk and to mark the asset dirty. - onDiskMatchesStateFile = reflect.DeepEqual(onDiskAsset, stateFileAsset) - if onDiskMatchesStateFile { - logrus.Debugf("%sOn-disk %q matches asset in state file", indent, asset.Name()) - } - } - } - - var ( - assetToStore Asset - source assetSource - ) - switch { - // A parent is dirty. The asset must be re-generated. - case anyParentsDirty: - if foundOnDisk { - logrus.Warningf("%sDiscarding the %q that was provided in the target directory because its dependencies are dirty and it needs to be regenerated", indent, asset.Name()) - } - source = unfetched - // The asset is on disk and that differs from what is in the source file. - // The asset is sourced from on disk. - case foundOnDisk && !onDiskMatchesStateFile: - logrus.Debugf("%sUsing %q loaded from target directory", indent, asset.Name()) - assetToStore = onDiskAsset - source = onDiskSource - // The asset is in the state file. The asset is sourced from state file. - case foundInStateFile: - logrus.Debugf("%sUsing %q loaded from state file", indent, asset.Name()) - assetToStore = stateFileAsset - source = stateFileSource - // There is no existing source for the asset. 
The asset will be generated. - default: - source = unfetched - } - - state := &assetState{ - asset: assetToStore, - source: source, - anyParentsDirty: anyParentsDirty, - presentOnDisk: foundOnDisk, - } - s.assets[reflect.TypeOf(asset)] = state - return state, nil -} - -// purge deletes the on-disk assets that are consumed already. -// E.g., install-config.yml will be deleted after fetching 'manifests'. -// The target asset is excluded. -func (s *StoreImpl) purge(excluded WritableAsset) error { - for _, assetState := range s.assets { - if !assetState.presentOnDisk { - continue - } - if reflect.TypeOf(assetState.asset) == reflect.TypeOf(excluded) { - continue - } - logrus.Infof("Consuming %q from target directory", assetState.asset.Name()) - if err := deleteAssetFromDisk(assetState.asset.(WritableAsset), s.directory); err != nil { - return err - } - assetState.presentOnDisk = false - } - return nil -} - -func increaseIndent(indent string) string { - return indent + " " -} diff --git a/pkg/asset/store_test.go b/pkg/asset/store_test.go deleted file mode 100644 index 8c13d44b2d8..00000000000 --- a/pkg/asset/store_test.go +++ /dev/null @@ -1,387 +0,0 @@ -package asset - -import ( - "io/ioutil" - "os" - "reflect" - "testing" - - "github.com/stretchr/testify/assert" -) - -var ( - // It is unfortunate that these need to be global variables. However, the - // asset store creates new assets by type, so the tests cannot store behavior - // state in the assets themselves. 
- generationLog []string - dependencies map[reflect.Type][]Asset - onDiskAssets map[reflect.Type]bool -) - -func clearAssetBehaviors() { - generationLog = []string{} - dependencies = map[reflect.Type][]Asset{} - onDiskAssets = map[reflect.Type]bool{} -} - -func dependenciesTestStoreAsset(a Asset) []Asset { - return dependencies[reflect.TypeOf(a)] -} - -func generateTestStoreAsset(a Asset) error { - generationLog = append(generationLog, a.Name()) - return nil -} - -func fileTestStoreAsset(a Asset) []*File { - return []*File{{Filename: a.Name()}} -} - -func loadTestStoreAsset(a Asset) (bool, error) { - return onDiskAssets[reflect.TypeOf(a)], nil -} - -type testStoreAssetA struct{} - -func (a *testStoreAssetA) Name() string { - return "a" -} - -func (a *testStoreAssetA) Dependencies() []Asset { - return dependenciesTestStoreAsset(a) -} - -func (a *testStoreAssetA) Generate(Parents) error { - return generateTestStoreAsset(a) -} - -func (a *testStoreAssetA) Files() []*File { - return fileTestStoreAsset(a) -} - -func (a *testStoreAssetA) Load(FileFetcher) (bool, error) { - return loadTestStoreAsset(a) -} - -type testStoreAssetB struct{} - -func (a *testStoreAssetB) Name() string { - return "b" -} - -func (a *testStoreAssetB) Dependencies() []Asset { - return dependenciesTestStoreAsset(a) -} - -func (a *testStoreAssetB) Generate(Parents) error { - return generateTestStoreAsset(a) -} - -func (a *testStoreAssetB) Files() []*File { - return fileTestStoreAsset(a) -} - -func (a *testStoreAssetB) Load(FileFetcher) (bool, error) { - return loadTestStoreAsset(a) -} - -type testStoreAssetC struct{} - -func (a *testStoreAssetC) Name() string { - return "c" -} - -func (a *testStoreAssetC) Dependencies() []Asset { - return dependenciesTestStoreAsset(a) -} - -func (a *testStoreAssetC) Generate(Parents) error { - return generateTestStoreAsset(a) -} - -func (a *testStoreAssetC) Files() []*File { - return fileTestStoreAsset(a) -} - -func (a *testStoreAssetC) Load(FileFetcher) (bool, 
error) { - return loadTestStoreAsset(a) -} - -type testStoreAssetD struct{} - -func (a *testStoreAssetD) Name() string { - return "d" -} - -func (a *testStoreAssetD) Dependencies() []Asset { - return dependenciesTestStoreAsset(a) -} - -func (a *testStoreAssetD) Generate(Parents) error { - return generateTestStoreAsset(a) -} - -func (a *testStoreAssetD) Files() []*File { - return fileTestStoreAsset(a) -} - -func (a *testStoreAssetD) Load(FileFetcher) (bool, error) { - return loadTestStoreAsset(a) -} - -func newTestStoreAsset(name string) Asset { - switch name { - case "a": - return &testStoreAssetA{} - case "b": - return &testStoreAssetB{} - case "c": - return &testStoreAssetC{} - case "d": - return &testStoreAssetD{} - default: - return nil - } -} - -// TestStoreFetch tests the Fetch method of StoreImpl. -func TestStoreFetch(t *testing.T) { - cases := []struct { - name string - assets map[string][]string - existingAssets []string - target string - expectedGenerationLog []string - }{ - { - name: "no dependencies", - assets: map[string][]string{ - "a": {}, - }, - target: "a", - expectedGenerationLog: []string{"a"}, - }, - { - name: "single dependency", - assets: map[string][]string{ - "a": {"b"}, - "b": {}, - }, - target: "a", - expectedGenerationLog: []string{"b", "a"}, - }, - { - name: "multiple dependencies", - assets: map[string][]string{ - "a": {"b", "c"}, - "b": {}, - "c": {}, - }, - target: "a", - expectedGenerationLog: []string{"b", "c", "a"}, - }, - { - name: "grandchild dependency", - assets: map[string][]string{ - "a": {"b"}, - "b": {"c"}, - "c": {}, - }, - target: "a", - expectedGenerationLog: []string{"c", "b", "a"}, - }, - { - name: "intragenerational shared dependency", - assets: map[string][]string{ - "a": {"b", "c"}, - "b": {"d"}, - "c": {"d"}, - "d": {}, - }, - target: "a", - expectedGenerationLog: []string{"d", "b", "c", "a"}, - }, - { - name: "intergenerational shared dependency", - assets: map[string][]string{ - "a": {"b", "c"}, - "b": {"c"}, - 
"c": {}, - }, - target: "a", - expectedGenerationLog: []string{"c", "b", "a"}, - }, - { - name: "existing asset", - assets: map[string][]string{ - "a": {}, - }, - existingAssets: []string{"a"}, - target: "a", - expectedGenerationLog: []string{}, - }, - { - name: "existing child asset", - assets: map[string][]string{ - "a": {"b"}, - "b": {}, - }, - existingAssets: []string{"b"}, - target: "a", - expectedGenerationLog: []string{"a"}, - }, - { - name: "absent grandchild asset", - assets: map[string][]string{ - "a": {"b"}, - "b": {"c"}, - "c": {}, - }, - existingAssets: []string{"b"}, - target: "a", - expectedGenerationLog: []string{"a"}, - }, - { - name: "absent grandchild with absent parent", - assets: map[string][]string{ - "a": {"b", "c"}, - "b": {"d"}, - "c": {"d"}, - "d": {}, - }, - existingAssets: []string{"b"}, - target: "a", - expectedGenerationLog: []string{"d", "c", "a"}, - }, - } - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - clearAssetBehaviors() - dir, err := ioutil.TempDir("", "TestStoreFetch") - if err != nil { - t.Fatalf("failed to create temporary directory: %v", err) - } - defer os.RemoveAll(dir) - store := &StoreImpl{ - directory: dir, - assets: map[reflect.Type]*assetState{}, - } - assets := make(map[string]Asset, len(tc.assets)) - for name := range tc.assets { - assets[name] = newTestStoreAsset(name) - } - for name, deps := range tc.assets { - dependenciesOfAsset := make([]Asset, len(deps)) - for i, d := range deps { - dependenciesOfAsset[i] = assets[d] - } - dependencies[reflect.TypeOf(assets[name])] = dependenciesOfAsset - } - for _, assetName := range tc.existingAssets { - asset := assets[assetName] - store.assets[reflect.TypeOf(asset)] = &assetState{ - asset: asset, - source: generatedSource, - } - } - err = store.Fetch(assets[tc.target]) - assert.NoError(t, err, "error fetching asset") - assert.EqualValues(t, tc.expectedGenerationLog, generationLog) - }) - } -} - -func TestStoreFetchOnDiskAssets(t *testing.T) { - cases 
:= []struct { - name string - assets map[string][]string - onDiskAssets []string - target string - expectedGenerationLog []string - expectedDirty bool - }{ - { - name: "no on-disk assets", - assets: map[string][]string{ - "a": {"b"}, - "b": {}, - }, - onDiskAssets: nil, - target: "a", - expectedGenerationLog: []string{"b", "a"}, - expectedDirty: false, - }, - { - name: "on-disk asset does not need dependent generation", - assets: map[string][]string{ - "a": {"b"}, - "b": {}, - }, - onDiskAssets: []string{"a"}, - target: "a", - expectedGenerationLog: []string{}, - expectedDirty: false, - }, - { - name: "on-disk dependent asset causes re-generation", - assets: map[string][]string{ - "a": {"b"}, - "b": {}, - }, - onDiskAssets: []string{"b"}, - target: "a", - expectedGenerationLog: []string{"a"}, - expectedDirty: true, - }, - { - name: "on-disk dependents invalidate all its children", - assets: map[string][]string{ - "a": {"b", "c"}, - "b": {"d"}, - "c": {"d"}, - "d": {}, - }, - onDiskAssets: []string{"d"}, - target: "a", - expectedGenerationLog: []string{"b", "c", "a"}, - expectedDirty: true, - }, - { - name: "re-generate when both parents and children are on-disk", - assets: map[string][]string{ - "a": {"b"}, - "b": {}, - }, - onDiskAssets: []string{"a", "b"}, - target: "a", - expectedGenerationLog: []string{"a"}, - expectedDirty: true, - }, - } - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - clearAssetBehaviors() - store := &StoreImpl{ - assets: map[reflect.Type]*assetState{}, - } - assets := make(map[string]Asset, len(tc.assets)) - for name := range tc.assets { - assets[name] = newTestStoreAsset(name) - } - for name, deps := range tc.assets { - dependenciesOfAsset := make([]Asset, len(deps)) - for i, d := range deps { - dependenciesOfAsset[i] = assets[d] - } - dependencies[reflect.TypeOf(assets[name])] = dependenciesOfAsset - } - for _, name := range tc.onDiskAssets { - onDiskAssets[reflect.TypeOf(assets[name])] = true - } - err := 
store.fetch(assets[tc.target], "") - assert.NoError(t, err, "unexpected error") - assert.EqualValues(t, tc.expectedGenerationLog, generationLog) - assert.Equal(t, tc.expectedDirty, store.assets[reflect.TypeOf(assets[tc.target])].anyParentsDirty) - }) - } -} diff --git a/pkg/asset/templates/content/bootkube/03-openshift-web-console-namespace.go b/pkg/asset/templates/content/bootkube/03-openshift-web-console-namespace.go deleted file mode 100644 index 85cf2d1fcc7..00000000000 --- a/pkg/asset/templates/content/bootkube/03-openshift-web-console-namespace.go +++ /dev/null @@ -1,65 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - openshiftWebConsoleNamespaceFileName = "03-openshift-web-console-namespace.yaml" -) - -var _ asset.WritableAsset = (*OpenshiftWebConsoleNamespace)(nil) - -// OpenshiftWebConsoleNamespace is the constant to represent contents of Openshift_WebConsoleNamespace.yaml file -type OpenshiftWebConsoleNamespace struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *OpenshiftWebConsoleNamespace) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. -func (t *OpenshiftWebConsoleNamespace) Name() string { - return "OpenshiftWebConsoleNamespace" -} - -// Generate generates the actual files by this asset -func (t *OpenshiftWebConsoleNamespace) Generate(parents asset.Parents) error { - t.fileName = openshiftWebConsoleNamespaceFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. 
-func (t *OpenshiftWebConsoleNamespace) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. -func (t *OpenshiftWebConsoleNamespace) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, openshiftWebConsoleNamespaceFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/bootkube/04-openshift-machine-config-operator.go b/pkg/asset/templates/content/bootkube/04-openshift-machine-config-operator.go deleted file mode 100644 index 3b5676ddc52..00000000000 --- a/pkg/asset/templates/content/bootkube/04-openshift-machine-config-operator.go +++ /dev/null @@ -1,65 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - openshiftMachineConfigOperatorFileName = "04-openshift-machine-config-operator.yaml" -) - -var _ asset.WritableAsset = (*OpenshiftMachineConfigOperator)(nil) - -// OpenshiftMachineConfigOperator is the constant to represent contents of Openshift_MachineConfigOperator.yaml file -type OpenshiftMachineConfigOperator struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *OpenshiftMachineConfigOperator) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. 
-func (t *OpenshiftMachineConfigOperator) Name() string { - return "OpenshiftMachineConfigOperator" -} - -// Generate generates the actual files by this asset -func (t *OpenshiftMachineConfigOperator) Generate(parents asset.Parents) error { - t.fileName = openshiftMachineConfigOperatorFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. -func (t *OpenshiftMachineConfigOperator) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. -func (t *OpenshiftMachineConfigOperator) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, openshiftMachineConfigOperatorFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/bootkube/05-openshift-cluster-api-namespace.go b/pkg/asset/templates/content/bootkube/05-openshift-cluster-api-namespace.go deleted file mode 100644 index ff83d97094f..00000000000 --- a/pkg/asset/templates/content/bootkube/05-openshift-cluster-api-namespace.go +++ /dev/null @@ -1,65 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - openshiftClusterAPINamespaceFileName = "05-openshift-cluster-api-namespace.yaml" -) - -var _ asset.WritableAsset = (*OpenshiftClusterAPINamespace)(nil) - -// OpenshiftClusterAPINamespace is the constant to represent contents of Openshift_ClusterApiNamespace.yaml file -type OpenshiftClusterAPINamespace struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset 
-func (t *OpenshiftClusterAPINamespace) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. -func (t *OpenshiftClusterAPINamespace) Name() string { - return "OpenshiftClusterAPINamespace" -} - -// Generate generates the actual files by this asset -func (t *OpenshiftClusterAPINamespace) Generate(parents asset.Parents) error { - t.fileName = openshiftClusterAPINamespaceFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. -func (t *OpenshiftClusterAPINamespace) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. -func (t *OpenshiftClusterAPINamespace) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, openshiftClusterAPINamespaceFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/bootkube/09-openshift-service-cert-signer-namespace.go b/pkg/asset/templates/content/bootkube/09-openshift-service-cert-signer-namespace.go deleted file mode 100644 index a5d9650846b..00000000000 --- a/pkg/asset/templates/content/bootkube/09-openshift-service-cert-signer-namespace.go +++ /dev/null @@ -1,65 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - openshiftServiceCertSignerNamespaceFileName = "09-openshift-service-cert-signer-namespace.yaml" -) - -var _ asset.WritableAsset = (*OpenshiftServiceCertSignerNamespace)(nil) - -// OpenshiftServiceCertSignerNamespace is the constant to represent the contents of 
09-openshift-service-signer-namespace.yaml -type OpenshiftServiceCertSignerNamespace struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *OpenshiftServiceCertSignerNamespace) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. -func (t *OpenshiftServiceCertSignerNamespace) Name() string { - return "OpenshiftServiceCertSignerNamespace" -} - -// Generate generates the actual files by this asset -func (t *OpenshiftServiceCertSignerNamespace) Generate(parents asset.Parents) error { - t.fileName = openshiftServiceCertSignerNamespaceFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. -func (t *OpenshiftServiceCertSignerNamespace) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. 
-func (t *OpenshiftServiceCertSignerNamespace) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, openshiftServiceCertSignerNamespaceFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/bootkube/cvo-overrides.go b/pkg/asset/templates/content/bootkube/cvo-overrides.go deleted file mode 100644 index 3e29b3ae7fe..00000000000 --- a/pkg/asset/templates/content/bootkube/cvo-overrides.go +++ /dev/null @@ -1,68 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - cVOOverridesFileName = "cvo-overrides.yaml.template" -) - -var _ asset.WritableAsset = (*CVOOverrides)(nil) - -// CVOOverrides is the constant to represent contents of cvo-override.yaml.template file -// This is a gate to prevent CVO from installing these operators which is conflicting -// with already owned resources by tectonic-operators. -// This files can be dropped when the overrides list becomes empty. -type CVOOverrides struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *CVOOverrides) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. 
-func (t *CVOOverrides) Name() string { - return "CVOOverrides" -} - -// Generate generates the actual files by this asset -func (t *CVOOverrides) Generate(parents asset.Parents) error { - t.fileName = cVOOverridesFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. -func (t *CVOOverrides) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. -func (t *CVOOverrides) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, cVOOverridesFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/bootkube/etcd-service.go b/pkg/asset/templates/content/bootkube/etcd-service.go deleted file mode 100644 index 90fdcdae699..00000000000 --- a/pkg/asset/templates/content/bootkube/etcd-service.go +++ /dev/null @@ -1,65 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - etcdServiceKubeSystemFileName = "etcd-service.yaml" -) - -var _ asset.WritableAsset = (*EtcdServiceKubeSystem)(nil) - -// EtcdServiceKubeSystem is the constant to represent contents of etcd-service.yaml file -type EtcdServiceKubeSystem struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *EtcdServiceKubeSystem) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. 
-func (t *EtcdServiceKubeSystem) Name() string { - return "EtcdServiceKubeSystem" -} - -// Generate generates the actual files by this asset -func (t *EtcdServiceKubeSystem) Generate(parents asset.Parents) error { - t.fileName = etcdServiceKubeSystemFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. -func (t *EtcdServiceKubeSystem) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. -func (t *EtcdServiceKubeSystem) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, etcdServiceKubeSystemFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/bootkube/host-etcd-service-endpoints.go b/pkg/asset/templates/content/bootkube/host-etcd-service-endpoints.go deleted file mode 100644 index e7d56072bf0..00000000000 --- a/pkg/asset/templates/content/bootkube/host-etcd-service-endpoints.go +++ /dev/null @@ -1,65 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - hostEtcdServiceEndpointsKubeSystemFileName = "host-etcd-service-endpoints.yaml.template" -) - -var _ asset.WritableAsset = (*HostEtcdServiceEndpointsKubeSystem)(nil) - -// HostEtcdServiceEndpointsKubeSystem is the constant to represent contents of etcd-service-endpoints.yaml.template file. 
-type HostEtcdServiceEndpointsKubeSystem struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *HostEtcdServiceEndpointsKubeSystem) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. -func (t *HostEtcdServiceEndpointsKubeSystem) Name() string { - return "HostEtcdServiceEndpointsKubeSystem" -} - -// Generate generates the actual files by this asset -func (t *HostEtcdServiceEndpointsKubeSystem) Generate(parents asset.Parents) error { - t.fileName = hostEtcdServiceEndpointsKubeSystemFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. -func (t *HostEtcdServiceEndpointsKubeSystem) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. 
-func (t *HostEtcdServiceEndpointsKubeSystem) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, hostEtcdServiceEndpointsKubeSystemFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/bootkube/host-etcd-service.go b/pkg/asset/templates/content/bootkube/host-etcd-service.go deleted file mode 100644 index 95b3eac7af7..00000000000 --- a/pkg/asset/templates/content/bootkube/host-etcd-service.go +++ /dev/null @@ -1,65 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - hostEtcdServiceKubeSystemFileName = "host-etcd-service.yaml" -) - -var _ asset.WritableAsset = (*HostEtcdServiceKubeSystem)(nil) - -// HostEtcdServiceKubeSystem is the constant to represent contents of etcd-service.yaml file -type HostEtcdServiceKubeSystem struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *HostEtcdServiceKubeSystem) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. -func (t *HostEtcdServiceKubeSystem) Name() string { - return "HostEtcdServiceKubeSystem" -} - -// Generate generates the actual files by this asset -func (t *HostEtcdServiceKubeSystem) Generate(parents asset.Parents) error { - t.fileName = hostEtcdServiceKubeSystemFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. 
-func (t *HostEtcdServiceKubeSystem) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. -func (t *HostEtcdServiceKubeSystem) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, hostEtcdServiceKubeSystemFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/bootkube/kube-cloud-config.go b/pkg/asset/templates/content/bootkube/kube-cloud-config.go deleted file mode 100644 index 1b5e649e486..00000000000 --- a/pkg/asset/templates/content/bootkube/kube-cloud-config.go +++ /dev/null @@ -1,65 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - kubeCloudConfigFileName = "kube-cloud-config.yaml" -) - -var _ asset.WritableAsset = (*KubeCloudConfig)(nil) - -// KubeCloudConfig is the constant to represent contents of kube_cloudconfig.yaml file -type KubeCloudConfig struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *KubeCloudConfig) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. -func (t *KubeCloudConfig) Name() string { - return "KubeCloudConfig" -} - -// Generate generates the actual files by this asset -func (t *KubeCloudConfig) Generate(parents asset.Parents) error { - t.fileName = kubeCloudConfigFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. 
-func (t *KubeCloudConfig) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. -func (t *KubeCloudConfig) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, kubeCloudConfigFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/bootkube/kube-system-configmap-etcd-serving-ca.go b/pkg/asset/templates/content/bootkube/kube-system-configmap-etcd-serving-ca.go deleted file mode 100644 index 0d152ddba95..00000000000 --- a/pkg/asset/templates/content/bootkube/kube-system-configmap-etcd-serving-ca.go +++ /dev/null @@ -1,65 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - kubeSystemConfigmapEtcdServingCAFileName = "kube-system-configmap-etcd-serving-ca.yaml.template" -) - -var _ asset.WritableAsset = (*KubeSystemConfigmapEtcdServingCA)(nil) - -// KubeSystemConfigmapEtcdServingCA is the constant to represent contents of kube-system-configmap-etcd-serving-ca.yaml.template file. -type KubeSystemConfigmapEtcdServingCA struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *KubeSystemConfigmapEtcdServingCA) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. 
-func (t *KubeSystemConfigmapEtcdServingCA) Name() string { - return "KubeSystemConfigmapEtcdServingCA" -} - -// Generate generates the actual files by this asset -func (t *KubeSystemConfigmapEtcdServingCA) Generate(parents asset.Parents) error { - t.fileName = kubeSystemConfigmapEtcdServingCAFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. -func (t *KubeSystemConfigmapEtcdServingCA) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. -func (t *KubeSystemConfigmapEtcdServingCA) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, kubeSystemConfigmapEtcdServingCAFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/bootkube/kube-system-configmap-root-ca.go b/pkg/asset/templates/content/bootkube/kube-system-configmap-root-ca.go deleted file mode 100644 index 99a5b70a751..00000000000 --- a/pkg/asset/templates/content/bootkube/kube-system-configmap-root-ca.go +++ /dev/null @@ -1,65 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - kubeSystemConfigmapRootCAFileName = "kube-system-configmap-root-ca.yaml.template" -) - -var _ asset.WritableAsset = (*KubeSystemConfigmapRootCA)(nil) - -// KubeSystemConfigmapRootCA is the constant to represent contents of kube-system-configmap-root-ca.yaml.template file. 
-type KubeSystemConfigmapRootCA struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *KubeSystemConfigmapRootCA) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. -func (t *KubeSystemConfigmapRootCA) Name() string { - return "KubeSystemConfigmapRootCA" -} - -// Generate generates the actual files by this asset -func (t *KubeSystemConfigmapRootCA) Generate(parents asset.Parents) error { - t.fileName = kubeSystemConfigmapRootCAFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. -func (t *KubeSystemConfigmapRootCA) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. 
-func (t *KubeSystemConfigmapRootCA) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, kubeSystemConfigmapRootCAFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/bootkube/kube-system-secret-etcd-client.go b/pkg/asset/templates/content/bootkube/kube-system-secret-etcd-client.go deleted file mode 100644 index 3ba3af00f4e..00000000000 --- a/pkg/asset/templates/content/bootkube/kube-system-secret-etcd-client.go +++ /dev/null @@ -1,65 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - kubeSystemSecretEtcdClientFileName = "kube-system-secret-etcd-client.yaml.template" -) - -var _ asset.WritableAsset = (*KubeSystemSecretEtcdClient)(nil) - -// KubeSystemSecretEtcdClient is the constant to represent contents of kube-system-secret-etcd-client.yaml.template file. -type KubeSystemSecretEtcdClient struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *KubeSystemSecretEtcdClient) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. -func (t *KubeSystemSecretEtcdClient) Name() string { - return "KubeSystemSecretEtcdClient" -} - -// Generate generates the actual files by this asset -func (t *KubeSystemSecretEtcdClient) Generate(parents asset.Parents) error { - t.fileName = kubeSystemSecretEtcdClientFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. 
-func (t *KubeSystemSecretEtcdClient) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. -func (t *KubeSystemSecretEtcdClient) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, kubeSystemSecretEtcdClientFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/bootkube/legacy-cvo-overrides.go b/pkg/asset/templates/content/bootkube/legacy-cvo-overrides.go deleted file mode 100644 index f5968813fdf..00000000000 --- a/pkg/asset/templates/content/bootkube/legacy-cvo-overrides.go +++ /dev/null @@ -1,68 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - legacyCVOOverridesFileName = "legacy-cvo-overrides.yaml.template" -) - -var _ asset.WritableAsset = (*LegacyCVOOverrides)(nil) - -// LegacyCVOOverrides is the constant to represent contents of legacy-cvo-override.yaml.template file -// This is a gate to prevent CVO from installing these operators which is conflicting -// with already owned resources by tectonic-operators. -// This files can be dropped when the overrides list becomes empty. -type LegacyCVOOverrides struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *LegacyCVOOverrides) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. 
-func (t *LegacyCVOOverrides) Name() string { - return "LegacyCVOOverrides" -} - -// Generate generates the actual files by this asset -func (t *LegacyCVOOverrides) Generate(parents asset.Parents) error { - t.fileName = legacyCVOOverridesFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. -func (t *LegacyCVOOverrides) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. -func (t *LegacyCVOOverrides) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, legacyCVOOverridesFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/bootkube/machine-config-server-tls-secret.go b/pkg/asset/templates/content/bootkube/machine-config-server-tls-secret.go deleted file mode 100644 index f5403bbdd6e..00000000000 --- a/pkg/asset/templates/content/bootkube/machine-config-server-tls-secret.go +++ /dev/null @@ -1,65 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - machineConfigServerTLSSecretFileName = "machine-config-server-tls-secret.yaml.template" -) - -var _ asset.WritableAsset = (*MachineConfigServerTLSSecret)(nil) - -// MachineConfigServerTLSSecret is the constant to represent contents of machine_configservertlssecret.yaml.template file -type MachineConfigServerTLSSecret struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *MachineConfigServerTLSSecret) Dependencies() []asset.Asset { - 
return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. -func (t *MachineConfigServerTLSSecret) Name() string { - return "MachineConfigServerTLSSecret" -} - -// Generate generates the actual files by this asset -func (t *MachineConfigServerTLSSecret) Generate(parents asset.Parents) error { - t.fileName = machineConfigServerTLSSecretFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. -func (t *MachineConfigServerTLSSecret) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. -func (t *MachineConfigServerTLSSecret) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, machineConfigServerTLSSecretFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/bootkube/openshift-service-cert-signer-ca-secret.go b/pkg/asset/templates/content/bootkube/openshift-service-cert-signer-ca-secret.go deleted file mode 100644 index ac84b473ddf..00000000000 --- a/pkg/asset/templates/content/bootkube/openshift-service-cert-signer-ca-secret.go +++ /dev/null @@ -1,65 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - openshiftServiceCertSignerSecretFileName = "openshift-service-cert-signer-ca-secret.yaml.template" -) - -var _ asset.WritableAsset = (*OpenshiftServiceCertSignerSecret)(nil) - -// OpenshiftServiceCertSignerSecret is the constant to represent the contents of openshift-service-signer-secret.yaml.template -type OpenshiftServiceCertSignerSecret struct { - 
fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *OpenshiftServiceCertSignerSecret) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. -func (t *OpenshiftServiceCertSignerSecret) Name() string { - return "OpenshiftServiceCertSignerSecret" -} - -// Generate generates the actual files by this asset -func (t *OpenshiftServiceCertSignerSecret) Generate(parents asset.Parents) error { - t.fileName = openshiftServiceCertSignerSecretFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. -func (t *OpenshiftServiceCertSignerSecret) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. 
-func (t *OpenshiftServiceCertSignerSecret) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, openshiftServiceCertSignerSecretFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/bootkube/pull.go b/pkg/asset/templates/content/bootkube/pull.go deleted file mode 100644 index 02c5701bc2c..00000000000 --- a/pkg/asset/templates/content/bootkube/pull.go +++ /dev/null @@ -1,65 +0,0 @@ -package bootkube - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - pullFileName = "pull.yaml.template" -) - -var _ asset.WritableAsset = (*Pull)(nil) - -// Pull is the constant to represent contents of pull.yaml.template file -type Pull struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *Pull) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. -func (t *Pull) Name() string { - return "Pull" -} - -// Generate generates the actual files by this asset -func (t *Pull) Generate(parents asset.Parents) error { - t.fileName = pullFileName - data, err := content.GetBootkubeTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. -func (t *Pull) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. 
-func (t *Pull) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, pullFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/helper.go b/pkg/asset/templates/content/helper.go deleted file mode 100644 index 27dd210ae55..00000000000 --- a/pkg/asset/templates/content/helper.go +++ /dev/null @@ -1,36 +0,0 @@ -package content - -import ( - "io/ioutil" - "path" - - "github.com/openshift/installer/data" -) - -const ( - // TemplateDir is the target directory for all template assets' files - TemplateDir = "templates" - bootkubeDataDir = "manifests/bootkube/" - tectonicDataDir = "manifests/tectonic/" -) - -// GetBootkubeTemplate returns the contents of the file in bootkube data dir -func GetBootkubeTemplate(uri string) ([]byte, error) { - return getFileContents(path.Join(bootkubeDataDir, uri)) -} - -// GetTectonicTemplate returns the contents of the file in tectonic data dir -func GetTectonicTemplate(uri string) ([]byte, error) { - return getFileContents(path.Join(tectonicDataDir, uri)) -} - -// getFileContents the content of the given URI, assuming that it's a file -func getFileContents(uri string) ([]byte, error) { - file, err := data.Assets.Open(uri) - if err != nil { - return []byte{}, err - } - defer file.Close() - - return ioutil.ReadAll(file) -} diff --git a/pkg/asset/templates/content/tectonic/binding-discovery.go b/pkg/asset/templates/content/tectonic/binding-discovery.go deleted file mode 100644 index 7643525f1ec..00000000000 --- a/pkg/asset/templates/content/tectonic/binding-discovery.go +++ /dev/null @@ -1,65 +0,0 @@ -package tectonic - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - bindingDiscoveryFileName = "binding-discovery.yaml" -) - -var _ 
asset.WritableAsset = (*BindingDiscovery)(nil) - -// BindingDiscovery is the variable/constant representing the contents of the respective file -type BindingDiscovery struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *BindingDiscovery) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. -func (t *BindingDiscovery) Name() string { - return "BindingDiscovery" -} - -// Generate generates the actual files by this asset -func (t *BindingDiscovery) Generate(parents asset.Parents) error { - t.fileName = bindingDiscoveryFileName - data, err := content.GetTectonicTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. -func (t *BindingDiscovery) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. 
-func (t *BindingDiscovery) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, bindingDiscoveryFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/tectonic/cloud-creds-secret.go b/pkg/asset/templates/content/tectonic/cloud-creds-secret.go deleted file mode 100644 index cb0149228a1..00000000000 --- a/pkg/asset/templates/content/tectonic/cloud-creds-secret.go +++ /dev/null @@ -1,65 +0,0 @@ -package tectonic - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - cloudCredsSecretFileName = "cloud-creds-secret.yaml.template" -) - -var _ asset.WritableAsset = (*CloudCredsSecret)(nil) - -// CloudCredsSecret is the constant to represent contents of corresponding yaml file -type CloudCredsSecret struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *CloudCredsSecret) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. -func (t *CloudCredsSecret) Name() string { - return "CloudCredsSecret" -} - -// Generate generates the actual files by this asset -func (t *CloudCredsSecret) Generate(parents asset.Parents) error { - t.fileName = cloudCredsSecretFileName - data, err := content.GetTectonicTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. -func (t *CloudCredsSecret) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. 
-func (t *CloudCredsSecret) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, cloudCredsSecretFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/content/tectonic/role-cloud-creds-secret-reader.go b/pkg/asset/templates/content/tectonic/role-cloud-creds-secret-reader.go deleted file mode 100644 index e3ccbdcd159..00000000000 --- a/pkg/asset/templates/content/tectonic/role-cloud-creds-secret-reader.go +++ /dev/null @@ -1,65 +0,0 @@ -package tectonic - -import ( - "os" - "path/filepath" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content" -) - -const ( - roleCloudCredsSecretReaderFileName = "role-cloud-creds-secret-reader.yaml.template" -) - -var _ asset.WritableAsset = (*RoleCloudCredsSecretReader)(nil) - -// RoleCloudCredsSecretReader is the variable to represent contents of corresponding file -type RoleCloudCredsSecretReader struct { - fileName string - FileList []*asset.File -} - -// Dependencies returns all of the dependencies directly needed by the asset -func (t *RoleCloudCredsSecretReader) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Name returns the human-friendly name of the asset. -func (t *RoleCloudCredsSecretReader) Name() string { - return "RoleCloudCredsSecretReader" -} - -// Generate generates the actual files by this asset -func (t *RoleCloudCredsSecretReader) Generate(parents asset.Parents) error { - t.fileName = roleCloudCredsSecretReaderFileName - data, err := content.GetTectonicTemplate(t.fileName) - if err != nil { - return err - } - t.FileList = []*asset.File{ - { - Filename: filepath.Join(content.TemplateDir, t.fileName), - Data: []byte(data), - }, - } - return nil -} - -// Files returns the files generated by the asset. 
-func (t *RoleCloudCredsSecretReader) Files() []*asset.File { - return t.FileList -} - -// Load returns the asset from disk. -func (t *RoleCloudCredsSecretReader) Load(f asset.FileFetcher) (bool, error) { - file, err := f.FetchByName(filepath.Join(content.TemplateDir, roleCloudCredsSecretReaderFileName)) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - t.FileList = []*asset.File{file} - return true, nil -} diff --git a/pkg/asset/templates/templates.go b/pkg/asset/templates/templates.go deleted file mode 100644 index 726ef245614..00000000000 --- a/pkg/asset/templates/templates.go +++ /dev/null @@ -1,125 +0,0 @@ -// Package templates deals with creating template assets that will be used by other assets -package templates - -import ( - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/templates/content/bootkube" - "github.com/openshift/installer/pkg/asset/templates/content/tectonic" -) - -var _ asset.WritableAsset = (*Templates)(nil) - -// Templates generates the dependent unrendered template files -type Templates struct { - FileList []*asset.File -} - -// Name returns a human friendly name for the templates asset -func (m *Templates) Name() string { - return "Common Templates" -} - -// Dependencies returns all of the dependencies directly needed by a -// Templates asset. 
-func (m *Templates) Dependencies() []asset.Asset { - return []asset.Asset{ - &bootkube.KubeCloudConfig{}, - &bootkube.MachineConfigServerTLSSecret{}, - &bootkube.OpenshiftServiceCertSignerSecret{}, - &bootkube.Pull{}, - &bootkube.CVOOverrides{}, - &bootkube.LegacyCVOOverrides{}, - &bootkube.HostEtcdServiceEndpointsKubeSystem{}, - &bootkube.KubeSystemConfigmapEtcdServingCA{}, - &bootkube.KubeSystemConfigmapRootCA{}, - &bootkube.KubeSystemSecretEtcdClient{}, - &bootkube.OpenshiftWebConsoleNamespace{}, - &bootkube.OpenshiftMachineConfigOperator{}, - &bootkube.OpenshiftClusterAPINamespace{}, - &bootkube.OpenshiftServiceCertSignerNamespace{}, - &bootkube.EtcdServiceKubeSystem{}, - &bootkube.HostEtcdServiceKubeSystem{}, - &tectonic.BindingDiscovery{}, - &tectonic.CloudCredsSecret{}, - &tectonic.RoleCloudCredsSecretReader{}, - } -} - -// Generate generates the respective operator config.yml files -func (m *Templates) Generate(dependencies asset.Parents) error { - kubeCloudConfig := &bootkube.KubeCloudConfig{} - machineConfigServerTLSSecret := &bootkube.MachineConfigServerTLSSecret{} - openshiftServiceCertSignerSecret := &bootkube.OpenshiftServiceCertSignerSecret{} - pull := &bootkube.Pull{} - cVOOverrides := &bootkube.CVOOverrides{} - legacyCVOOverrides := &bootkube.LegacyCVOOverrides{} - hostEtcdServiceEndpointsKubeSystem := &bootkube.HostEtcdServiceEndpointsKubeSystem{} - kubeSystemConfigmapEtcdServingCA := &bootkube.KubeSystemConfigmapEtcdServingCA{} - kubeSystemConfigmapRootCA := &bootkube.KubeSystemConfigmapRootCA{} - kubeSystemSecretEtcdClient := &bootkube.KubeSystemSecretEtcdClient{} - openshiftWebConsoleNamespace := &bootkube.OpenshiftWebConsoleNamespace{} - openshiftMachineConfigOperator := &bootkube.OpenshiftMachineConfigOperator{} - openshiftClusterAPINamespace := &bootkube.OpenshiftClusterAPINamespace{} - openshiftServiceCertSignerNamespace := &bootkube.OpenshiftServiceCertSignerNamespace{} - etcdServiceKubeSystem := &bootkube.EtcdServiceKubeSystem{} - 
hostEtcdServiceKubeSystem := &bootkube.HostEtcdServiceKubeSystem{} - - bindingDiscovery := &tectonic.BindingDiscovery{} - cloudCredsSecret := &tectonic.CloudCredsSecret{} - roleCloudCredsSecretReader := &tectonic.RoleCloudCredsSecretReader{} - - dependencies.Get( - kubeCloudConfig, - machineConfigServerTLSSecret, - openshiftServiceCertSignerSecret, - pull, - cVOOverrides, - legacyCVOOverrides, - hostEtcdServiceEndpointsKubeSystem, - kubeSystemConfigmapEtcdServingCA, - kubeSystemConfigmapRootCA, - kubeSystemSecretEtcdClient, - openshiftWebConsoleNamespace, - openshiftMachineConfigOperator, - openshiftClusterAPINamespace, - openshiftServiceCertSignerNamespace, - etcdServiceKubeSystem, - hostEtcdServiceKubeSystem, - bindingDiscovery, - cloudCredsSecret, - roleCloudCredsSecretReader) - - m.FileList = []*asset.File{} - m.FileList = append(m.FileList, kubeCloudConfig.Files()...) - m.FileList = append(m.FileList, machineConfigServerTLSSecret.Files()...) - m.FileList = append(m.FileList, openshiftServiceCertSignerSecret.Files()...) - m.FileList = append(m.FileList, pull.Files()...) - m.FileList = append(m.FileList, cVOOverrides.Files()...) - m.FileList = append(m.FileList, legacyCVOOverrides.Files()...) - m.FileList = append(m.FileList, hostEtcdServiceEndpointsKubeSystem.Files()...) - m.FileList = append(m.FileList, kubeSystemConfigmapEtcdServingCA.Files()...) - m.FileList = append(m.FileList, kubeSystemConfigmapRootCA.Files()...) - m.FileList = append(m.FileList, kubeSystemSecretEtcdClient.Files()...) - m.FileList = append(m.FileList, openshiftWebConsoleNamespace.Files()...) - m.FileList = append(m.FileList, openshiftMachineConfigOperator.Files()...) - m.FileList = append(m.FileList, openshiftClusterAPINamespace.Files()...) - m.FileList = append(m.FileList, openshiftServiceCertSignerNamespace.Files()...) - m.FileList = append(m.FileList, etcdServiceKubeSystem.Files()...) - m.FileList = append(m.FileList, hostEtcdServiceKubeSystem.Files()...) 
- - m.FileList = append(m.FileList, bindingDiscovery.Files()...) - m.FileList = append(m.FileList, cloudCredsSecret.Files()...) - m.FileList = append(m.FileList, roleCloudCredsSecretReader.Files()...) - - return nil -} - -// Files returns the files generated by the asset. -func (m *Templates) Files() []*asset.File { - return m.FileList -} - -// Load returns the manifests asset from disk. -func (m *Templates) Load(f asset.FileFetcher) (bool, error) { - return false, nil -} diff --git a/pkg/asset/tls/admincertkey.go b/pkg/asset/tls/admincertkey.go deleted file mode 100644 index 5866a8f19d1..00000000000 --- a/pkg/asset/tls/admincertkey.go +++ /dev/null @@ -1,44 +0,0 @@ -package tls - -import ( - "crypto/x509" - "crypto/x509/pkix" - - "github.com/openshift/installer/pkg/asset" -) - -//AdminCertKey is the asset that generates the admin key/cert pair. -type AdminCertKey struct { - CertKey -} - -var _ asset.WritableAsset = (*AdminCertKey)(nil) - -// Dependencies returns the dependency of the the cert/key pair, which includes -// the parent CA, and install config if it depends on the install config for -// DNS names, etc. -func (a *AdminCertKey) Dependencies() []asset.Asset { - return []asset.Asset{ - &KubeCA{}, - } -} - -// Generate generates the cert/key pair based on its dependencies. -func (a *AdminCertKey) Generate(dependencies asset.Parents) error { - kubeCA := &KubeCA{} - dependencies.Get(kubeCA) - - cfg := &CertCfg{ - Subject: pkix.Name{CommonName: "system:admin", Organization: []string{"system:masters"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - Validity: ValidityTenYears, - } - - return a.CertKey.Generate(cfg, kubeCA, "admin", DoNotAppendParent) -} - -// Name returns the human-friendly name of the asset. 
-func (a *AdminCertKey) Name() string { - return "Certificate (system:admin)" -} diff --git a/pkg/asset/tls/aggregatorca.go b/pkg/asset/tls/aggregatorca.go deleted file mode 100644 index 632c4b159c5..00000000000 --- a/pkg/asset/tls/aggregatorca.go +++ /dev/null @@ -1,44 +0,0 @@ -package tls - -import ( - "crypto/x509" - "crypto/x509/pkix" - - "github.com/openshift/installer/pkg/asset" -) - -// AggregatorCA is the asset that generates the aggregator-ca key/cert pair. -type AggregatorCA struct { - CertKey -} - -var _ asset.Asset = (*AggregatorCA)(nil) - -// Dependencies returns the dependency of the the cert/key pair, which includes -// the parent CA, and install config if it depends on the install config for -// DNS names, etc. -func (a *AggregatorCA) Dependencies() []asset.Asset { - return []asset.Asset{ - &RootCA{}, - } -} - -// Generate generates the cert/key pair based on its dependencies. -func (a *AggregatorCA) Generate(dependencies asset.Parents) error { - rootCA := &RootCA{} - dependencies.Get(rootCA) - - cfg := &CertCfg{ - Subject: pkix.Name{CommonName: "aggregator", OrganizationalUnit: []string{"bootkube"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Validity: ValidityTenYears, - IsCA: true, - } - - return a.CertKey.Generate(cfg, rootCA, "aggregator-ca", DoNotAppendParent) -} - -// Name returns the human-friendly name of the asset. -func (a *AggregatorCA) Name() string { - return "Certificate (aggregator)" -} diff --git a/pkg/asset/tls/apiservercertkey.go b/pkg/asset/tls/apiservercertkey.go deleted file mode 100644 index 7a7e1f322ba..00000000000 --- a/pkg/asset/tls/apiservercertkey.go +++ /dev/null @@ -1,63 +0,0 @@ -package tls - -import ( - "crypto/x509" - "crypto/x509/pkix" - "net" - - "github.com/pkg/errors" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/installconfig" -) - -// APIServerCertKey is the asset that generates the API server key/cert pair. 
-type APIServerCertKey struct { - CertKey -} - -var _ asset.Asset = (*APIServerCertKey)(nil) - -// Dependencies returns the dependency of the the cert/key pair, which includes -// the parent CA, and install config if it depends on the install config for -// DNS names, etc. -func (a *APIServerCertKey) Dependencies() []asset.Asset { - return []asset.Asset{ - &KubeCA{}, - &installconfig.InstallConfig{}, - } -} - -// Generate generates the cert/key pair based on its dependencies. -func (a *APIServerCertKey) Generate(dependencies asset.Parents) error { - kubeCA := &KubeCA{} - installConfig := &installconfig.InstallConfig{} - dependencies.Get(kubeCA, installConfig) - - apiServerAddress, err := cidrhost(installConfig.Config.Networking.ServiceCIDR.IPNet, 1) - if err != nil { - return errors.Wrap(err, "failed to get API Server address from InstallConfig") - } - - cfg := &CertCfg{ - Subject: pkix.Name{CommonName: "system:kube-apiserver", Organization: []string{"kube-master"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - Validity: ValidityTenYears, - DNSNames: []string{ - apiAddress(installConfig.Config), - "kubernetes", "kubernetes.default", - "kubernetes.default.svc", - "kubernetes.default.svc.cluster.local", - "localhost", - }, - IPAddresses: []net.IP{net.ParseIP(apiServerAddress), net.ParseIP("127.0.0.1")}, - } - - return a.CertKey.Generate(cfg, kubeCA, "apiserver", AppendParent) -} - -// Name returns the human-friendly name of the asset. 
-func (a *APIServerCertKey) Name() string { - return "Certificate (kube-apiaserver)" -} diff --git a/pkg/asset/tls/apiserverproxycertkey.go b/pkg/asset/tls/apiserverproxycertkey.go deleted file mode 100644 index 76f6fe5dc44..00000000000 --- a/pkg/asset/tls/apiserverproxycertkey.go +++ /dev/null @@ -1,44 +0,0 @@ -package tls - -import ( - "crypto/x509" - "crypto/x509/pkix" - - "github.com/openshift/installer/pkg/asset" -) - -// APIServerProxyCertKey is the asset that generates the API server proxy key/cert pair. -type APIServerProxyCertKey struct { - CertKey -} - -var _ asset.Asset = (*APIServerProxyCertKey)(nil) - -// Dependencies returns the dependency of the the cert/key pair, which includes -// the parent CA, and install config if it depends on the install config for -// DNS names, etc. -func (a *APIServerProxyCertKey) Dependencies() []asset.Asset { - return []asset.Asset{ - &AggregatorCA{}, - } -} - -// Generate generates the cert/key pair based on its dependencies. -func (a *APIServerProxyCertKey) Generate(dependencies asset.Parents) error { - aggregatorCA := &AggregatorCA{} - dependencies.Get(aggregatorCA) - - cfg := &CertCfg{ - Subject: pkix.Name{CommonName: "system:kube-apiserver-proxy", Organization: []string{"kube-master"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - Validity: ValidityTenYears, - } - - return a.CertKey.Generate(cfg, aggregatorCA, "apiserver-proxy", DoNotAppendParent) -} - -// Name returns the human-friendly name of the asset. 
-func (a *APIServerProxyCertKey) Name() string { - return "Certificate (system:kube-apiserver-proxy)" -} diff --git a/pkg/asset/tls/certkey.go b/pkg/asset/tls/certkey.go deleted file mode 100644 index dbb0b921261..00000000000 --- a/pkg/asset/tls/certkey.go +++ /dev/null @@ -1,109 +0,0 @@ -package tls - -import ( - "bytes" - "crypto/rsa" - "crypto/x509" - - "github.com/pkg/errors" - - "github.com/openshift/installer/pkg/asset" -) - -// AppendParentChoice dictates whether the parent's cert is to be added to the -// cert. -type AppendParentChoice bool - -const ( - // AppendParent indicates that the parent's cert should be added. - AppendParent AppendParentChoice = true - // DoNotAppendParent indicates that the parent's cert should not be added. - DoNotAppendParent AppendParentChoice = false -) - -// CertKeyInterface contains a private key and the associated cert. -type CertKeyInterface interface { - // Cert returns the certificate. - Cert() []byte - // Key returns the private key. - Key() []byte -} - -// CertKey contains the private key and the cert that's -// signed by the parent CA. -type CertKey struct { - CertRaw []byte - KeyRaw []byte - FileList []*asset.File -} - -// Cert returns the certificate. -func (c *CertKey) Cert() []byte { - return c.CertRaw -} - -// Key returns the private key. -func (c *CertKey) Key() []byte { - return c.KeyRaw -} - -// Generate generates a cert/key pair signed by the specified parent CA. 
-func (c *CertKey) Generate( - cfg *CertCfg, - parentCA CertKeyInterface, - filenameBase string, - appendParent AppendParentChoice, -) error { - var key *rsa.PrivateKey - var crt *x509.Certificate - var err error - - caKey, err := PemToPrivateKey(parentCA.Key()) - if err != nil { - return errors.Wrap(err, "failed to parse rsa private key") - } - - caCert, err := PemToCertificate(parentCA.Cert()) - if err != nil { - return errors.Wrap(err, "failed to parse x509 certificate") - } - - key, crt, err = GenerateCert(caKey, caCert, cfg) - if err != nil { - return errors.Wrap(err, "failed to generate cert/key pair") - } - - c.KeyRaw = PrivateKeyToPem(key) - c.CertRaw = CertToPem(crt) - - if appendParent { - c.CertRaw = bytes.Join([][]byte{c.CertRaw, CertToPem(caCert)}, []byte("\n")) - } - - c.generateFiles(filenameBase) - - return nil -} - -// Files returns the files generated by the asset. -func (c *CertKey) Files() []*asset.File { - return c.FileList -} - -func (c *CertKey) generateFiles(filenameBase string) { - c.FileList = []*asset.File{ - { - Filename: assetFilePath(filenameBase + ".key"), - Data: c.KeyRaw, - }, - { - Filename: assetFilePath(filenameBase + ".crt"), - Data: c.CertRaw, - }, - } -} - -// Load is a no-op because TLS assets are not written to disk. 
-func (c *CertKey) Load(asset.FileFetcher) (bool, error) { - return false, nil -} diff --git a/pkg/asset/tls/certkey_test.go b/pkg/asset/tls/certkey_test.go deleted file mode 100644 index ecfb4167dd1..00000000000 --- a/pkg/asset/tls/certkey_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package tls - -import ( - "crypto/x509" - "crypto/x509/pkix" - "net" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestCertKeyGenerate(t *testing.T) { - tests := []struct { - name string - certCfg *CertCfg - filenameBase string - certFileName string - appendParent AppendParentChoice - errString string - }{ - { - name: "simple ca", - certCfg: &CertCfg{ - Subject: pkix.Name{CommonName: "test0-ca", OrganizationalUnit: []string{"openshift"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Validity: ValidityTenYears, - }, - filenameBase: "test0-ca", - appendParent: DoNotAppendParent, - }, - { - name: "more complicated ca", - certCfg: &CertCfg{ - Subject: pkix.Name{CommonName: "test1-ca", OrganizationalUnit: []string{"openshift"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Validity: ValidityTenYears, - DNSNames: []string{"test.openshift.io"}, - IPAddresses: []net.IP{net.ParseIP("10.0.0.1")}, - }, - filenameBase: "test1-ca", - appendParent: AppendParent, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - rootCA := &RootCA{} - err := rootCA.Generate(nil) - assert.NoError(t, err, "failed to generate root CA") - - certKey := &CertKey{} - err = certKey.Generate(tt.certCfg, rootCA, tt.filenameBase, tt.appendParent) - if err != nil { - assert.EqualErrorf(t, err, tt.errString, tt.name) - return - } else if tt.errString != "" { - t.Errorf("expect error %v, saw nil", err) - } - - actualFiles := certKey.Files() - - assert.Equal(t, 2, len(actualFiles), "unexpected number of files") - assert.Equal(t, assetFilePath(tt.filenameBase+".key"), 
actualFiles[0].Filename, "unexpected key file name") - assert.Equal(t, assetFilePath(tt.filenameBase+".crt"), actualFiles[1].Filename, "unexpected cert file name") - - assert.Equal(t, certKey.Key(), actualFiles[0].Data, "key file data does not match key") - assert.Equal(t, certKey.Cert(), actualFiles[1].Data, "cert file does not match cert") - - // Briefly check the certs. - certPool := x509.NewCertPool() - if !certPool.AppendCertsFromPEM(certKey.Cert()) { - t.Error("failed to append certs from PEM") - } - - opts := x509.VerifyOptions{ - Roots: certPool, - DNSName: tt.certCfg.Subject.CommonName, - } - if tt.certCfg.DNSNames != nil { - opts.DNSName = "test.openshift.io" - } - - cert, err := PemToCertificate(certKey.Cert()) - assert.NoError(t, err, tt.name) - - _, err = cert.Verify(opts) - assert.NoError(t, err, tt.name) - }) - } -} diff --git a/pkg/asset/tls/doc.go b/pkg/asset/tls/doc.go deleted file mode 100644 index f5cced4b334..00000000000 --- a/pkg/asset/tls/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package tls defines and generates the tls assets based on its dependencies. -package tls diff --git a/pkg/asset/tls/etcdca.go b/pkg/asset/tls/etcdca.go deleted file mode 100644 index ca1f58884e6..00000000000 --- a/pkg/asset/tls/etcdca.go +++ /dev/null @@ -1,44 +0,0 @@ -package tls - -import ( - "crypto/x509" - "crypto/x509/pkix" - - "github.com/openshift/installer/pkg/asset" -) - -// EtcdCA is the asset that generates the etcd-ca key/cert pair. -type EtcdCA struct { - CertKey -} - -var _ asset.Asset = (*EtcdCA)(nil) - -// Dependencies returns the dependency of the the cert/key pair, which includes -// the parent CA, and install config if it depends on the install config for -// DNS names, etc. -func (a *EtcdCA) Dependencies() []asset.Asset { - return []asset.Asset{ - &RootCA{}, - } -} - -// Generate generates the cert/key pair based on its dependencies. 
-func (a *EtcdCA) Generate(dependencies asset.Parents) error { - rootCA := &RootCA{} - dependencies.Get(rootCA) - - cfg := &CertCfg{ - Subject: pkix.Name{CommonName: "etcd", OrganizationalUnit: []string{"etcd"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Validity: ValidityTenYears, - IsCA: true, - } - - return a.CertKey.Generate(cfg, rootCA, "etcd-client-ca", DoNotAppendParent) -} - -// Name returns the human-friendly name of the asset. -func (a *EtcdCA) Name() string { - return "Certificate (etcd)" -} diff --git a/pkg/asset/tls/etcdclientcertkey.go b/pkg/asset/tls/etcdclientcertkey.go deleted file mode 100644 index d9c90258d17..00000000000 --- a/pkg/asset/tls/etcdclientcertkey.go +++ /dev/null @@ -1,44 +0,0 @@ -package tls - -import ( - "crypto/x509" - "crypto/x509/pkix" - - "github.com/openshift/installer/pkg/asset" -) - -// EtcdClientCertKey is the asset that generates the etcd client key/cert pair. -type EtcdClientCertKey struct { - CertKey -} - -var _ asset.Asset = (*EtcdClientCertKey)(nil) - -// Dependencies returns the dependency of the the cert/key pair, which includes -// the parent CA, and install config if it depends on the install config for -// DNS names, etc. -func (a *EtcdClientCertKey) Dependencies() []asset.Asset { - return []asset.Asset{ - &EtcdCA{}, - } -} - -// Generate generates the cert/key pair based on its dependencies. -func (a *EtcdClientCertKey) Generate(dependencies asset.Parents) error { - etcdCA := &EtcdCA{} - dependencies.Get(etcdCA) - - cfg := &CertCfg{ - Subject: pkix.Name{CommonName: "etcd", OrganizationalUnit: []string{"etcd"}}, - KeyUsages: x509.KeyUsageKeyEncipherment, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - Validity: ValidityTenYears, - } - - return a.CertKey.Generate(cfg, etcdCA, "etcd-client", DoNotAppendParent) -} - -// Name returns the human-friendly name of the asset. 
-func (a *EtcdClientCertKey) Name() string { - return "Certificate (etcd)" -} diff --git a/pkg/asset/tls/helper.go b/pkg/asset/tls/helper.go deleted file mode 100644 index de02b26029c..00000000000 --- a/pkg/asset/tls/helper.go +++ /dev/null @@ -1,32 +0,0 @@ -package tls - -import ( - "fmt" - "net" - "path/filepath" - - "github.com/apparentlymart/go-cidr/cidr" - - "github.com/openshift/installer/pkg/types" -) - -const ( - tlsDir = "tls" -) - -func assetFilePath(filename string) string { - return filepath.Join(tlsDir, filename) -} - -func apiAddress(cfg *types.InstallConfig) string { - return fmt.Sprintf("%s-api.%s", cfg.ObjectMeta.Name, cfg.BaseDomain) -} - -func cidrhost(network net.IPNet, hostNum int) (string, error) { - ip, err := cidr.Host(&network, hostNum) - if err != nil { - return "", err - } - - return ip.String(), nil -} diff --git a/pkg/asset/tls/ingresscertkey.go b/pkg/asset/tls/ingresscertkey.go deleted file mode 100644 index 69141ae2ad0..00000000000 --- a/pkg/asset/tls/ingresscertkey.go +++ /dev/null @@ -1,54 +0,0 @@ -package tls - -import ( - "crypto/x509" - "crypto/x509/pkix" - "fmt" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/installconfig" -) - -// IngressCertKey is the asset that generates the ingress key/cert pair. -type IngressCertKey struct { - CertKey -} - -var _ asset.Asset = (*IngressCertKey)(nil) - -// Dependencies returns the dependency of the the cert/key pair, which includes -// the parent CA, and install config if it depends on the install config for -// DNS names, etc. -func (a *IngressCertKey) Dependencies() []asset.Asset { - return []asset.Asset{ - &KubeCA{}, - &installconfig.InstallConfig{}, - } -} - -// Generate generates the cert/key pair based on its dependencies. 
-func (a *IngressCertKey) Generate(dependencies asset.Parents) error { - kubeCA := &KubeCA{} - installConfig := &installconfig.InstallConfig{} - dependencies.Get(kubeCA, installConfig) - - baseAddress := fmt.Sprintf("%s.%s", installConfig.Config.ObjectMeta.Name, installConfig.Config.BaseDomain) - - cfg := &CertCfg{ - Subject: pkix.Name{CommonName: baseAddress, Organization: []string{"ingress"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, - Validity: ValidityTenYears, - DNSNames: []string{ - baseAddress, - fmt.Sprintf("*.%s", baseAddress), - }, - } - - return a.CertKey.Generate(cfg, kubeCA, "ingress", AppendParent) -} - -// Name returns the human-friendly name of the asset. -func (a *IngressCertKey) Name() string { - return "Certificate (ingress)" -} diff --git a/pkg/asset/tls/keypair.go b/pkg/asset/tls/keypair.go deleted file mode 100644 index f2efb5d9fca..00000000000 --- a/pkg/asset/tls/keypair.go +++ /dev/null @@ -1,65 +0,0 @@ -package tls - -import ( - "github.com/openshift/installer/pkg/asset" - "github.com/pkg/errors" -) - -// KeyPairInterface contains a private key and a public key. -type KeyPairInterface interface { - // Private returns the private key. - Private() []byte - // Public returns the public key. - Public() []byte -} - -// KeyPair contains a private key and a public key. -type KeyPair struct { - Pvt []byte - Pub []byte - FileList []*asset.File -} - -// Generate generates the rsa private / public key pair. 
-func (k *KeyPair) Generate(filenameBase string) error { - key, err := PrivateKey() - if err != nil { - return errors.Wrap(err, "failed to generate private key") - } - - pubkeyData, err := PublicKeyToPem(&key.PublicKey) - if err != nil { - return errors.Wrap(err, "failed to get public key data from private key") - } - - k.Pvt = PrivateKeyToPem(key) - k.Pub = pubkeyData - - k.FileList = []*asset.File{ - { - Filename: assetFilePath(filenameBase + ".key"), - Data: k.Pvt, - }, - { - Filename: assetFilePath(filenameBase + ".pub"), - Data: k.Pub, - }, - } - - return nil -} - -// Public returns the public key. -func (k *KeyPair) Public() []byte { - return k.Pub -} - -// Private returns the private key. -func (k *KeyPair) Private() []byte { - return k.Pvt -} - -// Files returns the files generated by the asset. -func (k *KeyPair) Files() []*asset.File { - return k.FileList -} diff --git a/pkg/asset/tls/kubeca.go b/pkg/asset/tls/kubeca.go deleted file mode 100644 index d4818803174..00000000000 --- a/pkg/asset/tls/kubeca.go +++ /dev/null @@ -1,44 +0,0 @@ -package tls - -import ( - "crypto/x509" - "crypto/x509/pkix" - - "github.com/openshift/installer/pkg/asset" -) - -// KubeCA is the asset that generates the kube-ca key/cert pair. -type KubeCA struct { - CertKey -} - -var _ asset.Asset = (*KubeCA)(nil) - -// Dependencies returns the dependency of the the cert/key pair, which includes -// the parent CA, and install config if it depends on the install config for -// DNS names, etc. -func (a *KubeCA) Dependencies() []asset.Asset { - return []asset.Asset{ - &RootCA{}, - } -} - -// Generate generates the cert/key pair based on its dependencies. 
-func (a *KubeCA) Generate(dependencies asset.Parents) error { - rootCA := &RootCA{} - dependencies.Get(rootCA) - - cfg := &CertCfg{ - Subject: pkix.Name{CommonName: "kube-ca", OrganizationalUnit: []string{"bootkube"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Validity: ValidityTenYears, - IsCA: true, - } - - return a.CertKey.Generate(cfg, rootCA, "kube-ca", DoNotAppendParent) -} - -// Name returns the human-friendly name of the asset. -func (a *KubeCA) Name() string { - return "Certificate (kube-ca)" -} diff --git a/pkg/asset/tls/kubeletcertkey.go b/pkg/asset/tls/kubeletcertkey.go deleted file mode 100644 index b8632e2025e..00000000000 --- a/pkg/asset/tls/kubeletcertkey.go +++ /dev/null @@ -1,46 +0,0 @@ -package tls - -import ( - "crypto/x509" - "crypto/x509/pkix" - - "github.com/openshift/installer/pkg/asset" -) - -// KubeletCertKey is the asset that generates the kubelet key/cert pair. -type KubeletCertKey struct { - CertKey -} - -var _ asset.Asset = (*KubeletCertKey)(nil) - -// Dependencies returns the dependency of the the cert/key pair, which includes -// the parent CA, and install config if it depends on the install config for -// DNS names, etc. -func (a *KubeletCertKey) Dependencies() []asset.Asset { - return []asset.Asset{ - &KubeCA{}, - } -} - -// Generate generates the cert/key pair based on its dependencies. 
-func (a *KubeletCertKey) Generate(dependencies asset.Parents) error { - kubeCA := &KubeCA{} - dependencies.Get(kubeCA) - - cfg := &CertCfg{ - // system:masters is a hack to get the kubelet up without kube-core - // TODO(node): make kubelet bootstrapping secure with minimal permissions eventually switching to system:node:* CommonName - Subject: pkix.Name{CommonName: "system:serviceaccount:kube-system:default", Organization: []string{"system:serviceaccounts:kube-system", "system:masters"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, - Validity: ValidityThirtyMinutes, - } - - return a.CertKey.Generate(cfg, kubeCA, "kubelet", DoNotAppendParent) -} - -// Name returns the human-friendly name of the asset. -func (a *KubeletCertKey) Name() string { - return "Certificate (system:serviceaccount:kube-system:default)" -} diff --git a/pkg/asset/tls/mcscertkey.go b/pkg/asset/tls/mcscertkey.go deleted file mode 100644 index 9463d6c2673..00000000000 --- a/pkg/asset/tls/mcscertkey.go +++ /dev/null @@ -1,49 +0,0 @@ -package tls - -import ( - "crypto/x509" - "crypto/x509/pkix" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/asset/installconfig" -) - -// MCSCertKey is the asset that generates the MCS key/cert pair. -type MCSCertKey struct { - CertKey -} - -var _ asset.Asset = (*MCSCertKey)(nil) - -// Dependencies returns the dependency of the the cert/key pair, which includes -// the parent CA, and install config if it depends on the install config for -// DNS names, etc. -func (a *MCSCertKey) Dependencies() []asset.Asset { - return []asset.Asset{ - &RootCA{}, - &installconfig.InstallConfig{}, - } -} - -// Generate generates the cert/key pair based on its dependencies. 
-func (a *MCSCertKey) Generate(dependencies asset.Parents) error { - rootCA := &RootCA{} - installConfig := &installconfig.InstallConfig{} - dependencies.Get(rootCA, installConfig) - - hostname := apiAddress(installConfig.Config) - - cfg := &CertCfg{ - Subject: pkix.Name{CommonName: hostname}, - ExtKeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - Validity: ValidityTenYears, - DNSNames: []string{hostname}, - } - - return a.CertKey.Generate(cfg, rootCA, "machine-config-server", DoNotAppendParent) -} - -// Name returns the human-friendly name of the asset. -func (a *MCSCertKey) Name() string { - return "Certificate (mcs)" -} diff --git a/pkg/asset/tls/root.go b/pkg/asset/tls/root.go deleted file mode 100644 index 3651f15391e..00000000000 --- a/pkg/asset/tls/root.go +++ /dev/null @@ -1,49 +0,0 @@ -package tls - -import ( - "crypto/x509" - "crypto/x509/pkix" - - "github.com/openshift/installer/pkg/asset" - "github.com/pkg/errors" -) - -// RootCA contains the private key and the cert that's -// self-signed as the root CA. -type RootCA struct { - CertKey -} - -var _ asset.WritableAsset = (*RootCA)(nil) - -// Dependencies returns the dependency of the root-ca, which is empty. -func (c *RootCA) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Generate generates the root-ca key and cert pair. -func (c *RootCA) Generate(parents asset.Parents) error { - cfg := &CertCfg{ - Subject: pkix.Name{CommonName: "root-ca", OrganizationalUnit: []string{"openshift"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Validity: ValidityTenYears, - IsCA: true, - } - - key, crt, err := GenerateRootCertKey(cfg) - if err != nil { - return errors.Wrap(err, "failed to generate RootCA") - } - - c.KeyRaw = PrivateKeyToPem(key) - c.CertRaw = CertToPem(crt) - - c.generateFiles("root-ca") - - return nil -} - -// Name returns the human-friendly name of the asset. 
-func (c *RootCA) Name() string { - return "Root CA" -} diff --git a/pkg/asset/tls/serviceaccountkeypair.go b/pkg/asset/tls/serviceaccountkeypair.go deleted file mode 100644 index ffbb363f10a..00000000000 --- a/pkg/asset/tls/serviceaccountkeypair.go +++ /dev/null @@ -1,32 +0,0 @@ -package tls - -import "github.com/openshift/installer/pkg/asset" - -// ServiceAccountKeyPair is the asset that generates the service-account public/private key pair. -type ServiceAccountKeyPair struct { - KeyPair -} - -var _ asset.WritableAsset = (*ServiceAccountKeyPair)(nil) - -// Dependencies returns the dependency of the the cert/key pair, which includes -// the parent CA, and install config if it depends on the install config for -// DNS names, etc. -func (a *ServiceAccountKeyPair) Dependencies() []asset.Asset { - return []asset.Asset{} -} - -// Generate generates the cert/key pair based on its dependencies. -func (a *ServiceAccountKeyPair) Generate(dependencies asset.Parents) error { - return a.KeyPair.Generate("service-account") -} - -// Name returns the human-friendly name of the asset. -func (a *ServiceAccountKeyPair) Name() string { - return "Key Pair (service-account.pub)" -} - -// Load is a no-op because the service account keypair is not written to disk. -func (a *ServiceAccountKeyPair) Load(asset.FileFetcher) (bool, error) { - return false, nil -} diff --git a/pkg/asset/tls/serviceservingca.go b/pkg/asset/tls/serviceservingca.go deleted file mode 100644 index 714140cc338..00000000000 --- a/pkg/asset/tls/serviceservingca.go +++ /dev/null @@ -1,44 +0,0 @@ -package tls - -import ( - "crypto/x509" - "crypto/x509/pkix" - - "github.com/openshift/installer/pkg/asset" -) - -// ServiceServingCA is the asset that generates the service-serving-ca key/cert pair. 
-type ServiceServingCA struct { - CertKey -} - -var _ asset.Asset = (*ServiceServingCA)(nil) - -// Dependencies returns the dependency of the the cert/key pair, which includes -// the parent CA, and install config if it depends on the install config for -// DNS names, etc. -func (a *ServiceServingCA) Dependencies() []asset.Asset { - return []asset.Asset{ - &RootCA{}, - } -} - -// Generate generates the cert/key pair based on its dependencies. -func (a *ServiceServingCA) Generate(dependencies asset.Parents) error { - rootCA := &RootCA{} - dependencies.Get(rootCA) - - cfg := &CertCfg{ - Subject: pkix.Name{CommonName: "service-serving", OrganizationalUnit: []string{"bootkube"}}, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Validity: ValidityTenYears, - IsCA: true, - } - - return a.CertKey.Generate(cfg, rootCA, "service-serving-ca", DoNotAppendParent) -} - -// Name returns the human-friendly name of the asset. -func (a *ServiceServingCA) Name() string { - return "Certificate (service-serving)" -} diff --git a/pkg/asset/tls/tls.go b/pkg/asset/tls/tls.go deleted file mode 100644 index 75957f38559..00000000000 --- a/pkg/asset/tls/tls.go +++ /dev/null @@ -1,213 +0,0 @@ -package tls - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "math" - "math/big" - "net" - "time" - - "github.com/pkg/errors" -) - -const ( - keySize = 2048 - - // ValidityTenYears sets the validity of a cert to 10 years. - ValidityTenYears = time.Hour * 24 * 365 * 10 - - // ValidityThirtyMinutes sets the validity of a cert to 30 minutes. - // This is for the kubelet bootstrap. 
- ValidityThirtyMinutes = time.Minute * 30 -) - -// CertCfg contains all needed fields to configure a new certificate -type CertCfg struct { - DNSNames []string - ExtKeyUsages []x509.ExtKeyUsage - IPAddresses []net.IP - KeyUsages x509.KeyUsage - Subject pkix.Name - Validity time.Duration - IsCA bool -} - -// rsaPublicKey reflects the ASN.1 structure of a PKCS#1 public key. -type rsaPublicKey struct { - N *big.Int - E int -} - -// PrivateKey generates an RSA Private key and returns the value -func PrivateKey() (*rsa.PrivateKey, error) { - rsaKey, err := rsa.GenerateKey(rand.Reader, keySize) - if err != nil { - return nil, errors.Wrap(err, "error generating RSA private key") - } - - return rsaKey, nil -} - -// SelfSignedCACert Creates a self signed CA certificate -func SelfSignedCACert(cfg *CertCfg, key *rsa.PrivateKey) (*x509.Certificate, error) { - var err error - - cert := x509.Certificate{ - BasicConstraintsValid: true, - IsCA: cfg.IsCA, - KeyUsage: cfg.KeyUsages, - NotAfter: time.Now().Add(cfg.Validity), - NotBefore: time.Now(), - SerialNumber: new(big.Int).SetInt64(0), - Subject: cfg.Subject, - } - // verifies that the CN and/or OU for the cert is set - if len(cfg.Subject.CommonName) == 0 || len(cfg.Subject.OrganizationalUnit) == 0 { - return nil, errors.Errorf("certification's subject is not set, or invalid") - } - pub := key.Public() - cert.SubjectKeyId, err = generateSubjectKeyID(pub) - if err != nil { - return nil, errors.Wrap(err, "failed to set subject key identifier") - } - certBytes, err := x509.CreateCertificate(rand.Reader, &cert, &cert, key.Public(), key) - if err != nil { - return nil, errors.Wrap(err, "failed to create certificate") - } - return x509.ParseCertificate(certBytes) -} - -// SignedCertificate creates a new X.509 certificate based on a template. 
-func SignedCertificate( - cfg *CertCfg, - csr *x509.CertificateRequest, - key *rsa.PrivateKey, - caCert *x509.Certificate, - caKey *rsa.PrivateKey, -) (*x509.Certificate, error) { - serial, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64)) - if err != nil { - return nil, err - } - - certTmpl := x509.Certificate{ - DNSNames: csr.DNSNames, - ExtKeyUsage: cfg.ExtKeyUsages, - IPAddresses: csr.IPAddresses, - KeyUsage: cfg.KeyUsages, - NotAfter: time.Now().Add(cfg.Validity), - NotBefore: caCert.NotBefore, - SerialNumber: serial, - Subject: csr.Subject, - IsCA: cfg.IsCA, - Version: 3, - BasicConstraintsValid: true, - } - pub := caCert.PublicKey.(*rsa.PublicKey) - certTmpl.SubjectKeyId, err = generateSubjectKeyID(pub) - if err != nil { - return nil, errors.Wrap(err, "failed to set subject key identifier") - } - certBytes, err := x509.CreateCertificate(rand.Reader, &certTmpl, caCert, key.Public(), caKey) - if err != nil { - return nil, errors.Wrap(err, "failed to create x509 certificate") - } - return x509.ParseCertificate(certBytes) -} - -// generateSubjectKeyID generates a SHA-1 hash of the subject public key. -func generateSubjectKeyID(pub crypto.PublicKey) ([]byte, error) { - var publicKeyBytes []byte - var err error - - switch pub := pub.(type) { - case *rsa.PublicKey: - publicKeyBytes, err = asn1.Marshal(rsaPublicKey{N: pub.N, E: pub.E}) - if err != nil { - return nil, errors.Wrap(err, "failed to Marshal ans1 public key") - } - case *ecdsa.PublicKey: - publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) - default: - return nil, errors.New("only RSA and ECDSA public keys supported") - } - - hash := sha1.Sum(publicKeyBytes) - return hash[:], nil -} - -// GenerateCert creates a key, csr & a signed cert -// This is useful for apiserver and openshift-apiser cert which will be -// authenticated by the kubeconfig using root-ca. 
-func GenerateCert(caKey *rsa.PrivateKey, - caCert *x509.Certificate, - cfg *CertCfg) (*rsa.PrivateKey, *x509.Certificate, error) { - - // create a private key - key, err := PrivateKey() - if err != nil { - return nil, nil, errors.Wrap(err, "failed to generate private key") - } - - // create a CSR - csrTmpl := x509.CertificateRequest{Subject: cfg.Subject, DNSNames: cfg.DNSNames, IPAddresses: cfg.IPAddresses} - csrBytes, err := x509.CreateCertificateRequest(rand.Reader, &csrTmpl, key) - if err != nil { - return nil, nil, errors.Wrap(err, "failed to create certificate request") - } - csr, err := x509.ParseCertificateRequest(csrBytes) - if err != nil { - return nil, nil, errors.Wrap(err, "error parsing x509 certificate request") - } - - // create a cert - cert, err := GenerateSignedCert(cfg, csr, key, caKey, caCert) - if err != nil { - return nil, nil, errors.Wrap(err, "failed to create a signed certificate") - } - return key, cert, nil -} - -// GenerateRootCA creates and returns the root CA -func GenerateRootCA(key *rsa.PrivateKey, cfg *CertCfg) (*x509.Certificate, error) { - cert, err := SelfSignedCACert(cfg, key) - if err != nil { - return nil, errors.Wrap(err, "failed to generate self signed certificate") - } - return cert, nil -} - -// GenerateSignedCert generates a signed certificate. -func GenerateSignedCert(cfg *CertCfg, - csr *x509.CertificateRequest, - key *rsa.PrivateKey, - caKey *rsa.PrivateKey, - caCert *x509.Certificate) (*x509.Certificate, error) { - cert, err := SignedCertificate(cfg, csr, key, caCert, caKey) - if err != nil { - return nil, errors.Wrap(err, "failed to create a signed certificate") - } - return cert, nil -} - -// GenerateRootCertKey generates a root key/cert pair. 
-func GenerateRootCertKey(cfg *CertCfg) (*rsa.PrivateKey, *x509.Certificate, error) { - key, err := PrivateKey() - if err != nil { - return nil, nil, errors.Wrap(err, "failed to generate private key") - } - - crt, err := GenerateRootCA(key, cfg) - if err != nil { - return nil, nil, errors.Wrap(err, "failed to create root CA certificate") - } - return key, crt, nil -} diff --git a/pkg/asset/tls/tls_test.go b/pkg/asset/tls/tls_test.go deleted file mode 100644 index 57d727c77c3..00000000000 --- a/pkg/asset/tls/tls_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package tls - -import ( - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "testing" - "time" -) - -func TestSelfSignedCACert(t *testing.T) { - key, err := PrivateKey() - if err != nil { - t.Fatalf("Failed to generate Private Key: %v", err) - } - cases := []struct { - cfg *CertCfg - err bool - }{ - { - cfg: &CertCfg{ - Validity: time.Hour * 5, - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Subject: pkix.Name{ - CommonName: "root_ca", - OrganizationalUnit: []string{"openshift"}, - }, - IsCA: true, - }, - err: false, - }, - { - cfg: &CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Subject: pkix.Name{ - CommonName: "root_ca", - }, - IsCA: false, - }, - err: true, - }, - { - cfg: &CertCfg{ - KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, - Subject: pkix.Name{ - OrganizationalUnit: []string{"openshift"}, - }, - }, - err: true, - }, - } - for i, c := range cases { - if _, err := SelfSignedCACert(c.cfg, key); (err != nil) != c.err { - no := "no" - if c.err { - no = "an" - } - t.Errorf("test case %d: expected %s error, got %v", i, no, err) - } - } -} - -func TestSignedCertificate(t *testing.T) { - key, err := PrivateKey() - if err != nil { - t.Fatalf("Failed to generate private key: %v", err) - } - - cases := []struct { - Subject pkix.Name - 
SignatureAlgorithm x509.SignatureAlgorithm - err bool - }{ - { - Subject: pkix.Name{ - CommonName: "csr", - OrganizationalUnit: []string{"openshift"}, - }, - err: false, - }, - { - Subject: pkix.Name{}, - err: false, - }, - { - Subject: pkix.Name{ - CommonName: "csr-wrong-alg", - OrganizationalUnit: []string{"openshift"}, - }, - SignatureAlgorithm: 123, - err: true, - }, - } - for i, c := range cases { - csrTmpl := x509.CertificateRequest{ - Subject: c.Subject, - SignatureAlgorithm: c.SignatureAlgorithm, - } - if _, err := x509.CreateCertificateRequest(rand.Reader, &csrTmpl, key); (err != nil) != c.err { - no := "no" - if c.err { - no = "an" - } - t.Errorf("test case %d: expected %s error, got %v", i, no, err) - } - } -} diff --git a/pkg/asset/tls/utils.go b/pkg/asset/tls/utils.go deleted file mode 100644 index c374fcda086..00000000000 --- a/pkg/asset/tls/utils.go +++ /dev/null @@ -1,76 +0,0 @@ -package tls - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - - "github.com/pkg/errors" -) - -// PrivateKeyToPem converts an rsa.PrivateKey object to pem string -func PrivateKeyToPem(key *rsa.PrivateKey) []byte { - keyInBytes := x509.MarshalPKCS1PrivateKey(key) - keyinPem := pem.EncodeToMemory( - &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: keyInBytes, - }, - ) - return keyinPem -} - -// CertToPem converts an x509.Certificate object to a pem string -func CertToPem(cert *x509.Certificate) []byte { - certInPem := pem.EncodeToMemory( - &pem.Block{ - Type: "CERTIFICATE", - Bytes: cert.Raw, - }, - ) - return certInPem -} - -// CSRToPem converts an x509.CertificateRequest to a pem string -func CSRToPem(cert *x509.CertificateRequest) []byte { - certInPem := pem.EncodeToMemory( - &pem.Block{ - Type: "CERTIFICATE REQUEST", - Bytes: cert.Raw, - }, - ) - return certInPem -} - -// PublicKeyToPem converts an rsa.PublicKey object to pem string -func PublicKeyToPem(key *rsa.PublicKey) ([]byte, error) { - keyInBytes, err := x509.MarshalPKIXPublicKey(key) - if err != nil { - 
return nil, errors.Wrap(err, "failed to MarshalPKIXPublicKey") - } - keyinPem := pem.EncodeToMemory( - &pem.Block{ - Type: "RSA PUBLIC KEY", - Bytes: keyInBytes, - }, - ) - return keyinPem, nil -} - -// PemToPrivateKey converts a data block to rsa.PrivateKey. -func PemToPrivateKey(data []byte) (*rsa.PrivateKey, error) { - block, _ := pem.Decode(data) - if block == nil { - return nil, errors.Errorf("could not find a PEM block in the private key") - } - return x509.ParsePKCS1PrivateKey(block.Bytes) -} - -// PemToCertificate converts a data block to x509.Certificate. -func PemToCertificate(data []byte) (*x509.Certificate, error) { - block, _ := pem.Decode(data) - if block == nil { - return nil, errors.Errorf("could not find a PEM block in the certificate") - } - return x509.ParseCertificate(block.Bytes) -} diff --git a/pkg/asset/userprovided.go b/pkg/asset/userprovided.go deleted file mode 100644 index 0251982125b..00000000000 --- a/pkg/asset/userprovided.go +++ /dev/null @@ -1,50 +0,0 @@ -package asset - -import ( - "io/ioutil" - "os" - - "github.com/pkg/errors" - survey "gopkg.in/AlecAivazis/survey.v1" -) - -// GenerateUserProvidedAsset queries for input from the user. -func GenerateUserProvidedAsset(inputName string, question *survey.Question, envVarName string) (string, error) { - return generateUserProvidedAsset(inputName, question, envVarName, "") -} - -// GenerateUserProvidedAssetForPath queries for input from the user. The input can -// be read from a file specified in an environment variable. 
-func GenerateUserProvidedAssetForPath(inputName string, question *survey.Question, envVarName, pathEnvVarName string) (string, error) { - return generateUserProvidedAsset(inputName, question, envVarName, pathEnvVarName) -} - -func generateUserProvidedAsset(inputName string, question *survey.Question, envVarName, pathEnvVarName string) (response string, err error) { - defer func() { - if err != nil { - err = errors.Wrapf(err, "failed to acquire user-provided input %s", inputName) - } - }() - - if value, ok := os.LookupEnv(envVarName); ok { - response = value - } else if path, ok := os.LookupEnv(pathEnvVarName); ok { - value, err := ioutil.ReadFile(path) - if err != nil { - return "", errors.Wrapf(err, "failed to read file from %s", pathEnvVarName) - } - response = string(value) - } - - if response == "" { - if err := survey.Ask([]*survey.Question{question}, &response); err != nil { - return "", errors.Wrap(err, "failed to Ask") - } - } else if question.Validate != nil { - if err := question.Validate(response); err != nil { - return "", errors.Wrap(err, "validation failed") - } - } - - return response, nil -} diff --git a/pkg/assets/asset.go b/pkg/assets/asset.go new file mode 100644 index 00000000000..d0782b4f50b --- /dev/null +++ b/pkg/assets/asset.go @@ -0,0 +1,209 @@ +package assets + +import ( + "bytes" + "context" + "crypto/sha1" + "encoding/json" + "io/ioutil" + "os" + "path" + "path/filepath" + "regexp" + "sort" + "strings" + + "github.com/go-log/log" + "github.com/pkg/errors" +) + +var filenameRegexp = regexp.MustCompile("[^A-Za-z0-9.-]+") +var subDir = ".state" + +// GetByString retrieves an asset from a store. Returning an Asset +// (instead of an *Asset) creates a copy to ensure the caller cannot +// adjust the immutable *Asset held in the store. It returns +// os.ErrNotExist if the asset is not found. +type GetByString func(ctx context.Context, id string) (asset Asset, err error) + +// GetByBytes retrieves an asset from a store. 
Returning an Asset +// (instead of an *Asset) creates a copy to ensure the caller cannot +// adjust the immutable *Asset held in the store. It returns +// os.ErrNotExist if the asset is not found. +type GetByBytes func(ctx context.Context, id []byte) (asset Asset, err error) + +// Rebuild rebuilds an asset after a parent changes. +type Rebuild func(ctx context.Context, getByName GetByString) (*Asset, error) + +// Asset is a node in the asset graph. +type Asset struct { + // Parents holds hashes for our parent assets. + Parents []Reference `json:"parents,omitempty"` + + // Name summarizes the semantic meaning of this asset in a form + // which could be used as a filename. + Name string `json:"name,omitempty"` + + // Data holds the asset payload (e.g. the password for a password + // asset). + Data []byte `json:"data,omitempty"` + + // Frozen marks assets that shoud no longer be regenerated + // (e.g. because a user has overridden their values). + Frozen bool `json:"frozen,omitempty"` + + // RebuildHelper rebuilds this asset based on updated dependencies. + RebuildHelper Rebuild `json:"-"` +} + +// Hash returns a cryptographic hash for this asset. +func (asset *Asset) Hash() (hash []byte, err error) { + sort.Sort(referenceSlice(asset.Parents)) + data, err := json.Marshal(asset) + if err != nil { + return nil, err + } + + hsh := sha1.Sum(data) + return hsh[:], nil +} + +// GetParents retrieves parents by name and adds them to Asset.Parent. 
+func (asset *Asset) GetParents(ctx context.Context, getByName GetByString, names ...string) (parents map[string]*Asset, err error) { + parents = make(map[string]*Asset) + for _, name := range names { + prnt, err := getByName(ctx, name) + if err != nil { + return nil, err + } + parents[name] = &prnt + + hash, err := parents[name].Hash() + if err != nil { + return nil, errors.Wrapf(err, "hash %q parent %q", asset.Name, name) + } + + asset.Parents = append(asset.Parents, Reference{ + Name: name, + Hash: hash, + }) + } + + return parents, nil +} + +// path returns a slugged version of the asset name. +func (asset *Asset) path() (subPath string) { + segments := []string{} + remaining := asset.Name + var file string + for remaining != "." { + remaining, file = path.Split(remaining) + segments = append([]string{filenameRegexp.ReplaceAllString(file, "-")}, segments...) + next := path.Dir(remaining) // drop any trailing slash + if next == remaining { + break + } + remaining = next + } + return filepath.Join(segments...) +} + +// Write writes the asset to the given directory, using a slugged +// version of the name as the filename. If getByHash is non-nil, +// Write will recurse through parent assets. If written is non-nil, +// every filename written will be added to the map with a true value. 
+func (asset *Asset) Write(ctx context.Context, directory string, getByHash GetByBytes, written map[string]bool) (err error) { + data, err := json.Marshal(asset) + if err != nil { + return err + } + + subPath := asset.path() + path := filepath.Join(directory, subDir, subPath) + dir := filepath.Dir(path) + err = os.MkdirAll(dir, 0777) + if err != nil { + return err + } + + err = ioutil.WriteFile(path, data, 0666) + if err != nil { + return err + } + + path = filepath.Join(directory, subPath) + dir = filepath.Dir(path) + err = os.MkdirAll(dir, 0777) + if err != nil { + return err + } + err = ioutil.WriteFile(path, asset.Data, 0666) + if err != nil { + return err + } + + if written != nil { + written[subPath] = true + written[filepath.Join(subDir, subPath)] = true + } + + if getByHash != nil { + for _, hash := range asset.Parents { + parent, err := getByHash(ctx, hash.Hash) + if err != nil { + return errors.Errorf("failed to retrieve %x by hash", hash) + } + + err = (&parent).Write(ctx, directory, getByHash, written) + if err != nil { + return err + } + } + } + + return nil +} + +// Read reads the asset from the given directory, using a slugged +// version of the name as the filename. 
+func (asset *Asset) Read(ctx context.Context, directory string, logger log.Logger) (err error) { + name := asset.Name + subPath := asset.path() + path := filepath.Join(directory, subDir, subPath) + data, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + err = json.Unmarshal(data, &asset) + if err != nil { + return err + } + if asset.Name != name { + return errors.Errorf("name %q read from %q does not match the expected %q", asset.Name, path, name) + } + + path = filepath.Join(directory, subPath) + data, err = ioutil.ReadFile(path) + if err == nil && !bytes.Equal(data, asset.Data) { + if asset.Parents == nil { + logger.Logf("%q data changed via %q", name, path) + } else { + parents := make([]string, 0, len(asset.Parents)) + for _, parent := range asset.Parents { + parents = append(parents, parent.Name) + } + logger.Logf("%q data changed via %q, future changes to the previous parents (%s) will no longer update this asset", name, path, strings.Join(parents, ", ")) + } + asset.Data = data + asset.Parents = nil + asset.Frozen = true + asset.RebuildHelper = ConstantDataRebuilder(ctx, name, data, true) + } else if asset.Frozen { + logger.Logf("%q is frozen due to a previous user change", name) + asset.RebuildHelper = ConstantDataRebuilder(ctx, name, asset.Data, true) + } + + return nil +} diff --git a/pkg/assets/asset_test.go b/pkg/assets/asset_test.go new file mode 100644 index 00000000000..5e1f47dc09a --- /dev/null +++ b/pkg/assets/asset_test.go @@ -0,0 +1,41 @@ +package assets + +import ( + "context" + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAssetWriteRead(t *testing.T) { + ctx := context.Background() + tempDir, err := ioutil.TempDir("", "openshift-install-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + + asset := &Asset{ + Parents: []Reference{{ + Name: "a", + Hash: []byte("\x00\x01\x02\x03"), + }}, + Name: "b", + Data: []byte("b-data"), + } + + err = asset.Write(ctx, tempDir, 
nil, nil) + if err != nil { + t.Fatal(err) + } + + recovered := &Asset{Name: asset.Name} + err = recovered.Read(ctx, tempDir, t) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, asset, recovered) +} diff --git a/pkg/assets/assets.go b/pkg/assets/assets.go new file mode 100644 index 00000000000..9bad04ae2cc --- /dev/null +++ b/pkg/assets/assets.go @@ -0,0 +1,352 @@ +// Package assets defines a generic Merkle DAG for assets. +package assets + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + + "github.com/go-log/log" + "github.com/pkg/errors" +) + +// GetData retrieves injected data by asset name during generation. +// It returns os.ErrNotExist if the asset is not found. +type GetData func(ctx context.Context, name string) (data []byte, err error) + +// Put adds an asset to a store. +type Put func(asset Asset) (hash []byte, err error) + +// Assets holds a directed, acyclic graph of assets, which can be used +// for building, and rebuilding, before installation. +type Assets struct { + // assetsByHash stores all the assets by hash. + assetsByHash map[string]*Asset + + // assetsByName stores the most-recently-put asset for each name. + assetsByName map[string]*Asset + + // Root references the root asset. + Root Reference + + // Rebuilders registers asset rebuilders by name. + Rebuilders map[string]Rebuild +} + +// Prune removes any assets from the store which are not accessible +// from Root. +func (assets *Assets) Prune() (err error) { + currentHashes := map[string]bool{} + currentNames := map[string]bool{} + stack := [][]byte{assets.Root.Hash} + for len(stack) > 0 { + hash := stack[len(stack)-1] + stack = stack[:len(stack)-1] + if currentHashes[string(hash)] { + continue + } + currentHashes[string(hash)] = true + asset, ok := assets.assetsByHash[string(hash)] + if !ok { + continue // Already over-pruned. 
+ } + currentNames[asset.Name] = true + for _, reference := range asset.Parents { + stack = append(stack, reference.Hash) + } + } + + for hash := range assets.assetsByHash { + if !currentHashes[string(hash)] { + delete(assets.assetsByHash, hash) + } + } + + for name := range assets.assetsByName { + if !currentNames[name] { + delete(assets.assetsByName, name) + } + } + + return nil +} + +// rebuildAsset is a per-asset helper for Assets.Rebuild. It rebuilds +// the asset and returns the new asset and its hash on success. +// Ancestors are rebuilt inside getByName as they are retrieved. +func (assets *Assets) rebuildAsset(ctx context.Context, asset *Asset, getByName GetByString, put Put, rebuilt map[string]Reference, logger log.Logger) (newAsset *Asset, hash []byte, err error) { + logger.Logf("rebuilding %q", asset.Name) + + oldReference := Reference{Name: asset.Name} + oldReference.Hash, err = asset.Hash() + if err != nil { + return nil, nil, err + } + + if asset.RebuildHelper == nil { + return nil, nil, errors.Errorf("cannot rebuild %s without a rebuilder", oldReference.String()) + } + + newAsset, err = asset.RebuildHelper(ctx, getByName) + if err != nil { + return nil, nil, err + } + if newAsset == nil { + return nil, nil, errors.Errorf("RebuildHelper returned nil for %q", asset.Name) + } + + newReference := Reference{Name: newAsset.Name} + newReference.Hash, err = put(*newAsset) + if err != nil { + return newAsset, nil, err + } + + if bytes.Equal(newReference.Hash, oldReference.Hash) { + logger.Logf("%q is still fresh (%x)", asset.Name, string(oldReference.Hash)) + rebuilt[oldReference.String()] = oldReference + return newAsset, oldReference.Hash, nil + } + + logger.Logf("rebuilt %q (%x -> %x)", asset.Name, oldReference.Hash, newReference.Hash) + rebuilt[oldReference.String()] = newReference + return newAsset, newReference.Hash, nil +} + +// getByName is a helper for Assets.Rebuild. 
It rebuilds checks for +// injections and rebuilds any requested assets that aren't already in +// the store. +func (assets *Assets) getByName(ctx context.Context, name string, getByName GetByString, getInjection GetByString, rebuilt map[string]Reference, logger log.Logger) (asset *Asset, err error) { + assetValue, err := assets.GetByName(ctx, name) + asset = &assetValue + if err == nil || !os.IsNotExist(err) { + return asset, err + } + + hash, err := asset.Hash() + if err != nil { + return asset, err + } + + oldReference := Reference{ + Name: name, + Hash: hash, + } + + if getInjection != nil { + assetValue, err = getInjection(ctx, name) + if err == nil { + asset = &assetValue + } else if !os.IsNotExist(err) { + return &assetValue, errors.Wrapf(err, "inject content for %q", name) + } + } + + asset, newHash, err := assets.rebuildAsset(ctx, asset, getByName, assets.Put, rebuilt, logger) + if err != nil { + return asset, errors.Wrapf(err, "retrieve %q by name", name) + } + + if bytes.Equal(newHash, oldReference.Hash) { + logger.Logf("rebuilt %q (%x -> %x)", name, oldReference.Hash, newHash) + } + + return asset, nil +} + +// Rebuild rebuilds the asset store, pulling in any injected data. 
+func (assets *Assets) Rebuild(ctx context.Context, getInjection GetByString, logger log.Logger) (err error) { + rebuilt := map[string]Reference{} + + var getByName GetByString + getByName = func(ctx context.Context, name string) (Asset, error) { + asset, err := assets.getByName(ctx, name, getByName, getInjection, rebuilt, logger) + if asset == nil { + asset = &Asset{Name: name} + } + return *asset, err + } + + var asset Asset + var newHash []byte + if assets.Root.Hash != nil { + asset, err = assets.GetByHash(ctx, assets.Root.Hash) + if err != nil && !os.IsNotExist(err) { + return errors.Wrapf(err, "failed to retrieve root %x by hash", assets.Root.Hash) + } + + var newAsset *Asset + newAsset, newHash, err = assets.rebuildAsset(ctx, &asset, getByName, assets.Put, rebuilt, logger) + if err != nil { + return err + } + asset = *newAsset + } else if assets.Root.Name != "" { + asset, err = getByName(ctx, assets.Root.Name) + if err != nil && !os.IsNotExist(err) { + return errors.Wrap(err, "retrieve root") + } + + newHash, err = asset.Hash() + if err != nil && !os.IsNotExist(err) { + return err + } + } else { + return nil + } + + assets.Root.Hash = newHash + assets.Root.Name = asset.Name + return nil +} + +// GetByHash retrieves an asset from the store by hash. +func (assets *Assets) GetByHash(ctx context.Context, hash []byte) (asset Asset, err error) { + pointer, ok := assets.assetsByHash[string(hash)] + if ok { + return *pointer, nil + } + return asset, os.ErrNotExist +} + +// GetByName retrieves an asset from the store by name. +func (assets *Assets) GetByName(ctx context.Context, name string) (asset Asset, err error) { + pointer, ok := assets.assetsByName[name] + if ok { + return *pointer, nil + } + return asset, os.ErrNotExist +} + +// Put adds an asset to the store. 
+func (assets *Assets) Put(asset Asset) (hash []byte, err error) { + hash, err = (&asset).Hash() + if err != nil { + return hash, err + } + + if assets.assetsByHash == nil { + assets.assetsByHash = map[string]*Asset{} + } + assets.assetsByHash[string(hash)] = &asset + + if assets.assetsByName == nil { + assets.assetsByName = map[string]*Asset{} + } + assets.assetsByName[asset.Name] = &asset + return hash, nil +} + +// Write writes assets to the target directory. If prune is true, it +// also removes any files from that directory which it did not write +// (for example, leftovers from previous invocations). +func (assets *Assets) Write(ctx context.Context, directory string, prune bool) (err error) { + err = os.MkdirAll(filepath.Join(directory, subDir), 0777) + if err != nil { + return err + } + + written := map[string]bool{} + + if assets.Root.Hash != nil { + asset, ok := assets.assetsByHash[string(assets.Root.Hash)] + if !ok { + return errors.Errorf("failed to retrieve root %x by hash", assets.Root.Hash) + } + + err = asset.Write(ctx, directory, assets.GetByHash, written) + if err != nil { + return err + } + } + + for path := range written { + remaining := path + for remaining != "." { + written[remaining] = true + remaining = filepath.Dir(remaining) + } + } + + if prune { + filepath.Walk(directory, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if path == directory { + return nil + } + + rel, err := filepath.Rel(directory, path) + if err != nil { + return err + } + + if written[rel] { + return nil + } + fmt.Printf("XXX prune %q\n", path) + + if info.IsDir() { + err = os.RemoveAll(path) + if err != nil { + return err + } + return filepath.SkipDir + } + + return os.Remove(path) + }) + } + + return nil +} + +// Read rebuilds the asset store, pulling in data from a previous +// Assets.Write call and falling back to getDefault for requested +// assets that aren't in that directory. 
+func (assets *Assets) Read(ctx context.Context, directory string, getDefault GetData, logger log.Logger) (err error) { + return assets.Rebuild(ctx, func(ctx context.Context, name string) (Asset, error) { + logger.Logf("checking injection for %q", name) + asset := &Asset{Name: name} + loaded := false + err := asset.Read(ctx, directory, logger) + if err == nil { + loaded = true + logger.Logf("loaded %q from %q", name, asset.path()) + } else if !os.IsNotExist(err) { + return *asset, err + } + + if !loaded && getDefault != nil { + var data []byte + data, err = getDefault(ctx, name) + if err == nil { + asset.RebuildHelper = ConstantDataRebuilder(ctx, name, data, false) + if len(data) > 10 { + logger.Logf("default %q to \"%s...\"", name, string(data[:10])) + } else { + logger.Logf("default %q to %q", name, string(data)) + } + } else if !os.IsNotExist(err) { + return *asset, errors.Wrapf(err, "defaulting %q", name) + } + } + + if asset.RebuildHelper == nil { + var ok bool + asset.RebuildHelper, ok = assets.Rebuilders[name] + if !ok { + if loaded { + asset.RebuildHelper = ConstantDataRebuilder(ctx, name, asset.Data, false) + } else { + return *asset, errors.Wrapf(err, "cannot inject %q without a file, default, or rebuilder", name) + } + } + } + + return *asset, nil + }, logger) +} diff --git a/pkg/assets/assets_test.go b/pkg/assets/assets_test.go new file mode 100644 index 00000000000..c41ef05a9c3 --- /dev/null +++ b/pkg/assets/assets_test.go @@ -0,0 +1,302 @@ +package assets + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPutGet(t *testing.T) { + ctx := context.Background() + assets := &Assets{} + + assetA := Asset{ + Name: "a", + Data: []byte("a-data"), + } + hashA, err := assets.Put(assetA) + if err != nil { + t.Fatal(err) + } + + assetB := Asset{ + Name: "a", + Data: []byte("b-data"), + } + hashB, err := assets.Put(assetB) + if err != nil { + t.Fatal(err) + } + + 
t.Run("GetByHash", func(t *testing.T) { + for _, test := range []struct { + hash []byte + asset *Asset + }{ + { + hash: hashA, + asset: &assetA, + }, + { + hash: hashB, + asset: &assetB, + }, + } { + t.Run(fmt.Sprintf("%x", test.hash), func(t *testing.T) { + retrieved, err := assets.GetByHash(ctx, test.hash) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(retrieved, *test.asset) { + t.Errorf("%+v != %+v", retrieved, test.asset) + } + }) + } + }) + + t.Run("GetByName", func(t *testing.T) { + retrieved, err := assets.GetByName(ctx, "a") + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(retrieved, assetB) { + t.Errorf("%+v != %+v", retrieved, assetB) + } + }) +} + +func buildB(ctx context.Context, getByName GetByString) (asset *Asset, err error) { + asset = &Asset{ + Name: "b", + RebuildHelper: buildB, + } + + parents, err := asset.GetParents(ctx, getByName, "a/a") + if err != nil { + return nil, err + } + + asset.Data = append(parents["a/a"].Data, []byte(", modified by b")...) + return asset, nil +} + +func buildC(ctx context.Context, getByName GetByString) (asset *Asset, err error) { + asset = &Asset{ + Name: "c", + RebuildHelper: buildC, + } + + parents, err := asset.GetParents(ctx, getByName, "b") + if err != nil { + return nil, err + } + + asset.Data = append(parents["b"].Data, []byte(", modified by c")...) 
+ return asset, nil +} + +func newAssets() *Assets { + return &Assets{ + Root: Reference{ + Name: "c", + }, + Rebuilders: map[string]Rebuild{ + "b": buildB, + "c": buildC, + }, + } +} + +func defaultA(ctx context.Context, name string) (data []byte, err error) { + if name == "a/a" { + return []byte("a-data"), nil + } + return nil, os.ErrNotExist +} + +func TestAssetsRead(t *testing.T) { + ctx := context.Background() + tempDir, err := ioutil.TempDir("", "openshift-install-") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tempDir) + + t.Run("from scratch with a broken dependency", func(t *testing.T) { + assets := newAssets() + err = assets.Read(ctx, tempDir, nil, t) + if err == nil { + t.Fatal("unexpected success") + } + assert.Regexp(t, "^retrieve root: retrieve \"c\" by name: retrieve \"b\" by name: inject content for \"a/a\": cannot inject \"a/a\" without a file, default, or rebuilder: open .*a: no such file or directory$", err.Error()) + }) + + var rootHash []byte + t.Run("from scratch", func(t *testing.T) { + assets := newAssets() + err = assets.Read(ctx, tempDir, defaultA, t) + if err != nil { + t.Fatal(err) + } + + rootHash = assets.Root.Hash + assetC, err := assets.GetByHash(ctx, assets.Root.Hash) + if err != nil { + t.Fatal(err) + } + + expected := "a-data, modified by b, modified by c" + if string(assetC.Data) != expected { + t.Fatalf("unexpected new asset C data: %q != %q", string(assetC.Data), expected) + } + + refB := assetC.Parents[0] + if refB.Name != "b" { + t.Fatalf("asset %q has an unexpected parent name %q", assetC.Name, refB.Name) + } + + assetB, err := assets.GetByHash(ctx, refB.Hash) + if err != nil { + t.Fatal(err) + } + + expected = "a-data, modified by b" + if string(assetB.Data) != expected { + t.Fatalf("unexpected new asset %q data: %q != %q", refB.Name, string(assetB.Data), expected) + } + + refA := assetB.Parents[0] + if refA.Name != "a/a" { + t.Fatalf("asset %q has an unexpected parent name %q", assetB.Name, refA.Name) + } + + 
assetA, err := assets.GetByHash(ctx, refA.Hash) + if err != nil { + t.Fatal(err) + } + + expected = "a-data" + if string(assetA.Data) != expected { + t.Fatalf("unexpected new asset %q data: %q != %q", refA.Name, string(assetA.Data), expected) + } + + err = assets.Write(ctx, tempDir, false) + if err != nil { + t.Fatal(err) + } + }) + + t.Run("read without edits", func(t *testing.T) { + assets := newAssets() + err = assets.Read(ctx, tempDir, defaultA, t) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, rootHash, assets.Root.Hash) + }) + + t.Run("read with leaf edit", func(t *testing.T) { + err = ioutil.WriteFile(filepath.Join(tempDir, "a", "a"), []byte("edited a"), 0666) + if err != nil { + t.Fatal(err) + } + + assets := newAssets() + err = assets.Read(ctx, tempDir, defaultA, t) + if err != nil { + t.Fatal(err) + } + + assert.NotEqual(t, rootHash, assets.Root.Hash) + + assetC, err := assets.GetByHash(ctx, assets.Root.Hash) + if err != nil { + t.Fatal(err) + } + + expected := "edited a, modified by b, modified by c" + if string(assetC.Data) != expected { + t.Fatalf("unexpected new asset C data: %q != %q", string(assetC.Data), expected) + } + + err = assets.Prune() + if err != nil { + t.Fatal(err) + } + + _, err = assets.GetByHash(ctx, rootHash) + if !os.IsNotExist(err) { + t.Fatalf("can retrieve the original root asset by hash (%x) after pruning: %v", rootHash, err) + } + + rootHash = assets.Root.Hash + err = assets.Write(ctx, tempDir, false) + if err != nil { + t.Fatal(err) + } + }) + + t.Run("reread after leaf edit", func(t *testing.T) { + assets := newAssets() + err = assets.Read(ctx, tempDir, defaultA, t) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, rootHash, assets.Root.Hash) + }) + + t.Run("read with branch edit", func(t *testing.T) { + err = ioutil.WriteFile(filepath.Join(tempDir, "b"), []byte("edited b"), 0666) + if err != nil { + t.Fatal(err) + } + + assets := newAssets() + err = assets.Read(ctx, tempDir, defaultA, t) + if err != nil { + 
t.Fatal(err) + } + + assetC, err := assets.GetByHash(ctx, assets.Root.Hash) + if err != nil { + t.Fatal(err) + } + + expected := "edited b, modified by c" + if string(assetC.Data) != expected { + t.Fatalf("unexpected new asset C data: %q != %q", string(assetC.Data), expected) + } + + assetB, err := assets.GetByHash(ctx, assetC.Parents[0].Hash) + if err != nil { + t.Fatal(err) + } + assert.Len(t, assetB.Parents, 0) + + rootHash = assets.Root.Hash + err = assets.Write(ctx, tempDir, false) + if err != nil { + t.Fatal(err) + } + }) + + t.Run("reread after branch edit", func(t *testing.T) { + assets := newAssets() + err = assets.Read(ctx, tempDir, defaultA, t) + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, rootHash, assets.Root.Hash) + }) +} diff --git a/pkg/assets/rebuilder.go b/pkg/assets/rebuilder.go new file mode 100644 index 00000000000..2a6198226d4 --- /dev/null +++ b/pkg/assets/rebuilder.go @@ -0,0 +1,18 @@ +package assets + +import ( + "context" +) + +// ConstantDataRebuilder returns a Rebuild function which sets the +// data to a constant value. +func ConstantDataRebuilder(ctx context.Context, name string, data []byte, frozen bool) Rebuild { + return func(ctx context.Context, getByName GetByString) (asset *Asset, err error) { + return &Asset{ + Name: name, + Data: data, + Frozen: frozen, + RebuildHelper: ConstantDataRebuilder(ctx, name, data, frozen), + }, nil + } +} diff --git a/pkg/assets/reference.go b/pkg/assets/reference.go new file mode 100644 index 00000000000..c71c5dd0791 --- /dev/null +++ b/pkg/assets/reference.go @@ -0,0 +1,34 @@ +package assets + +import ( + "fmt" +) + +// Reference holds an Asset reference. +type Reference struct { + // Name holds the the name of the referenced asset. This value can + // be used to retrieve the latest value of that asset's state. + Name string `json:"name,omitempty"` + + // Hash holds the hash of the referenced asset. This value pins a + // specific value of that asset's state. 
+	Hash []byte `json:"hash,omitempty"`
+}
+
+type referenceSlice []Reference
+
+func (ref *Reference) String() string {
+	return fmt.Sprintf("%q (%x)", ref.Name, ref.Hash)
+}
+
+func (refs referenceSlice) Len() int {
+	return len(refs)
+}
+
+func (refs referenceSlice) Less(i, j int) bool {
+	return refs[i].Name < refs[j].Name
+}
+
+func (refs referenceSlice) Swap(i, j int) {
+	refs[i], refs[j] = refs[j], refs[i]
+}
diff --git a/pkg/destroy/bootstrap/bootstrap.go b/pkg/destroy/bootstrap/bootstrap.go
index c66f3eb7ea7..c2273499e02 100644
--- a/pkg/destroy/bootstrap/bootstrap.go
+++ b/pkg/destroy/bootstrap/bootstrap.go
@@ -6,15 +6,15 @@ import (
 	"os"
 	"path/filepath"
 
-	"github.com/openshift/installer/pkg/asset/cluster"
 	"github.com/openshift/installer/pkg/terraform"
+	"github.com/openshift/installer/pkg/types"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
 
 // Destroy uses Terraform to remove bootstrap resources.
 func Destroy(dir string) (err error) {
-	metadata, err := cluster.LoadMetadata(dir)
+	metadata, err := types.LoadClusterMetadata(dir)
 	if err != nil {
 		return err
 	}
@@ -24,7 +24,7 @@ func Destroy(dir string) (err error) {
 		return errors.New("no platform configured in metadata")
 	}
 
-	copyNames := []string{terraform.StateFileName, cluster.TfVarsFileName}
+	copyNames := []string{terraform.StateFileName, "terraform.tfvars"}
 
 	if platform == "libvirt" {
 		err = ioutil.WriteFile(filepath.Join(dir, "disable-bootstrap.auto.tfvars"), []byte(`{
diff --git a/pkg/destroy/destroyer.go b/pkg/destroy/destroyer.go
index 13b04d727ff..c7ee82fd983 100644
--- a/pkg/destroy/destroyer.go
+++ b/pkg/destroy/destroyer.go
@@ -4,7 +4,6 @@ import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 
-	"github.com/openshift/installer/pkg/asset/cluster"
 	"github.com/openshift/installer/pkg/types"
 )
 
@@ -22,7 +21,7 @@
 // New returns a Destroyer based on `metadata.json` in `rootDir`.
 
func New(logger logrus.FieldLogger, rootDir string) (Destroyer, error) { - metadata, err := cluster.LoadMetadata(rootDir) + metadata, err := types.LoadClusterMetadata(rootDir) if err != nil { return nil, err } diff --git a/pkg/installerassets/admin.go b/pkg/installerassets/admin.go new file mode 100644 index 00000000000..29144595fce --- /dev/null +++ b/pkg/installerassets/admin.go @@ -0,0 +1,66 @@ +package installerassets + +import ( + "context" + "os" + + "github.com/openshift/installer/pkg/validate" + "github.com/pkg/errors" + survey "gopkg.in/AlecAivazis/survey.v1" +) + +func getAdminEmail(ctx context.Context) ([]byte, error) { + value := os.Getenv("OPENSHIFT_INSTALL_EMAIL_ADDRESS") + if value != "" { + err := validate.Email(value) + if err != nil { + return nil, err + } + return []byte(value), nil + } + + question := &survey.Question{ + Prompt: &survey.Input{ + Message: "Email Address", + Help: "The email address of the cluster administrator. This will be used to log in to the console.", + }, + Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { + return validate.Email(ans.(string)) + }), + } + + var response string + err := survey.Ask([]*survey.Question{question}, &response) + if err != nil { + return nil, errors.Wrap(err, "ask") + } + + return []byte(response), nil +} + +func getAdminPassword(ctx context.Context) ([]byte, error) { + value := os.Getenv("OPENSHIFT_INSTALL_PASSWORD") + if value != "" { + return []byte(value), nil + } + + question := &survey.Question{ + Prompt: &survey.Password{ + Message: "Password", + Help: "The password of the cluster administrator. 
This will be used to log in to the console.", + }, + } + + var response string + err := survey.Ask([]*survey.Question{question}, &response) + if err != nil { + return nil, errors.Wrap(err, "ask") + } + + return []byte(response), nil +} + +func init() { + Defaults["admin/email"] = getAdminEmail + Defaults["admin/password"] = getAdminPassword +} diff --git a/pkg/installerassets/aws/ami.go b/pkg/installerassets/aws/ami.go new file mode 100644 index 00000000000..f1721ff0fc5 --- /dev/null +++ b/pkg/installerassets/aws/ami.go @@ -0,0 +1,37 @@ +package aws + +import ( + "context" + "time" + + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" + "github.com/openshift/installer/pkg/rhcos" + "github.com/pkg/errors" +) + +func amiRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "aws/ami", + RebuildHelper: amiRebuilder, + } + + parents, err := asset.GetParents(ctx, getByName, "aws/region") + if err != nil { + return nil, err + } + + amiContext, cancel := context.WithTimeout(ctx, 60*time.Second) + defer cancel() + ami, err := rhcos.AMI(amiContext, rhcos.DefaultChannel, string(parents["aws/region"].Data)) + if err != nil { + return nil, errors.Wrap(err, "failed to determine default AMI") + } + + asset.Data = []byte(ami) + return asset, nil +} + +func init() { + installerassets.Rebuilders["aws/ami"] = amiRebuilder +} diff --git a/pkg/installerassets/aws/doc.go b/pkg/installerassets/aws/doc.go new file mode 100644 index 00000000000..59d112f363c --- /dev/null +++ b/pkg/installerassets/aws/doc.go @@ -0,0 +1,2 @@ +// Package aws contains AWS-specific helpers for the asset Merkle DAG. 
+package aws diff --git a/pkg/installerassets/aws/machines.go b/pkg/installerassets/aws/machines.go new file mode 100644 index 00000000000..14e83ea76dc --- /dev/null +++ b/pkg/installerassets/aws/machines.go @@ -0,0 +1,165 @@ +package aws + +import ( + "context" + "fmt" + "strconv" + + "github.com/ghodss/yaml" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/pointer" + awsprovider "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsproviderconfig/v1alpha1" + clusterapi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" +) + +func masterMachinesRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "manifests/aws/99_openshift-cluster-api_master-machines.yaml", + RebuildHelper: masterMachinesRebuilder, + } + + parents, err := asset.GetParents( + ctx, + getByName, + "aws/ami", + "aws/instance-type", + "aws/region", + "aws/user-tags", + "aws/zones", + "cluster-id", + "cluster-name", + "machines/master-count", + ) + if err != nil { + return nil, err + } + + ami := string(parents["aws/ami"].Data) + clusterID := string(parents["cluster-id"].Data) + clusterName := string(parents["cluster-name"].Data) + instanceType := string(parents["aws/instance-type"].Data) + region := string(parents["aws/region"].Data) + var userTags map[string]string + err = yaml.Unmarshal(parents["aws/user-tags"].Data, &userTags) + if err != nil { + return nil, errors.Wrap(err, "unmarshal user tags") + } + + masterCount, err := strconv.ParseUint(string(parents["machines/master-count"].Data), 10, 32) + if err != nil { + return nil, errors.Wrap(err, "parse master count") + } + + var zones []string + err = yaml.Unmarshal(parents["aws/zones"].Data, &zones) + if err != nil { + return nil, errors.Wrap(err, "unmarshal zones") + } + + role 
:= "master" + userDataSecret := fmt.Sprintf("%s-user-data", role) + poolName := role // FIXME: knob to control this? + total := int64(masterCount) + + var machines []runtime.RawExtension + for idx := int64(0); idx < total; idx++ { + zone := zones[int(idx)%len(zones)] + provider, err := provider(clusterID, clusterName, region, zone, instanceType, ami, userTags, role, userDataSecret) + if err != nil { + return nil, errors.Wrap(err, "create provider") + } + + provider.PublicIP = pointer.BoolPtr(true) + provider.LoadBalancers = []awsprovider.LoadBalancerReference{ + { + Name: fmt.Sprintf("%s-ext", clusterName), + Type: awsprovider.NetworkLoadBalancerType, + }, + { + Name: fmt.Sprintf("%s-int", clusterName), + Type: awsprovider.NetworkLoadBalancerType, + }, + } + + machine := clusterapi.Machine{ + TypeMeta: metav1.TypeMeta{ + Kind: "Machine", + APIVersion: "cluster.k8s.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%d", clusterName, poolName, idx), + Namespace: "openshift-cluster-api", + Labels: map[string]string{ + "sigs.k8s.io/cluster-api-cluster": clusterName, + "sigs.k8s.io/cluster-api-machine-role": role, + "sigs.k8s.io/cluster-api-machine-type": role, + }, + }, + Spec: clusterapi.MachineSpec{ + ProviderConfig: clusterapi.ProviderConfig{ + Value: &runtime.RawExtension{Object: provider}, + }, + // we don't need to set Versions, because we control those via operators. 
+ }, + } + + machines = append(machines, runtime.RawExtension{Object: &machine}) + } + + list := &metav1.List{ + TypeMeta: metav1.TypeMeta{ + Kind: "List", + APIVersion: "v1", + }, + Items: machines, + } + + asset.Data, err = yaml.Marshal(list) + if err != nil { + return nil, err + } + + return asset, nil +} + +func provider(clusterID, clusterName, region, zone, instanceType, ami string, userTags map[string]string, role, userDataSecret string) (*awsprovider.AWSMachineProviderConfig, error) { + tags, err := tagsFromUserTags(clusterID, clusterName, userTags) + if err != nil { + return nil, errors.Wrap(err, "failed to create awsprovider.TagSpecifications from UserTags") + } + return &awsprovider.AWSMachineProviderConfig{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "aws.cluster.k8s.io/v1alpha1", + Kind: "AWSMachineProviderConfig", + }, + InstanceType: instanceType, + AMI: awsprovider.AWSResourceReference{ID: &ami}, + Tags: tags, + IAMInstanceProfile: &awsprovider.AWSResourceReference{ID: pointer.StringPtr(fmt.Sprintf("%s-%s-profile", clusterName, role))}, + UserDataSecret: &corev1.LocalObjectReference{Name: userDataSecret}, + Subnet: awsprovider.AWSResourceReference{ + Filters: []awsprovider.Filter{{ + Name: "tag:Name", + Values: []string{fmt.Sprintf("%s-%s-%s", clusterName, role, zone)}, + }}, + }, + Placement: awsprovider.Placement{Region: region, AvailabilityZone: zone}, + SecurityGroups: []awsprovider.AWSResourceReference{{ + Filters: []awsprovider.Filter{{ + Name: "tag:Name", + Values: []string{fmt.Sprintf("%s_%s_sg", clusterName, role)}, + }}, + }}, + }, nil +} + +func init() { + installerassets.Rebuilders["manifests/aws/99_openshift-cluster-api_master-machines.yaml"] = masterMachinesRebuilder + installerassets.Defaults["aws/instance-type"] = installerassets.ConstantDefault([]byte("m4.large")) +} diff --git a/pkg/installerassets/aws/machinesets.go b/pkg/installerassets/aws/machinesets.go new file mode 100644 index 00000000000..e2ad898ac3b --- /dev/null +++ 
b/pkg/installerassets/aws/machinesets.go @@ -0,0 +1,137 @@ +package aws + +import ( + "context" + "fmt" + + "github.com/ghodss/yaml" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clusterapi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" +) + +func workerMachineSetsRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "manifests/aws/99_openshift-cluster-api_worker-machinesets.yaml", + RebuildHelper: workerMachineSetsRebuilder, + } + + parents, err := asset.GetParents( + ctx, + getByName, + "aws/ami", + "aws/instance-type", + "aws/region", + "aws/user-tags", + "aws/zones", + "cluster-id", + "cluster-name", + ) + if err != nil { + return nil, err + } + + ami := string(parents["aws/ami"].Data) + clusterID := string(parents["cluster-id"].Data) + clusterName := string(parents["cluster-name"].Data) + instanceType := string(parents["aws/instance-type"].Data) + region := string(parents["aws/region"].Data) + var userTags map[string]string + err = yaml.Unmarshal(parents["aws/user-tags"].Data, &userTags) + if err != nil { + return nil, errors.Wrap(err, "unmarshal user tags") + } + + var zones []string + err = yaml.Unmarshal(parents["aws/zones"].Data, &zones) + if err != nil { + return nil, errors.Wrap(err, "unmarshal zones") + } + numZones := int64(len(zones)) + + role := "worker" + userDataSecret := fmt.Sprintf("%s-user-data", role) + poolName := role // FIXME: knob to control this + total := int64(3) // FIXME: knob to control this + + var machineSets []runtime.RawExtension + for idx, zone := range zones { + name := fmt.Sprintf("%s-%s-%s", clusterName, poolName, zone) + + replicas := int32(total / numZones) + if int64(idx) < total%numZones { + replicas++ + } + + provider, err := provider(clusterID, clusterName, region, zone, instanceType, 
ami, userTags, role, userDataSecret) + if err != nil { + return nil, errors.Wrap(err, "create provider") + } + + machineSet := clusterapi.MachineSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "MachineSet", + APIVersion: "cluster.k8s.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "openshift-cluster-api", + Labels: map[string]string{ + "sigs.k8s.io/cluster-api-cluster": clusterName, + "sigs.k8s.io/cluster-api-machine-role": role, + "sigs.k8s.io/cluster-api-machine-type": role, + }, + }, + Spec: clusterapi.MachineSetSpec{ + Replicas: &replicas, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "sigs.k8s.io/cluster-api-machineset": name, + "sigs.k8s.io/cluster-api-cluster": clusterName, + }, + }, + Template: clusterapi.MachineTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "sigs.k8s.io/cluster-api-machineset": name, + "sigs.k8s.io/cluster-api-cluster": clusterName, + "sigs.k8s.io/cluster-api-machine-role": role, + "sigs.k8s.io/cluster-api-machine-type": role, + }, + }, + Spec: clusterapi.MachineSpec{ + ProviderConfig: clusterapi.ProviderConfig{ + Value: &runtime.RawExtension{Object: provider}, + }, + // we don't need to set Versions, because we control those via cluster operators. 
+ }, + }, + }, + } + + machineSets = append(machineSets, runtime.RawExtension{Object: &machineSet}) + } + + list := &metav1.List{ + TypeMeta: metav1.TypeMeta{ + Kind: "List", + APIVersion: "v1", + }, + Items: machineSets, + } + + asset.Data, err = yaml.Marshal(list) + if err != nil { + return nil, err + } + + return asset, nil +} + +func init() { + installerassets.Rebuilders["manifests/aws/99_openshift-cluster-api_worker-machinesets.yaml"] = workerMachineSetsRebuilder +} diff --git a/pkg/installerassets/aws/manifests.go b/pkg/installerassets/aws/manifests.go new file mode 100644 index 00000000000..50583083c9f --- /dev/null +++ b/pkg/installerassets/aws/manifests.go @@ -0,0 +1,32 @@ +package aws + +import ( + "context" + + "github.com/aws/aws-sdk-go/aws/session" + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" +) + +func cloudConfigRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + ssn := session.Must(session.NewSessionWithOptions(session.Options{ + SharedConfigState: session.SharedConfigEnable, + })) + creds, err := ssn.Config.Credentials.Get() + if err != nil { + return nil, err + } + + return installerassets.TemplateRebuilder( + "files/opt/tectonic/tectonic/aws/99_cloud-creds-secret.yaml", + nil, + map[string]interface{}{ + "AccessKeyID": creds.AccessKeyID, + "SecretAccessKey": creds.SecretAccessKey, + }, + )(ctx, getByName) +} + +func init() { + installerassets.Rebuilders["files/opt/tectonic/tectonic/aws/99_cloud-creds-secret.yaml"] = cloudConfigRebuilder +} diff --git a/pkg/installerassets/aws/metadata.go b/pkg/installerassets/aws/metadata.go new file mode 100644 index 00000000000..6890efe1202 --- /dev/null +++ b/pkg/installerassets/aws/metadata.go @@ -0,0 +1,54 @@ +package aws + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" + 
"github.com/openshift/installer/pkg/types/aws" +) + +func metadataRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "aws/metadata.json", + RebuildHelper: metadataRebuilder, + } + + parents, err := asset.GetParents( + ctx, + getByName, + "aws/region", + "cluster-id", + "cluster-name", + ) + if err != nil { + return nil, err + } + + region := string(parents["aws/region"].Data) + clusterID := string(parents["cluster-id"].Data) + clusterName := string(parents["cluster-name"].Data) + + metadata := &aws.Metadata{ + Region: region, + Identifier: []map[string]string{ + { + "tectonicClusterID": clusterID, + fmt.Sprintf("kubernetes.io/cluster/%s", clusterName): "owned", + }, + }, + } + + asset.Data, err = json.Marshal(metadata) + if err != nil { + return nil, err + } + + return asset, nil +} + +func init() { + installerassets.Rebuilders["aws/metadata.json"] = metadataRebuilder +} diff --git a/pkg/installerassets/aws/network.go b/pkg/installerassets/aws/network.go new file mode 100644 index 00000000000..b7a60cdc4e6 --- /dev/null +++ b/pkg/installerassets/aws/network.go @@ -0,0 +1,9 @@ +package aws + +import ( + "github.com/openshift/installer/pkg/installerassets" +) + +func init() { + installerassets.Defaults["aws/external-vpc-id"] = installerassets.ConstantDefault(nil) +} diff --git a/pkg/installerassets/aws/region.go b/pkg/installerassets/aws/region.go new file mode 100644 index 00000000000..bcada1d60d1 --- /dev/null +++ b/pkg/installerassets/aws/region.go @@ -0,0 +1,89 @@ +package aws + +import ( + "context" + "fmt" + "os" + "sort" + "strings" + + "github.com/openshift/installer/pkg/installerassets" + "github.com/pkg/errors" + survey "gopkg.in/AlecAivazis/survey.v1" +) + +var ( + validAWSRegions = map[string]string{ + "ap-northeast-1": "Tokyo", + "ap-northeast-2": "Seoul", + "ap-northeast-3": "Osaka-Local", + "ap-south-1": "Mumbai", + "ap-southeast-1": "Singapore", + "ap-southeast-2": "Sydney", + 
"ca-central-1": "Central", + "cn-north-1": "Beijing", + "cn-northwest-1": "Ningxia", + "eu-central-1": "Frankfurt", + "eu-west-1": "Ireland", + "eu-west-2": "London", + "eu-west-3": "Paris", + "sa-east-1": "São Paulo", + "us-east-1": "N. Virginia", + "us-east-2": "Ohio", + "us-west-1": "N. California", + "us-west-2": "Oregon", + } +) + +func getRegion(ctx context.Context) ([]byte, error) { + longRegions := make([]string, 0, len(validAWSRegions)) + shortRegions := make([]string, 0, len(validAWSRegions)) + for id, location := range validAWSRegions { + longRegions = append(longRegions, fmt.Sprintf("%s (%s)", id, location)) + shortRegions = append(shortRegions, id) + } + regionTransform := survey.TransformString(func(s string) string { + return strings.SplitN(s, " ", 2)[0] + }) + sort.Strings(longRegions) + sort.Strings(shortRegions) + + value := os.Getenv("OPENSHIFT_INSTALL_AWS_REGION") + if value != "" { + i := sort.SearchStrings(shortRegions, value) + if i == len(shortRegions) || shortRegions[i] != value { + return nil, errors.Errorf("invalid region %q", value) + } + return []byte(value), nil + } + + question := &survey.Question{ + Prompt: &survey.Select{ + Message: "Region", + Help: "The AWS region to be used for installation.", + Default: "us-east-1 (N. 
Virginia)", + Options: longRegions, + }, + Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { + choice := regionTransform(ans).(string) + i := sort.SearchStrings(shortRegions, choice) + if i == len(shortRegions) || shortRegions[i] != choice { + return errors.Errorf("invalid region %q", choice) + } + return nil + }), + Transform: regionTransform, + } + + var response string + err := survey.Ask([]*survey.Question{question}, &response) + if err != nil { + return nil, errors.Wrap(err, "ask") + } + + return []byte(response), nil +} + +func init() { + installerassets.Defaults["aws/region"] = getRegion +} diff --git a/pkg/installerassets/aws/tags.go b/pkg/installerassets/aws/tags.go new file mode 100644 index 00000000000..d3cdb9ecaa8 --- /dev/null +++ b/pkg/installerassets/aws/tags.go @@ -0,0 +1,46 @@ +package aws + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/ghodss/yaml" + "github.com/openshift/installer/pkg/installerassets" + "github.com/pkg/errors" + awsprovider "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsproviderconfig/v1alpha1" +) + +func getUserTags(ctx context.Context) (data []byte, err error) { + userTags := map[string]string{} + if value, ok := os.LookupEnv("_CI_ONLY_STAY_AWAY_OPENSHIFT_INSTALL_AWS_USER_TAGS"); ok { + if err := json.Unmarshal([]byte(value), &userTags); err != nil { + return nil, errors.Wrapf(err, "_CI_ONLY_STAY_AWAY_OPENSHIFT_INSTALL_AWS_USER_TAGS contains invalid JSON: %s", value) + } + } + + return yaml.Marshal(userTags) +} + +func tagsFromUserTags(clusterID, clusterName string, userTags map[string]string) ([]awsprovider.TagSpecification, error) { + tags := []awsprovider.TagSpecification{ + {Name: "tectonicClusterID", Value: clusterID}, + {Name: fmt.Sprintf("kubernetes.io/cluster/%s", clusterName), Value: "owned"}, + } + forbiddenTags := map[string]bool{} + for _, tag := range tags { + forbiddenTags[tag.Name] = true + } + for key, value := range userTags { + if forbiddenTags[key] { 
+ return nil, errors.Errorf("user tags may not clobber %s", key) + } + tags = append(tags, awsprovider.TagSpecification{Name: key, Value: value}) + } + return tags, nil +} + +func init() { + installerassets.Defaults["aws/user-tags"] = getUserTags +} diff --git a/pkg/installerassets/aws/terraform.go b/pkg/installerassets/aws/terraform.go new file mode 100644 index 00000000000..c4ce2f44ddb --- /dev/null +++ b/pkg/installerassets/aws/terraform.go @@ -0,0 +1,78 @@ +package aws + +import ( + "context" + "encoding/json" + + "github.com/ghodss/yaml" + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" + "github.com/pkg/errors" +) + +type endpoints string + +const ( + endpointsAll endpoints = "all" + endpointsPrivate endpoints = "private" + endpointsPublic endpoints = "public" +) + +type terraformConfig struct { + AMI string `json:"tectonic_aws_ec2_ami_override,omitempty"` + Endpoints endpoints `json:"tectonic_aws_endpoints,omitempty"` + ExternalVPCID string `json:"tectonic_aws_external_vpc_id,omitempty"` + InstallerRole string `json:"tectonic_aws_installer_role,omitempty"` + MasterInstanceType string `json:"tectonic_aws_master_ec2_type,omitempty"` + Region string `json:"tectonic_aws_region,omitempty"` + UserTags map[string]string `json:"tectonic_aws_extra_tags,omitempty"` + VPCCIDR string `json:"tectonic_aws_vpc_cidr_block,omitempty"` +} + +func terraformRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "terraform/aws-terraform.auto.tfvars", + RebuildHelper: terraformRebuilder, + } + + parents, err := asset.GetParents( + ctx, + getByName, + "aws/ami", + "aws/external-vpc-id", + "aws/instance-type", + "aws/region", + "aws/user-tags", + "network/node-cidr", + ) + if err != nil { + return nil, err + } + + var userTags map[string]string + err = yaml.Unmarshal(parents["aws/user-tags"].Data, &userTags) + if err != nil { + return nil, errors.Wrap(err, "parse user 
tags") + } + + config := &terraformConfig{ + AMI: string(parents["aws/ami"].Data), + Endpoints: endpointsAll, + ExternalVPCID: string(parents["aws/external-vpc-id"].Data), + MasterInstanceType: string(parents["aws/instance-type"].Data), + Region: string(parents["aws/region"].Data), + UserTags: userTags, + VPCCIDR: string(parents["network/node-cidr"].Data), + } + + asset.Data, err = json.Marshal(config) + if err != nil { + return nil, err + } + + return asset, nil +} + +func init() { + installerassets.Rebuilders["terraform/aws-terraform.auto.tfvars"] = terraformRebuilder +} diff --git a/pkg/installerassets/aws/zones.go b/pkg/installerassets/aws/zones.go new file mode 100644 index 00000000000..8efd818be8b --- /dev/null +++ b/pkg/installerassets/aws/zones.go @@ -0,0 +1,61 @@ +package aws + +import ( + "context" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/ghodss/yaml" + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" + "github.com/pkg/errors" +) + +func zoneRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "aws/zones", + RebuildHelper: zoneRebuilder, + } + + parents, err := asset.GetParents(ctx, getByName, "aws/region") + if err != nil { + return nil, err + } + + region := aws.String(string(parents["aws/region"].Data)) + ssn := session.Must(session.NewSessionWithOptions(session.Options{ + SharedConfigState: session.SharedConfigEnable, + Config: aws.Config{ + Region: region, + }, + })) + + resp, err := ec2.New(ssn).DescribeAvailabilityZones(&ec2.DescribeAvailabilityZonesInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("region-name"), + Values: []*string{region}, + }, + }, + }) + if err != nil { + return nil, errors.Wrap(err, "describe availability zones") + } + + zones := []string{} + for _, zone := range resp.AvailabilityZones { + zones = append(zones, 
*zone.ZoneName) + } + + asset.Data, err = yaml.Marshal(zones) + if err != nil { + return nil, err + } + + return asset, nil +} + +func init() { + installerassets.Rebuilders["aws/zones"] = zoneRebuilder +} diff --git a/pkg/installerassets/basedomain.go b/pkg/installerassets/basedomain.go new file mode 100644 index 00000000000..8013a8814fb --- /dev/null +++ b/pkg/installerassets/basedomain.go @@ -0,0 +1,43 @@ +package installerassets + +import ( + "context" + "os" + + "github.com/openshift/installer/pkg/validate" + "github.com/pkg/errors" + survey "gopkg.in/AlecAivazis/survey.v1" +) + +func getBaseDomain(ctx context.Context) ([]byte, error) { + value := os.Getenv("OPENSHIFT_INSTALL_BASE_DOMAIN") + if value != "" { + err := validate.DomainName(value) + if err != nil { + return nil, err + } + return []byte(value), nil + } + + question := &survey.Question{ + Prompt: &survey.Input{ + Message: "Base Domain", + Help: "The base domain of the cluster. All DNS records will be sub-domains of this base.\n\nFor AWS, this must be a previously-existing public Route 53 zone. You can check for any already in your account with:\n\n $ aws route53 list-hosted-zones --query 'HostedZones[? 
!(Config.PrivateZone)].Name' --output text", + }, + Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { + return validate.DomainName(ans.(string)) + }), + } + + var response string + err := survey.Ask([]*survey.Question{question}, &response) + if err != nil { + return nil, errors.Wrap(err, "ask") + } + + return []byte(response), nil +} + +func init() { + Defaults["base-domain"] = getBaseDomain +} diff --git a/pkg/installerassets/cluster.go b/pkg/installerassets/cluster.go new file mode 100644 index 00000000000..de05cee0d4a --- /dev/null +++ b/pkg/installerassets/cluster.go @@ -0,0 +1,41 @@ +package installerassets + +import ( + "context" + "fmt" + "os" + + "github.com/openshift/installer/pkg/assets" +) + +func clusterRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "cluster", + RebuildHelper: clusterRebuilder, + } + + parents, err := asset.GetParents( + ctx, + getByName, + "platform", + // unused in this rebuilder, but we want these before launching the cluster + "metadata.json", + "terraform/terraform.tfvars", + ) + if err != nil { + return nil, err + } + + platform := string(parents["platform"].Data) + perPlatformName := fmt.Sprintf("terraform/%s-terraform.auto.tfvars", platform) + parents, err = asset.GetParents(ctx, getByName, perPlatformName) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + + return asset, nil +} + +func init() { + Rebuilders["cluster"] = clusterRebuilder +} diff --git a/pkg/installerassets/clusterapi.go b/pkg/installerassets/clusterapi.go new file mode 100644 index 00000000000..3c77861e0a9 --- /dev/null +++ b/pkg/installerassets/clusterapi.go @@ -0,0 +1,72 @@ +package installerassets + +import ( + "context" + + "github.com/ghodss/yaml" + netopv1 "github.com/openshift/cluster-network-operator/pkg/apis/networkoperator/v1" + "github.com/openshift/installer/pkg/assets" + "github.com/pkg/errors" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1a1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" +) + +func clusterAPIClusterRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "manifests/99_openshift-cluster-api_cluster.yaml", + RebuildHelper: clusterAPIClusterRebuilder, + } + + parents, err := asset.GetParents( + ctx, + getByName, + "cluster-name", + "manifests/cluster-network-02-config.yaml", + "network/service-cidr", + ) + if err != nil { + return nil, err + } + + var netConfig *netopv1.NetworkConfig + err = yaml.Unmarshal(parents["manifests/cluster-network-02-config.yaml"].Data, &netConfig) + if err != nil { + return nil, errors.Wrap(err, "unmarshal network config") + } + + pods := []string{} + for _, clusterNetwork := range netConfig.Spec.ClusterNetworks { + pods = append(pods, clusterNetwork.CIDR) + } + + cluster := clusterv1a1.Cluster{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "cluster.k8s.io/v1alpha1", + Kind: "Cluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: string(parents["cluster-name"].Data), + Namespace: "openshift-cluster-api", + }, + Spec: clusterv1a1.ClusterSpec{ + ClusterNetwork: clusterv1a1.ClusterNetworkingConfig{ + Services: clusterv1a1.NetworkRanges{ + CIDRBlocks: []string{string(parents["network/service-cidr"].Data)}, + }, + Pods: clusterv1a1.NetworkRanges{ + CIDRBlocks: pods, + }, + }, + }, + } + + asset.Data, err = yaml.Marshal(cluster) + if err != nil { + return nil, err + } + return asset, nil +} + +func init() { + Rebuilders["manifests/99_openshift-cluster-api_cluster.yaml"] = clusterAPIClusterRebuilder +} diff --git a/pkg/installerassets/clusterconfig.go b/pkg/installerassets/clusterconfig.go new file mode 100644 index 00000000000..ace23dc27f7 --- /dev/null +++ b/pkg/installerassets/clusterconfig.go @@ -0,0 +1,126 @@ +package installerassets + +import ( + "context" + "net" + "strconv" + + "github.com/ghodss/yaml" + netopv1 
"github.com/openshift/cluster-network-operator/pkg/apis/networkoperator/v1" + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/ipnet" + "github.com/openshift/installer/pkg/types" + "github.com/openshift/installer/pkg/types/aws" + "github.com/openshift/installer/pkg/types/libvirt" + "github.com/openshift/installer/pkg/types/openstack" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func clusterConfigRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "manifests/cluster-config.yaml", + RebuildHelper: clusterConfigRebuilder, + } + + parents, err := asset.GetParents( + ctx, + getByName, + "admin/email", + "admin/password", + "base-domain", + "cluster-id", + "cluster-name", + "network/cluster-cidr", + "network/host-subnet-length", + "network/service-cidr", + "platform", + "pull-secret", + "ssh.pub", + ) + if err != nil { + return nil, err + } + + _, serviceCIDR, err := net.ParseCIDR(string(parents["network/service-cidr"].Data)) + if err != nil { + return nil, errors.Wrap(err, "parse service CIDR") + } + + hostSubnetLength, err := strconv.ParseUint(string(parents["network/host-subnet-length"].Data), 10, 32) + if err != nil { + return nil, errors.Wrap(err, "parse host subnet length") + } + + config := &types.InstallConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: string(parents["cluster-name"].Data), + }, + ClusterID: string(parents["cluster-id"].Data), + Admin: types.Admin{ + Email: string(parents["admin/email"].Data), + Password: string(parents["admin/password"].Data), + SSHKey: string(parents["ssh.pub"].Data), + }, + BaseDomain: string(parents["base-domain"].Data), + Networking: types.Networking{ + Type: "OpenshiftSDN", + ServiceCIDR: ipnet.IPNet{ + IPNet: *serviceCIDR, + }, + ClusterNetworks: []netopv1.ClusterNetwork{ + { + CIDR: string(parents["network/cluster-cidr"].Data), + HostSubnetLength: 
uint32(hostSubnetLength), + }, + }, + }, + } + + // support the machine-config operator: + // Dec 01 00:38:21 wking-bootstrap bootkube.sh[5569]: panic: invalid platform + // Dec 01 00:38:21 wking-bootstrap bootkube.sh[5569]: goroutine 1 [running]: + // Dec 01 00:38:21 wking-bootstrap bootkube.sh[5569]: github.com/openshift/machine-config-operator/pkg/operator.platformFromInstallConfig(0x0, 0x0, 0x0, 0x0, 0xc4203d34b0, 0x5, 0x0, 0x0, 0x0, 0x0, ...) + platform := string(parents["platform"].Data) + switch platform { + case "aws": + config.Platform.AWS = &aws.Platform{} + case "libvirt": + config.Platform.Libvirt = &libvirt.Platform{} + case "openstack": + config.Platform.OpenStack = &openstack.Platform{} + default: + return nil, errors.Errorf("unrecognized platform %q", platform) + } + + data, err := yaml.Marshal(config) + if err != nil { + return nil, err + } + + configMap := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-config-v1", + Namespace: metav1.NamespaceSystem, + }, + Data: map[string]string{ + "install-config": string(data), + }, + } + + asset.Data, err = yaml.Marshal(configMap) + if err != nil { + return nil, err + } + + return asset, nil +} + +func init() { + Rebuilders["manifests/cluster-config.yaml"] = clusterConfigRebuilder +} diff --git a/pkg/installerassets/clusterid.go b/pkg/installerassets/clusterid.go new file mode 100644 index 00000000000..fa01ed0e822 --- /dev/null +++ b/pkg/installerassets/clusterid.go @@ -0,0 +1,15 @@ +package installerassets + +import ( + "context" + + "github.com/pborman/uuid" +) + +func getUUID(ctx context.Context) (data []byte, err error) { + return []byte(uuid.New()), nil +} + +func init() { + Defaults["cluster-id"] = getUUID +} diff --git a/pkg/installerassets/clustername.go b/pkg/installerassets/clustername.go new file mode 100644 index 00000000000..410b3c31f40 --- /dev/null +++ b/pkg/installerassets/clustername.go @@ -0,0 
+1,43 @@ +package installerassets + +import ( + "context" + "os" + + "github.com/openshift/installer/pkg/validate" + "github.com/pkg/errors" + survey "gopkg.in/AlecAivazis/survey.v1" +) + +func getClusterName(ctx context.Context) ([]byte, error) { + value := os.Getenv("OPENSHIFT_INSTALL_CLUSTER_NAME") + if value != "" { + err := validate.DomainName(value) + if err != nil { + return nil, err + } + return []byte(value), nil + } + + question := &survey.Question{ + Prompt: &survey.Input{ + Message: "Cluster Name", + Help: "The name of the cluster. This will be used when generating sub-domains.", + }, + Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { + return validate.DomainName(ans.(string)) + }), + } + + var response string + err := survey.Ask([]*survey.Question{question}, &response) + if err != nil { + return nil, errors.Wrap(err, "ask") + } + + return []byte(response), nil +} + +func init() { + Defaults["cluster-name"] = getClusterName +} diff --git a/pkg/installerassets/ignition.go b/pkg/installerassets/ignition.go new file mode 100644 index 00000000000..c09d5f0fd70 --- /dev/null +++ b/pkg/installerassets/ignition.go @@ -0,0 +1,310 @@ +package installerassets + +import ( + "context" + "encoding/json" + "fmt" + "os" + "path" + + "github.com/coreos/ignition/config/v2_2/types" + "github.com/ghodss/yaml" + "github.com/openshift/installer/pkg/assets" + "github.com/vincent-petithory/dataurl" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// fileFromBytes creates an ignition-config file with the given +// contents. 
+func fileFromBytes(path string, mode int, contents []byte) types.File { + return types.File{ + Node: types.Node{ + Filesystem: "root", + Path: path, + }, + FileEmbedded1: types.FileEmbedded1{ + Mode: &mode, + Contents: types.FileContents{ + Source: dataurl.EncodeBytes(contents), + }, + }, + } +} + +func bootstrapIgnRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "ignition/bootstrap.ign", + RebuildHelper: bootstrapIgnRebuilder, + } + + config := &types.Config{ + Ignition: types.Ignition{ + Version: types.MaxVersion.String(), + }, + } + + parents, err := asset.GetParents(ctx, getByName, "ssh.pub") + if err == nil { + config.Passwd.Users = append( + config.Passwd.Users, + types.PasswdUser{ + Name: "core", + SSHAuthorizedKeys: []types.SSHAuthorizedKey{ + types.SSHAuthorizedKey(parents["ssh.pub"].Data), + }, + }, + ) + } else if !os.IsNotExist(err) { + return nil, err + } + + for _, entry := range []struct { + name string + enabled bool + }{ + {name: "systemd/units/bootkube.service"}, + {name: "systemd/units/tectonic.service"}, + {name: "systemd/units/progress.service", enabled: true}, + {name: "systemd/units/kubelet.service", enabled: true}, + } { + parents, err := asset.GetParents(ctx, getByName, entry.name) + if err != nil { + return nil, err + } + + unit := types.Unit{ + Name: path.Base(parents[entry.name].Name), + Contents: string(parents[entry.name].Data), + } + if entry.enabled { + unit.Enabled = &entry.enabled + } + + config.Systemd.Units = append(config.Systemd.Units, unit) + } + + parents, err = asset.GetParents(ctx, getByName, "platform") + if err != nil { + return nil, err + } + platform := string(parents["platform"].Data) + + for _, entry := range []struct { + path string + name string + mode int + append bool + user string + group string + platform string + }{ + {path: "/etc/kubernetes/kubeconfig", name: "auth/kubeconfig-kubelet", mode: 0600}, + {path: "/etc/motd", name: 
"files/etc/motd", mode: 0644, append: true}, + {path: "/etc/ssl/etcd/ca.crt", name: "tls/etcd-client.crt", mode: 0600}, + {path: "/home/core/.bash_history", name: "files/home/core/.bash_history", mode: 0600, user: "core", group: "core"}, + {path: "/opt/tectonic/bootkube-config-overrides/kube-apiserver-config-overrides.yaml", name: "files/opt/tectonic/bootkube-config-overrides/kube-apiserver-config-overrides.yaml", mode: 0600}, + {path: "/opt/tectonic/bootkube-config-overrides/kube-controller-manager-config-overrides.yaml", name: "files/opt/tectonic/bootkube-config-overrides/kube-controller-manager-config-overrides.yaml", mode: 0600}, + {path: "/opt/tectonic/bootkube-config-overrides/kube-scheduler-config-overrides.yaml", name: "files/opt/tectonic/bootkube-config-overrides/kube-scheduler-config-overrides.yaml", mode: 0600}, + {path: "/opt/tectonic/auth/kubeconfig", name: "auth/kubeconfig-admin", mode: 0600}, + {path: "/opt/tectonic/manifests/03-openshift-web-console-namespace.yaml", name: "files/opt/tectonic/manifests/03-openshift-web-console-namespace.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/04-openshift-machine-config-operator.yaml", name: "files/opt/tectonic/manifests/04-openshift-machine-config-operator.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/05-openshift-cluster-api-namespace.yaml", name: "files/opt/tectonic/manifests/05-openshift-cluster-api-namespace.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/09-openshift-service-cert-signer-namespace.yaml", name: "files/opt/tectonic/manifests/09-openshift-service-cert-signer-namespace.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/cluster-config.yaml", name: "manifests/cluster-config.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/cluster-ingress-01-crd.yaml", name: "files/opt/tectonic/manifests/cluster-ingress-01-crd.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/cluster-ingress-02-config.yaml", name: "manifests/cluster-ingress-02-config.yaml", mode: 0600}, + {path: 
"/opt/tectonic/manifests/cluster-network-01-crd.yaml", name: "files/opt/tectonic/manifests/cluster-network-01-crd.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/cluster-network-02-config.yaml", name: "manifests/cluster-network-02-config.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/cvo-overrides.yaml", name: "files/opt/tectonic/manifests/cvo-overrides.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/etcd-service.yaml", name: "files/opt/tectonic/manifests/etcd-service.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/host-etcd-service.yaml", name: "files/opt/tectonic/manifests/host-etcd-service.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/host-etcd-service-endpoints.yaml", name: "files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/legacy-cvo-overrides.yaml", name: "files/opt/tectonic/manifests/legacy-cvo-overrides.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/kube-cloud-config.yaml", name: "files/opt/tectonic/manifests/kube-cloud-config.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml", name: "files/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/kube-system-configmap-etcd-serving-ca.yaml", name: "files/opt/tectonic/manifests/kube-system-configmap-etcd-serving-ca.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/kube-system-configmap-root-ca.yaml", name: "files/opt/tectonic/manifests/kube-system-configmap-root-ca.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/machine-config-server-tls-secret.yaml", name: "files/opt/tectonic/manifests/machine-config-server-tls-secret.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml", name: "files/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml", mode: 0600}, + // FIXME: dup? 
{path: "/opt/tectonic/manifests/openshift-apiserver-secret.yaml", name: "manifests/openshift-apiserver-secret.yaml", mode: 0600}, + {path: "/opt/tectonic/manifests/pull.json", name: "manifests/pull.json", mode: 0600}, + {path: "/opt/tectonic/tectonic/99_binding-discovery.yaml", name: "files/opt/tectonic/tectonic/99_binding-discovery.yaml", mode: 0600}, + {path: "/opt/tectonic/tectonic/99_cloud-creds-secret.yaml", name: "files/opt/tectonic/tectonic/aws/99_cloud-creds-secret.yaml", mode: 0600, platform: "aws"}, + {path: "/opt/tectonic/tectonic/99_cloud-creds-secret.yaml", name: "files/opt/tectonic/tectonic/openstack/99_cloud-creds-secret.yaml", mode: 0600, platform: "openstack"}, + {path: "/opt/tectonic/tectonic/99_openshift-cluster-api_cluster.yaml", name: "manifests/99_openshift-cluster-api_cluster.yaml", mode: 0600}, + {path: "/opt/tectonic/tectonic/99_openshift-cluster-api_master-machines.yaml", name: "manifests/aws/99_openshift-cluster-api_master-machines.yaml", mode: 0600, platform: "aws"}, + {path: "/opt/tectonic/tectonic/99_openshift-cluster-api_master-machines.yaml", name: "manifests/libvirt/99_openshift-cluster-api_master-machines.yaml", mode: 0600, platform: "libvirt"}, + {path: "/opt/tectonic/tectonic/99_openshift-cluster-api_master-machines.yaml", name: "manifests/openstack/99_openshift-cluster-api_master-machines.yaml", mode: 0600, platform: "openstack"}, // FIXME + {path: "/opt/tectonic/tectonic/99_openshift-cluster-api_master-user-data-secret.yaml", name: "manifests/99_openshift-cluster-api_master-user-data-secret.yaml"}, + {path: "/opt/tectonic/tectonic/99_openshift-cluster-api_worker-machinesets.yaml", name: "manifests/aws/99_openshift-cluster-api_worker-machinesets.yaml", mode: 0600, platform: "aws"}, + {path: "/opt/tectonic/tectonic/99_openshift-cluster-api_worker-machinesets.yaml", name: "manifests/libvirt/99_openshift-cluster-api_worker-machinesets.yaml", mode: 0600, platform: "libvirt"}, + {path: 
"/opt/tectonic/tectonic/99_openshift-cluster-api_worker-machinesets.yaml", name: "manifests/openstack/99_openshift-cluster-api_worker-machinesets.yaml", mode: 0600, platform: "openstack"}, // FIXME + {path: "/opt/tectonic/tectonic/99_openshift-cluster-api_worker-user-data-secret.yaml", name: "manifests/99_openshift-cluster-api_worker-user-data-secret.yaml"}, + {path: "/opt/tectonic/tectonic/99_role-cloud-creds-secret-reader.yaml", name: "files/opt/tectonic/tectonic/aws/99_role-cloud-creds-secret-reader.yaml", mode: 0600, platform: "aws"}, + {path: "/opt/tectonic/tectonic/99_role-cloud-creds-secret-reader.yaml", name: "files/opt/tectonic/tectonic/openstack/99_role-cloud-creds-secret-reader.yaml", mode: 0600, platform: "openstack"}, + {path: "/opt/tectonic/tls/admin.crt", name: "tls/admin-client.crt", mode: 0600}, + {path: "/opt/tectonic/tls/admin.key", name: "tls/admin-client.key", mode: 0600}, + {path: "/opt/tectonic/tls/aggregator-ca.crt", name: "tls/aggregator-ca.crt", mode: 0600}, + {path: "/opt/tectonic/tls/aggregator-ca.key", name: "tls/aggregator-ca.key", mode: 0600}, + {path: "/opt/tectonic/tls/apiserver.crt", name: "tls/api-server-chain.crt", mode: 0600}, + {path: "/opt/tectonic/tls/apiserver.key", name: "tls/api-server.key", mode: 0600}, + {path: "/opt/tectonic/tls/apiserver-proxy.crt", name: "tls/api-server-proxy.crt", mode: 0600}, + {path: "/opt/tectonic/tls/apiserver-proxy.key", name: "tls/api-server-proxy.key", mode: 0600}, + {path: "/opt/tectonic/tls/etcd-client-ca.crt", name: "tls/etcd-ca.crt", mode: 0600}, + {path: "/opt/tectonic/tls/etcd-client-ca.key", name: "tls/etcd-ca.key", mode: 0600}, + {path: "/opt/tectonic/tls/etcd-client.crt", name: "tls/etcd-client.crt", mode: 0600}, + {path: "/opt/tectonic/tls/etcd-client.key", name: "tls/etcd-client.key", mode: 0600}, + {path: "/opt/tectonic/tls/kube-ca.crt", name: "tls/kube-ca.crt", mode: 0600}, + {path: "/opt/tectonic/tls/kube-ca.key", name: "tls/kube-ca.key", mode: 0600}, + {path: 
"/opt/tectonic/tls/kubelet.crt", name: "tls/kubelet-client.crt", mode: 0600}, + {path: "/opt/tectonic/tls/kubelet.key", name: "tls/kubelet-client.key", mode: 0600}, + {path: "/opt/tectonic/tls/machine-config-server.crt", name: "tls/machine-config-server.crt", mode: 0600}, + {path: "/opt/tectonic/tls/machine-config-server.key", name: "tls/machine-config-server.key", mode: 0600}, + {path: "/opt/tectonic/tls/root-ca.crt", name: "tls/root-ca.crt", mode: 0600}, + {path: "/opt/tectonic/tls/root-ca.key", name: "tls/root-ca.key", mode: 0600}, + {path: "/opt/tectonic/tls/service-account.key", name: "tls/service-account.key", mode: 0600}, + //{path: "/opt/tectonic/tls/service-account.pub", name: "", mode: 0600}, FIXME: do we need this? + {path: "/opt/tectonic/tls/service-serving-ca.crt", name: "tls/service-serving-ca.crt", mode: 0600}, + {path: "/opt/tectonic/tls/service-serving-ca.key", name: "tls/service-serving-ca.key", mode: 0600}, + {path: "/usr/local/bin/bootkube.sh", name: "files/usr/local/bin/bootkube.sh", mode: 0555}, + {path: "/usr/local/bin/report-progress.sh", name: "files/usr/local/bin/report-progress.sh", mode: 0555}, + {path: "/usr/local/bin/tectonic.sh", name: "files/usr/local/bin/tectonic.sh", mode: 0555}, + {path: "/var/lib/kubelet/kubeconfig", name: "auth/kubeconfig-kubelet", mode: 0600}, + } { + if entry.platform != "" && entry.platform != platform { + continue + } + + parents, err = asset.GetParents(ctx, getByName, entry.name) + if err != nil { + return nil, err + } + + file := fileFromBytes(entry.path, entry.mode, parents[entry.name].Data) + file.Append = entry.append + if entry.user != "" { + file.User = &types.NodeUser{Name: entry.user} + } + if entry.group != "" { + file.Group = &types.NodeGroup{Name: entry.group} + } + config.Storage.Files = append(config.Storage.Files, file) + } + + asset.Data, err = json.Marshal(config) + if err != nil { + return nil, err + } + + return asset, nil +} + +func pointerIgnitionRebuilder(role string) assets.Rebuild { + 
return func(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: fmt.Sprintf("ignition/%s.ign", role), + RebuildHelper: pointerIgnitionRebuilder(role), + } + + parents, err := asset.GetParents( + ctx, + getByName, + "base-domain", + "cluster-name", + "tls/root-ca.crt", + ) + if err != nil { + return nil, err + } + config := &types.Config{ + Ignition: types.Ignition{ + Version: types.MaxVersion.String(), + Config: types.IgnitionConfig{ + Append: []types.ConfigReference{{ + Source: fmt.Sprintf("https://%s-api.%s:49500/config/%s", string(parents["cluster-name"].Data), string(parents["base-domain"].Data), role), + }}, + }, + Security: types.Security{ + TLS: types.TLS{ + CertificateAuthorities: []types.CaReference{{ + Source: dataurl.EncodeBytes(parents["tls/root-ca.crt"].Data), + }}, + }, + }, + }, + } + + // XXX: Remove this once MCO supports injecting SSH keys. + parents, err = asset.GetParents(ctx, getByName, "ssh.pub") + if err == nil { + config.Passwd.Users = append( + config.Passwd.Users, + types.PasswdUser{ + Name: "core", + SSHAuthorizedKeys: []types.SSHAuthorizedKey{ + types.SSHAuthorizedKey(parents["ssh.pub"].Data), + }, + }, + ) + } else if !os.IsNotExist(err) { + return nil, err + } + + asset.Data, err = json.Marshal(config) + if err != nil { + return nil, err + } + + return asset, nil + } +} + +func pointerIgnitionUserDataRebuilder(role string) assets.Rebuild { + return func(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: fmt.Sprintf("manifests/99_openshift-cluster-api_%s-user-data-secret.yaml", role), + RebuildHelper: pointerIgnitionUserDataRebuilder(role), + } + + parentName := fmt.Sprintf("ignition/%s.ign", role) + parents, err := asset.GetParents( + ctx, + getByName, + parentName, + ) + if err != nil { + return nil, err + } + + secret := corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + 
ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-user-data", role), + Namespace: "openshift-cluster-api", + }, + Data: map[string][]byte{ + "userData": parents[parentName].Data, + }, + Type: corev1.SecretTypeOpaque, + } + + asset.Data, err = yaml.Marshal(secret) + if err != nil { + return nil, err + } + + return asset, nil + } +} + +func init() { + Rebuilders["ignition/bootstrap.ign"] = bootstrapIgnRebuilder + Rebuilders["ignition/master.ign"] = pointerIgnitionRebuilder("master") + Rebuilders["ignition/worker.ign"] = pointerIgnitionRebuilder("worker") + Rebuilders["manifests/99_openshift-cluster-api_master-user-data-secret.yaml"] = pointerIgnitionUserDataRebuilder("master") + Rebuilders["manifests/99_openshift-cluster-api_worker-user-data-secret.yaml"] = pointerIgnitionUserDataRebuilder("worker") +} diff --git a/pkg/installerassets/images.go b/pkg/installerassets/images.go new file mode 100644 index 00000000000..3ef15b2bc09 --- /dev/null +++ b/pkg/installerassets/images.go @@ -0,0 +1,8 @@ +package installerassets + +func init() { + Defaults["image/bootkube"] = ConstantDefault([]byte("quay.io/coreos/bootkube:v0.14.0")) + Defaults["image/etcd-cert-signer"] = ConstantDefault([]byte("quay.io/coreos/kube-etcd-signer-server:678cc8e6841e2121ebfdb6e2db568fce290b67d6")) + Defaults["image/etcdctl"] = ConstantDefault([]byte("quay.io/coreos/etcd:v3.2.14")) + Defaults["image/release"] = ConstantDefault([]byte("registry.svc.ci.openshift.org/openshift/origin-release:v4.0")) +} diff --git a/pkg/installerassets/ingress.go b/pkg/installerassets/ingress.go new file mode 100644 index 00000000000..4e7e7e84776 --- /dev/null +++ b/pkg/installerassets/ingress.go @@ -0,0 +1,51 @@ +package installerassets + +import ( + "context" + "fmt" + + "github.com/ghodss/yaml" + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/installer/pkg/assets" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func ingressConfigRebuilder(ctx context.Context, getByName 
assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "manifests/cluster-ingress-02-config.yaml", + RebuildHelper: ingressConfigRebuilder, + } + + parents, err := asset.GetParents(ctx, getByName, "base-domain", "cluster-name") + if err != nil { + return nil, err + } + + baseDomain := string(parents["base-domain"].Data) + clusterName := string(parents["cluster-name"].Data) + + config := &configv1.Ingress{ + TypeMeta: metav1.TypeMeta{ + APIVersion: configv1.SchemeGroupVersion.String(), + Kind: "Ingress", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + // not namespaced + }, + Spec: configv1.IngressSpec{ + Domain: fmt.Sprintf("apps.%s.%s", clusterName, baseDomain), + }, + } + + asset.Data, err = yaml.Marshal(config) + if err != nil { + return nil, err + } + + return asset, nil +} + +func init() { + Rebuilders["manifests/cluster-ingress-02-config.yaml"] = ingressConfigRebuilder +} diff --git a/pkg/installerassets/installerassets.go b/pkg/installerassets/installerassets.go new file mode 100644 index 00000000000..c37d7ccd7ad --- /dev/null +++ b/pkg/installerassets/installerassets.go @@ -0,0 +1,238 @@ +// Package installerassets contains installer-specific helpers for the +// asset Merkle DAG. +package installerassets + +import ( + "bytes" + "context" + "encoding/base64" + "fmt" + "os" + "path" + "strconv" + "strings" + "text/template" + + "github.com/openshift/installer/pkg/assets" + "github.com/pkg/errors" +) + +// Rebuilders registers installer asset rebuilders by name. Use this +// to set up assets that have parents. For example: +// +// func yourRebuilder(getByName assets.GetByString) (asset *assets.Asset, err error) { +// asset = &assets.Asset{ +// Name: "tls/root-ca.crt", +// RebuildHelper: rootCARebuilder, +// } +// +// parents, err := asset.GetParents(getByName, "tls/root-ca.key") +// if err != nil { +// return nil, err +// } +// +// // Assemble your data based on the parent content using your custom logic. 
+// for name, parent := range parents { +// asset.Data = append(asset.Data, parent.Data) +// } +// +// return asset, nil +// } +// +// and then somewhere (e.g. an init() function), add it to the registry: +// +// Rebuilders["your/asset"] = yourRebuilder +var Rebuilders = make(map[string]assets.Rebuild) + +// Defaulter returns a default value. This type is consumed by the +// Defaults registry and helpers interacting with that registry. +type Defaulter func(ctx context.Context) (data []byte, err error) + +// Defaults registers installer asset default functions by name. Use +// this to set up assets that do not have parents. For example, +// constants: +// +// Defaults["your/asset"] = ConstantDefault([]byte("your value")) +// +// or values populated from outside the asset graph: +// +// Defaults["your/asset"] = func() ([]byte, error) { +// value = os.Getenv("YOUR_ENVIRONMENT_VARIABLE") +// return []byte(value), nil +// } +var Defaults = make(map[string]Defaulter) + +// New returns a new installer asset store. +func New() *assets.Assets { + return &assets.Assets{ + Root: assets.Reference{ + Name: "cluster", + }, + Rebuilders: Rebuilders, + } +} + +// GetDefault calculates defaults for missing assets. +func GetDefault(ctx context.Context, name string) ([]byte, error) { + defaulter, ok := Defaults[name] + if !ok { + return nil, os.ErrNotExist + } + + return defaulter(ctx) +} + +// ConstantDefault returns a Defaulter which returns a constant value. +func ConstantDefault(value []byte) Defaulter { + return func(ctx context.Context) ([]byte, error) { + return value, nil + } +} + +// PlatformOverrideRebuilder generates rebuilders for values that +// allow per-platform overrides. It pulls the 'platform' parent and +// looks up the {platform}/{name} asset. If that asset exists, it uses +// its value. If that asset does not exist, it uses 'defaulter' to +// calculate a generic default. 
+func PlatformOverrideRebuilder(name string, defaulter Defaulter) assets.Rebuild { + rebuild := func(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: name, + RebuildHelper: PlatformOverrideRebuilder(name, defaulter), + } + + parents, err := asset.GetParents(ctx, getByName, "platform") + if err != nil { + return nil, err + } + + platform := string(parents["platform"].Data) + perPlatformName := path.Join(platform, name) + parents, err = asset.GetParents(ctx, getByName, perPlatformName) + if err == nil { + asset.Data = parents[perPlatformName].Data + return asset, nil + } else if os.IsNotExist(errors.Cause(err)) { + asset.Data, err = defaulter(ctx) + if err != nil { + return nil, err + } + return asset, nil + } + + return nil, err + } + + return rebuild +} + +// TemplateRebuilder returns a rebuilder that pulls a template from +// {name}.template and renders it with the parameters map for context. +// Keys for 'parameters' are template properties and values are parent +// names. You can also inject additional parameters directly without +// involving parent assets. For example: +// +// TemplateRebuilder( +// "my/asset", +// map[string]string{"my/parent": "MyParent"}, +// map[string]interface{}{"MyExtra": "my extra"}, +// ) +// +// will pull the template from my/asset.template and the template can +// use {{.MyParent}} to access the data from my/parent and +// {{.MyExtra}} to access "my extra". +// +// The following functions are also available in the template: +// +// * add, which returns the sum of its arguments. For example: +// +// {{add $index 1}} +// +// * base64, which encodes its argument in base64. For example: +// +// {{.Key | base64}} +// +// * etcdURIs, which, when called with clusterName, baseDomain, and +// count, returns a []string of +// https://{clusterName}-etcd-{count}.{baseDomain}:2379 URIs. 
+// +// * indent, which takes a count and a string and appends count spaces +// to any newlines in the string. +// +// * int, which converts a base-10 string into an int. +// +// * join, which, when called with a separator and a slice of strings, +// returns the slice elements separated by the separator. +func TemplateRebuilder(name string, parameters map[string]string, extra map[string]interface{}) assets.Rebuild { + return func(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: name, + RebuildHelper: TemplateRebuilder(name, parameters, extra), + } + + templateName := fmt.Sprintf("%s.template", name) + parents, err := asset.GetParents(ctx, getByName, templateName) + if err != nil { + return nil, err + } + templateData := string(parents[templateName].Data) + + funcMap := template.FuncMap{ + "add": func(values ...int) int { + sum := 0 + for _, value := range values { + sum += value + } + return sum + }, + "base64": func(data string) string { + return base64.StdEncoding.EncodeToString([]byte(data)) + }, + "etcdURIs": func(clusterName string, baseDomain string, count int) []string { + uris := make([]string, 0, count) + for i := 0; i < count; i++ { + uris = append(uris, fmt.Sprintf("https://%s-etcd-%d.%s:2379", clusterName, i, baseDomain)) + } + return uris + }, + "indent": func(indentation int, value string) string { + newline := "\n" + strings.Repeat(" ", indentation) + return strings.Replace(value, "\n", newline, -1) + }, + "int": func(value string) (int, error) { + integer, err := strconv.ParseInt(value, 10, 0) + return int(integer), err + }, + "join": func(separator string, slice []string) string { + return strings.Join(slice, separator) + }, + } + + tmpl, err := template.New(name).Funcs(funcMap).Parse(templateData) + if err != nil { + return nil, err + } + + params := make(map[string]interface{}, len(parameters)+len(extra)) + for key, parentName := range parameters { + parents, err = asset.GetParents(ctx, 
getByName, parentName) + if err != nil { + return nil, err + } + + params[key] = string(parents[parentName].Data) + } + for key, value := range extra { + params[key] = value + } + + buf := &bytes.Buffer{} + err = tmpl.Option("missingkey=error").Execute(buf, params) + if err != nil { + return nil, err + } + + asset.Data = buf.Bytes() + return asset, err + } +} diff --git a/pkg/installerassets/kubeconfig.go b/pkg/installerassets/kubeconfig.go new file mode 100644 index 00000000000..ee6ff645c77 --- /dev/null +++ b/pkg/installerassets/kubeconfig.go @@ -0,0 +1,98 @@ +package installerassets + +import ( + "context" + "fmt" + + "github.com/ghodss/yaml" + "github.com/openshift/installer/pkg/assets" + clientcmd "k8s.io/client-go/tools/clientcmd/api/v1" +) + +func kubeconfigRebuilder(role string, clientKey string, clientCert string) assets.Rebuild { + return func(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "auth/kubeconfig-" + role, + RebuildHelper: kubeconfigRebuilder(role, clientKey, clientCert), + } + + parents, err := asset.GetParents( + ctx, + getByName, + "base-domain", + "cluster-name", + clientKey, + clientCert, + "tls/root-ca.crt", + ) + if err != nil { + return nil, err + } + + asset.Data, err = kubeconfig( + parents["tls/root-ca.crt"].Data, + parents[clientKey].Data, + parents[clientCert].Data, + string(parents["cluster-name"].Data), + string(parents["base-domain"].Data), + role, + ) + + return asset, err + } +} + +// kubeconfig renders a YAML kubeconfig from the given parameters. 
+func kubeconfig( + rootCACert []byte, + clientKey []byte, + clientCert []byte, + clusterName string, + baseDomain string, + userName string, +) ([]byte, error) { + return yaml.Marshal(&clientcmd.Config{ + Clusters: []clientcmd.NamedCluster{ + { + Name: clusterName, + Cluster: clientcmd.Cluster{ + Server: fmt.Sprintf("https://%s-api.%s:6443", clusterName, baseDomain), + CertificateAuthorityData: rootCACert, + }, + }, + }, + AuthInfos: []clientcmd.NamedAuthInfo{ + { + Name: userName, + AuthInfo: clientcmd.AuthInfo{ + ClientKeyData: clientKey, + ClientCertificateData: clientCert, + }, + }, + }, + Contexts: []clientcmd.NamedContext{ + { + Name: userName, + Context: clientcmd.Context{ + Cluster: clusterName, + AuthInfo: userName, + }, + }, + }, + CurrentContext: userName, + }) +} + +func init() { + Rebuilders["auth/kubeconfig-admin"] = kubeconfigRebuilder( + "admin", + "tls/admin-client.key", + "tls/admin-client.crt", + ) + + Rebuilders["auth/kubeconfig-kubelet"] = kubeconfigRebuilder( + "kubelet", + "tls/kubelet-client.key", + "tls/kubelet-client.crt", + ) +} diff --git a/pkg/tfvars/libvirt/cache.go b/pkg/installerassets/libvirt/cache.go similarity index 74% rename from pkg/tfvars/libvirt/cache.go rename to pkg/installerassets/libvirt/cache.go index 60df1dfbe69..2d1c2a27a9f 100644 --- a/pkg/tfvars/libvirt/cache.go +++ b/pkg/installerassets/libvirt/cache.go @@ -11,6 +11,7 @@ import ( "github.com/gregjones/httpcache" "github.com/gregjones/httpcache/diskcache" + "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -23,89 +24,88 @@ import ( // periodically blow away your cache. 
// // [1]: https://standards.freedesktop.org/basedir-spec/basedir-spec-0.7.html -func (libvirt *Libvirt) UseCachedImage() (err error) { - if strings.HasPrefix(libvirt.Image, "file://") { - return nil +func getCachedImage(uri string) (string, error) { + if strings.HasPrefix(uri, "file://") { + return uri, nil } - logrus.Infof("Fetching OS image...") + logrus.Infof("fetching OS image...") // FIXME: Use os.UserCacheDir() once we bump to Go 1.11 // baseCacheDir, err := os.UserCacheDir() // if err != nil { - // return err + // return uri, err // } baseCacheDir := filepath.Join(os.Getenv("HOME"), ".cache") cacheDir := filepath.Join(baseCacheDir, "openshift-install", "libvirt") httpCacheDir := filepath.Join(cacheDir, "http") - err = os.MkdirAll(httpCacheDir, 0777) + err := os.MkdirAll(httpCacheDir, 0777) if err != nil { - return err + return uri, err } cache := diskcache.New(httpCacheDir) transport := httpcache.NewTransport(cache) - resp, err := transport.Client().Get(libvirt.Image) + resp, err := transport.Client().Get(uri) if err != nil { - return err + return uri, err } if resp.StatusCode != 200 { - return fmt.Errorf("%s while getting %s", resp.Status, libvirt.Image) + return uri, errors.Errorf("%s while getting %s", resp.Status, uri) } defer resp.Body.Close() key, err := cacheKey(resp.Header.Get("ETag")) if err != nil { - return fmt.Errorf("invalid ETag for %s: %v", libvirt.Image, err) + return uri, errors.Wrapf(err, "invalid ETag for %s", uri) } imageCacheDir := filepath.Join(cacheDir, "image") err = os.MkdirAll(imageCacheDir, 0777) if err != nil { - return err + return uri, err } imagePath := filepath.Join(imageCacheDir, key) _, err = os.Stat(imagePath) if err == nil { - logrus.Debugf("Using cached OS image %q", imagePath) + logrus.Debugf("using cached OS image %q", imagePath) } else { if !os.IsNotExist(err) { - return err + return uri, err } err = cacheImage(resp.Body, imagePath) if err != nil { - return err + return uri, err } } - libvirt.Image = 
fmt.Sprintf("file://%s", filepath.ToSlash(imagePath)) - return nil + return fmt.Sprintf("file://%s", filepath.ToSlash(imagePath)), nil } func cacheKey(etag string) (key string, err error) { if etag == "" { - return "", fmt.Errorf("caching is not supported when ETag is unset") + return "", errors.Errorf("caching is not supported when ETag is unset") } etagSections := strings.SplitN(etag, "\"", 3) if len(etagSections) != 3 { - return "", fmt.Errorf("broken quoting: %s", etag) + return "", errors.Errorf("broken quoting: %s", etag) } if etagSections[0] == "W/" { - return "", fmt.Errorf("caching is not supported for weak ETags: %s", etag) + return "", errors.Errorf("caching is not supported for weak ETags: %s", etag) } opaque := etagSections[1] if opaque == "" { - return "", fmt.Errorf("caching is not supported when the opaque tag is unset: %s", etag) + return "", errors.Errorf("caching is not supported when the opaque tag is unset: %s", etag) } hashed := md5.Sum([]byte(opaque)) return hex.EncodeToString(hashed[:]), nil } func cacheImage(reader io.Reader, imagePath string) (err error) { - logrus.Debugf("Unpacking OS image into %q...", imagePath) + logrus.Debugf("unpacking OS image into %q...", imagePath) flockPath := fmt.Sprintf("%s.lock", imagePath) flock, err := os.Create(flockPath) diff --git a/pkg/installerassets/libvirt/doc.go b/pkg/installerassets/libvirt/doc.go new file mode 100644 index 00000000000..2580cd00cc3 --- /dev/null +++ b/pkg/installerassets/libvirt/doc.go @@ -0,0 +1,3 @@ +// Package libvirt contains libvirt-specific helpers for the asset +// Merkle DAG. 
+package libvirt diff --git a/pkg/installerassets/libvirt/image.go b/pkg/installerassets/libvirt/image.go new file mode 100644 index 00000000000..c65445ebe35 --- /dev/null +++ b/pkg/installerassets/libvirt/image.go @@ -0,0 +1,32 @@ +package libvirt + +import ( + "context" + "os" + + "github.com/openshift/installer/pkg/installerassets" + "github.com/openshift/installer/pkg/rhcos" + "github.com/pkg/errors" +) + +func getImage(ctx context.Context) ([]byte, error) { + value := os.Getenv("OPENSHIFT_INSTALL_LIBVIRT_IMAGE") + if value == "" { + value, err := rhcos.QEMU(ctx, rhcos.DefaultChannel) + if err != nil { + return nil, errors.Wrap(err, "failed to fetch QEMU image URL") + } + return []byte(value), nil + } + + err := validURI(value) + if err != nil { + return nil, errors.Wrap(err, "resolve OPENSHIFT_INSTALL_LIBVIRT_IMAGE") + } + + return []byte(value), nil +} + +func init() { + installerassets.Defaults["libvirt/image"] = getImage +} diff --git a/pkg/installerassets/libvirt/machines.go b/pkg/installerassets/libvirt/machines.go new file mode 100644 index 00000000000..f77ec7792cd --- /dev/null +++ b/pkg/installerassets/libvirt/machines.go @@ -0,0 +1,124 @@ +package libvirt + +import ( + "context" + "fmt" + "strconv" + + "github.com/ghodss/yaml" + libvirtprovider "github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1alpha1" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clusterapi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" +) + +func masterMachinesRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "manifests/libvirt/99_openshift-cluster-api_master-machines.yaml", + RebuildHelper: masterMachinesRebuilder, + } + + parents, err := asset.GetParents( + ctx, + getByName, + "cluster-name", + "libvirt/uri", 
+ "machines/master-count", + "network/cluster-cidr", + ) + if err != nil { + return nil, err + } + + clusterCIDR := string(parents["network/cluster-cidr"].Data) + clusterName := string(parents["cluster-name"].Data) + uri := string(parents["libvirt/uri"].Data) + + masterCount, err := strconv.ParseUint(string(parents["machines/master-count"].Data), 10, 32) + if err != nil { + return nil, errors.Wrap(err, "parse master count") + } + + role := "master" + userDataSecret := fmt.Sprintf("%s-user-data", role) + poolName := role // FIXME: knob to control this? + total := int64(masterCount) + + provider, err := provider(uri, clusterName, clusterCIDR, userDataSecret) + if err != nil { + return nil, errors.Wrap(err, "create provider") + } + + var machines []runtime.RawExtension + for idx := int64(0); idx < total; idx++ { + machine := clusterapi.Machine{ + TypeMeta: metav1.TypeMeta{ + Kind: "Machine", + APIVersion: "cluster.k8s.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-%s-%d", clusterName, poolName, idx), + Namespace: "openshift-cluster-api", + Labels: map[string]string{ + "sigs.k8s.io/cluster-api-cluster": clusterName, + "sigs.k8s.io/cluster-api-machine-role": role, + "sigs.k8s.io/cluster-api-machine-type": role, + }, + }, + Spec: clusterapi.MachineSpec{ + ProviderConfig: clusterapi.ProviderConfig{ + Value: &runtime.RawExtension{Object: provider}, + }, + // we don't need to set Versions, because we control those via operators. 
+ }, + } + + machines = append(machines, runtime.RawExtension{Object: &machine}) + } + + list := &metav1.List{ + TypeMeta: metav1.TypeMeta{ + Kind: "List", + APIVersion: "v1", + }, + Items: machines, + } + + asset.Data, err = yaml.Marshal(list) + if err != nil { + return nil, err + } + + return asset, nil +} + +func provider(uri, clusterName, clusterCIDR, userDataSecret string) (*libvirtprovider.LibvirtMachineProviderConfig, error) { + return &libvirtprovider.LibvirtMachineProviderConfig{ + TypeMeta: metav1.TypeMeta{ + Kind: "LibvirtMachineProviderConfig", + APIVersion: "libvirtproviderconfig.k8s.io/v1alpha1", + }, + DomainMemory: 2048, + DomainVcpu: 2, + Ignition: &libvirtprovider.Ignition{ + UserDataSecret: userDataSecret, + }, + Volume: &libvirtprovider.Volume{ + PoolName: "default", + BaseVolumeID: fmt.Sprintf("/var/lib/libvirt/images/%s-base", clusterName), + }, + NetworkInterfaceName: clusterName, + NetworkInterfaceAddress: clusterCIDR, + Autostart: false, + URI: uri, + }, nil +} + +func init() { + installerassets.Rebuilders["manifests/libvirt/99_openshift-cluster-api_master-machines.yaml"] = masterMachinesRebuilder + installerassets.Defaults["libvirt/machines/master-count"] = installerassets.ConstantDefault([]byte("1")) +} diff --git a/pkg/installerassets/libvirt/machinesets.go b/pkg/installerassets/libvirt/machinesets.go new file mode 100644 index 00000000000..37f4d42bd32 --- /dev/null +++ b/pkg/installerassets/libvirt/machinesets.go @@ -0,0 +1,101 @@ +package libvirt + +import ( + "context" + "fmt" + + "github.com/ghodss/yaml" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/pointer" + clusterapi "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" +) + +func workerMachineSetsRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := 
&assets.Asset{ + Name: "manifests/libvirt/99_openshift-cluster-api_worker-machinesets.yaml", + RebuildHelper: workerMachineSetsRebuilder, + } + + parents, err := asset.GetParents( + ctx, + getByName, + "cluster-name", + "libvirt/uri", + "network/cluster-cidr", + ) + if err != nil { + return nil, err + } + + clusterCIDR := string(parents["network/cluster-cidr"].Data) + clusterName := string(parents["cluster-name"].Data) + uri := string(parents["libvirt/uri"].Data) + + role := "worker" + userDataSecret := fmt.Sprintf("%s-user-data", role) + poolName := role // FIXME: knob to control this + total := int64(1) // FIXME: knob to control this + + provider, err := provider(uri, clusterName, clusterCIDR, userDataSecret) + if err != nil { + return nil, errors.Wrap(err, "create provider") + } + + name := fmt.Sprintf("%s-%s-%d", clusterName, poolName, 0) + machineSet := clusterapi.MachineSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "MachineSet", + APIVersion: "cluster.k8s.io/v1alpha1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "openshift-cluster-api", + Labels: map[string]string{ + "sigs.k8s.io/cluster-api-cluster": clusterName, + "sigs.k8s.io/cluster-api-machine-role": role, + "sigs.k8s.io/cluster-api-machine-type": role, + }, + }, + Spec: clusterapi.MachineSetSpec{ + Replicas: pointer.Int32Ptr(int32(total)), + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "sigs.k8s.io/cluster-api-machineset": name, + "sigs.k8s.io/cluster-api-cluster": clusterName, + }, + }, + Template: clusterapi.MachineTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "sigs.k8s.io/cluster-api-machineset": name, + "sigs.k8s.io/cluster-api-cluster": clusterName, + "sigs.k8s.io/cluster-api-machine-role": role, + "sigs.k8s.io/cluster-api-machine-type": role, + }, + }, + Spec: clusterapi.MachineSpec{ + ProviderConfig: clusterapi.ProviderConfig{ + Value: &runtime.RawExtension{Object: provider}, + }, + // we don't need to set Versions, because 
we control those via cluster operators. + }, + }, + }, + } + + asset.Data, err = yaml.Marshal(machineSet) + if err != nil { + return nil, err + } + + return asset, nil +} + +func init() { + installerassets.Rebuilders["manifests/libvirt/99_openshift-cluster-api_worker-machinesets.yaml"] = workerMachineSetsRebuilder +} diff --git a/pkg/installerassets/libvirt/metadata.go b/pkg/installerassets/libvirt/metadata.go new file mode 100644 index 00000000000..8e8631ca41c --- /dev/null +++ b/pkg/installerassets/libvirt/metadata.go @@ -0,0 +1,39 @@ +package libvirt + +import ( + "context" + "encoding/json" + + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" + "github.com/openshift/installer/pkg/types/libvirt" +) + +func metadataRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "libvirt/metadata.json", + RebuildHelper: metadataRebuilder, + } + + parents, err := asset.GetParents(ctx, getByName, "libvirt/uri") + if err != nil { + return nil, err + } + + uri := string(parents["libvirt/uri"].Data) + + metadata := &libvirt.Metadata{ + URI: uri, + } + + asset.Data, err = json.Marshal(metadata) + if err != nil { + return nil, err + } + + return asset, nil +} + +func init() { + installerassets.Rebuilders["libvirt/metadata.json"] = metadataRebuilder +} diff --git a/pkg/installerassets/libvirt/network.go b/pkg/installerassets/libvirt/network.go new file mode 100644 index 00000000000..9cc1bc895f9 --- /dev/null +++ b/pkg/installerassets/libvirt/network.go @@ -0,0 +1,10 @@ +package libvirt + +import ( + "github.com/openshift/installer/pkg/installerassets" +) + +func init() { + installerassets.Defaults["libvirt/network/interface-name"] = installerassets.ConstantDefault([]byte("tt0")) + installerassets.Defaults["libvirt/network/node-cidr"] = installerassets.ConstantDefault([]byte("192.168.126.0/24")) +} diff --git a/pkg/installerassets/libvirt/terraform.go 
b/pkg/installerassets/libvirt/terraform.go new file mode 100644 index 00000000000..df5d22c087a --- /dev/null +++ b/pkg/installerassets/libvirt/terraform.go @@ -0,0 +1,92 @@ +package libvirt + +import ( + "context" + "encoding/json" + "net" + "strconv" + + "github.com/apparentlymart/go-cidr/cidr" + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" + "github.com/pkg/errors" +) + +type terraformConfig struct { + URI string `json:"tectonic_libvirt_uri,omitempty"` + Image string `json:"tectonic_os_image,omitempty"` + IfName string `json:"tectonic_libvirt_network_if,omitempty"` + IPRange string `json:"tectonic_libvirt_ip_range,omitempty"` + BootstrapIP string `json:"tectonic_libvirt_bootstrap_ip,omitempty"` + MasterIPs []string `json:"tectonic_libvirt_master_ips,omitempty"` +} + +func terraformRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "terraform/libvirt-terraform.auto.tfvars", + RebuildHelper: terraformRebuilder, + } + + parents, err := asset.GetParents( + ctx, + getByName, + "libvirt/image", + "libvirt/network/interface-name", + "libvirt/uri", + "machines/master-count", + "network/node-cidr", + ) + if err != nil { + return nil, err + } + + masterCount, err := strconv.ParseUint(string(parents["machines/master-count"].Data), 10, 0) + if err != nil { + return nil, errors.Wrap(err, "parse master count") + } + + _, nodeCIDR, err := net.ParseCIDR(string(parents["network/node-cidr"].Data)) + if err != nil { + return nil, errors.Wrap(err, "parse node CIDR") + } + + bootstrapIP, err := cidr.Host(nodeCIDR, 10) + if err != nil { + return nil, errors.Wrap(err, "generate bootstrap IP") + } + + masterIPs := make([]string, 0, masterCount) + for i := 0; i < int(masterCount); i++ { + masterIP, err := cidr.Host(nodeCIDR, 11+i) + if err != nil { + return nil, errors.Wrap(err, "generate master IP") + } + masterIPs = append(masterIPs, masterIP.String()) + } + + 
image := string(parents["libvirt/image"].Data) + image, err = getCachedImage(image) + if err != nil { + return nil, errors.Wrapf(err, "pull %s through the cache", image) + } + + config := &terraformConfig{ + URI: string(parents["libvirt/uri"].Data), + Image: image, + IfName: string(parents["libvirt/network/interface-name"].Data), + IPRange: nodeCIDR.String(), + BootstrapIP: bootstrapIP.String(), + MasterIPs: masterIPs, + } + + asset.Data, err = json.Marshal(config) + if err != nil { + return nil, err + } + + return asset, nil +} + +func init() { + installerassets.Rebuilders["terraform/libvirt-terraform.auto.tfvars"] = terraformRebuilder +} diff --git a/pkg/installerassets/libvirt/uri.go b/pkg/installerassets/libvirt/uri.go new file mode 100644 index 00000000000..14962b5b761 --- /dev/null +++ b/pkg/installerassets/libvirt/uri.go @@ -0,0 +1,62 @@ +package libvirt + +import ( + "context" + "fmt" + "net/url" + "os" + + "github.com/openshift/installer/pkg/installerassets" + "github.com/pkg/errors" + survey "gopkg.in/AlecAivazis/survey.v1" +) + +func getURI(ctx context.Context) ([]byte, error) { + value := os.Getenv("OPENSHIFT_INSTALL_LIBVIRT_URI") + if value != "" { + err := validURI(value) + if err != nil { + return nil, errors.Wrap(err, "resolve OPENSHIFT_INSTALL_LIBVIRT_URI") + } + return []byte(value), nil + } + + question := &survey.Question{ + Prompt: &survey.Input{ + Message: "Libvirt Connection URI", + Help: "The libvirt connection URI to be used. This must be accessible from the running cluster.", + Default: "qemu+tcp://192.168.122.1/system", + }, + Validate: survey.ComposeValidators(survey.Required, uriValidator), + } + + var response string + err := survey.Ask([]*survey.Question{question}, &response) + if err != nil { + return nil, errors.Wrap(err, "ask") + } + + return []byte(response), nil +} + +// uriValidator validates if the answer provided in prompt is a valid +// url and has non-empty scheme. 
+func uriValidator(ans interface{}) error { + return validURI(ans.(string)) +} + +// validURI validates if the URI is a valid URI with a non-empty scheme. +func validURI(uri string) error { + parsed, err := url.Parse(uri) + if err != nil { + return err + } + if parsed.Scheme == "" { + return fmt.Errorf("invalid URI %q (no scheme)", uri) + } + return nil +} + +func init() { + installerassets.Defaults["libvirt/uri"] = getURI +} diff --git a/pkg/installerassets/machines.go b/pkg/installerassets/machines.go new file mode 100644 index 00000000000..6c501bb85b0 --- /dev/null +++ b/pkg/installerassets/machines.go @@ -0,0 +1,8 @@ +package installerassets + +func init() { + Rebuilders["machines/master-count"] = PlatformOverrideRebuilder( + "machines/master-count", + ConstantDefault([]byte("3")), + ) +} diff --git a/pkg/installerassets/manifests.go b/pkg/installerassets/manifests.go new file mode 100644 index 00000000000..9a9d23647c8 --- /dev/null +++ b/pkg/installerassets/manifests.go @@ -0,0 +1,82 @@ +package installerassets + +func init() { + Rebuilders["files/opt/tectonic/manifests/cvo-overrides.yaml"] = TemplateRebuilder( + "files/opt/tectonic/manifests/cvo-overrides.yaml", + map[string]string{ + "ClusterID": "cluster-id", + }, + nil, + ) + + Rebuilders["files/opt/tectonic/manifests/etcd-service-endpoints.yaml"] = TemplateRebuilder( + "files/opt/tectonic/manifests/etcd-service-endpoints.yaml", + map[string]string{ + "BaseDomain": "base-domain", + "ClusterName": "cluster-name", + "MasterCount": "machines/master-count", + }, + nil, + ) + + Rebuilders["files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml"] = TemplateRebuilder( + "files/opt/tectonic/manifests/host-etcd-service-endpoints.yaml", + map[string]string{ + "BaseDomain": "base-domain", + "ClusterName": "cluster-name", + "MasterCount": "machines/master-count", + }, + nil, + ) + + Rebuilders["files/opt/tectonic/manifests/legacy-cvo-overrides.yaml"] = TemplateRebuilder( + 
"files/opt/tectonic/manifests/legacy-cvo-overrides.yaml", + map[string]string{ + "ClusterID": "cluster-id", + }, + nil, + ) + + Rebuilders["files/opt/tectonic/manifests/kube-system-configmap-etcd-serving-ca.yaml"] = TemplateRebuilder( + "files/opt/tectonic/manifests/kube-system-configmap-etcd-serving-ca.yaml", + map[string]string{ + "Cert": "tls/etcd-ca.crt", + }, + nil, + ) + + Rebuilders["files/opt/tectonic/manifests/kube-system-configmap-root-ca.yaml"] = TemplateRebuilder( + "files/opt/tectonic/manifests/kube-system-configmap-root-ca.yaml", + map[string]string{ + "Cert": "tls/root-ca.crt", + }, + nil, + ) + + Rebuilders["files/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml"] = TemplateRebuilder( + "files/opt/tectonic/manifests/kube-system-secret-etcd-client.yaml", + map[string]string{ + "Cert": "tls/etcd-client.crt", + "Key": "tls/etcd-client.key", + }, + nil, + ) + + Rebuilders["files/opt/tectonic/manifests/machine-config-server-tls-secret.yaml"] = TemplateRebuilder( + "files/opt/tectonic/manifests/machine-config-server-tls-secret.yaml", + map[string]string{ + "Cert": "tls/machine-config-server.crt", + "Key": "tls/machine-config-server.key", + }, + nil, + ) + + Rebuilders["files/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml"] = TemplateRebuilder( + "files/opt/tectonic/manifests/openshift-service-cert-signer-ca-secret.yaml", + map[string]string{ + "Cert": "tls/service-serving-ca.crt", + "Key": "tls/service-serving-ca.key", + }, + nil, + ) +} diff --git a/pkg/installerassets/metadata.go b/pkg/installerassets/metadata.go new file mode 100644 index 00000000000..b2eb7993351 --- /dev/null +++ b/pkg/installerassets/metadata.go @@ -0,0 +1,47 @@ +package installerassets + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/openshift/installer/pkg/assets" +) + +func metadataRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "metadata.json", + RebuildHelper: 
metadataRebuilder, + } + + parents, err := asset.GetParents(ctx, getByName, "cluster-name", "platform") + if err != nil { + return nil, err + } + + clusterName := string(parents["cluster-name"].Data) + platform := string(parents["platform"].Data) + + platformMetadataName := fmt.Sprintf("%s/metadata.json", platform) + parents, err = asset.GetParents(ctx, getByName, platformMetadataName) + if err != nil { + return nil, err + } + + platformMetadata := json.RawMessage(parents[platformMetadataName].Data) + metadata := map[string]interface{}{ + "clusterName": clusterName, + platform: platformMetadata, + } + + asset.Data, err = json.Marshal(metadata) + if err != nil { + return nil, err + } + + return asset, nil +} + +func init() { + Rebuilders["metadata.json"] = metadataRebuilder +} diff --git a/pkg/installerassets/network.go b/pkg/installerassets/network.go new file mode 100644 index 00000000000..c7c90c6db96 --- /dev/null +++ b/pkg/installerassets/network.go @@ -0,0 +1,115 @@ +package installerassets + +import ( + "context" + "strconv" + + "github.com/ghodss/yaml" + netopv1 "github.com/openshift/cluster-network-operator/pkg/apis/networkoperator/v1" + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/types" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func networkConfigRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "manifests/cluster-network-02-config.yaml", + RebuildHelper: networkConfigRebuilder, + } + + parents, err := asset.GetParents( + ctx, + getByName, + "manifests/cluster-config.yaml", + "network/host-subnet-length", + ) + if err != nil { + return nil, err + } + + hostSubnetLength, err := strconv.ParseUint(string(parents["network/host-subnet-length"].Data), 10, 32) + if err != nil { + return nil, errors.Wrap(err, "parse host subnet length") + } + + var clusterConfig *corev1.ConfigMap + err = 
yaml.Unmarshal(parents["manifests/cluster-config.yaml"].Data, &clusterConfig) + if err != nil { + return nil, errors.Wrap(err, "unmarshal cluster-config") + } + + var installConfig *types.InstallConfig + err = yaml.Unmarshal([]byte(clusterConfig.Data["install-config"]), &installConfig) + if err != nil { + return nil, errors.Wrap(err, "unmarshal install-config") + } + + netConfig := installConfig.Networking + + // determine pod address space. + // This can go away when we get rid of PodCIDR + // entirely in favor of ClusterNetworks + var clusterNets []netopv1.ClusterNetwork + if len(netConfig.ClusterNetworks) > 0 { + clusterNets = netConfig.ClusterNetworks + } else if netConfig.PodCIDR == nil || netConfig.PodCIDR.IPNet.IP.IsUnspecified() { + return nil, errors.Errorf("either PodCIDR or ClusterNetworks must be specified") + } else { + clusterNets = []netopv1.ClusterNetwork{ + { + CIDR: netConfig.PodCIDR.String(), + HostSubnetLength: uint32(hostSubnetLength), + }, + } + } + + defaultNet := netopv1.DefaultNetworkDefinition{ + Type: netConfig.Type, + } + + // Add any network-specific configuration defaults here. + switch netConfig.Type { + case netopv1.NetworkTypeOpenshiftSDN: + defaultNet.OpenshiftSDNConfig = &netopv1.OpenshiftSDNConfig{ + // Default to network policy, operator provides all other defaults. 
+ Mode: netopv1.SDNModePolicy, + } + } + + config := &netopv1.NetworkConfig{ + TypeMeta: metav1.TypeMeta{ + APIVersion: netopv1.SchemeGroupVersion.String(), + Kind: "NetworkConfig", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + // not namespaced + }, + + Spec: netopv1.NetworkConfigSpec{ + ServiceNetwork: netConfig.ServiceCIDR.String(), + ClusterNetworks: clusterNets, + DefaultNetwork: defaultNet, + }, + } + + asset.Data, err = yaml.Marshal(config) + if err != nil { + return nil, err + } + + return asset, nil +} + +func init() { + Rebuilders["manifests/cluster-network-02-config.yaml"] = networkConfigRebuilder + Rebuilders["network/node-cidr"] = PlatformOverrideRebuilder( + "network/node-cidr", + ConstantDefault([]byte("10.0.0.0/16")), + ) + Defaults["network/cluster-cidr"] = ConstantDefault([]byte("10.128.0.0/14")) + Defaults["network/host-subnet-length"] = ConstantDefault([]byte("9")) + Defaults["network/service-cidr"] = ConstantDefault([]byte("172.30.0.0/16")) +} diff --git a/pkg/asset/installconfig/openstack/OWNERS b/pkg/installerassets/openstack/OWNERS similarity index 100% rename from pkg/asset/installconfig/openstack/OWNERS rename to pkg/installerassets/openstack/OWNERS diff --git a/pkg/installerassets/openstack/cloud.go b/pkg/installerassets/openstack/cloud.go new file mode 100644 index 00000000000..c0034ca32d2 --- /dev/null +++ b/pkg/installerassets/openstack/cloud.go @@ -0,0 +1,44 @@ +package openstack + +import ( + "context" + "os" + + "github.com/openshift/installer/pkg/installerassets" + "github.com/pkg/errors" + survey "gopkg.in/AlecAivazis/survey.v1" +) + +func getCloud(ctx context.Context) ([]byte, error) { + value := os.Getenv("OPENSHIFT_INSTALL_OPENSTACK_CLOUD") + if value != "" { + //FIXME(russellb) add some validation here + return []byte(value), nil + } + + question := &survey.Question{ + //TODO(russellb) - We could open clouds.yaml here and read the list of defined clouds + //and then use survey.Select to let the user choose one. 
+ Prompt: &survey.Input{ + Message: "Cloud", + Help: "The OpenStack cloud name from clouds.yaml.", + Default: "cloudOne", + }, + Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { + //FIXME(russellb) add some validation here + return nil + }), + } + + var response string + err := survey.Ask([]*survey.Question{question}, &response) + if err != nil { + return nil, errors.Wrap(err, "ask") + } + + return []byte(response), nil +} + +func init() { + installerassets.Defaults["openstack/cloud"] = getCloud +} diff --git a/pkg/installerassets/openstack/doc.go b/pkg/installerassets/openstack/doc.go new file mode 100644 index 00000000000..74481cb52f9 --- /dev/null +++ b/pkg/installerassets/openstack/doc.go @@ -0,0 +1,3 @@ +// Package openstack contains OpenStack-specific helpers for the asset +// Merkle DAG. +package openstack diff --git a/pkg/installerassets/openstack/externalnetwork.go b/pkg/installerassets/openstack/externalnetwork.go new file mode 100644 index 00000000000..62a1abbf4ca --- /dev/null +++ b/pkg/installerassets/openstack/externalnetwork.go @@ -0,0 +1,41 @@ +package openstack + +import ( + "context" + "os" + + "github.com/openshift/installer/pkg/installerassets" + "github.com/pkg/errors" + survey "gopkg.in/AlecAivazis/survey.v1" +) + +func getNetwork(ctx context.Context) (data []byte, err error) { + value := os.Getenv("OPENSHIFT_INSTALL_OPENSTACK_EXTERNAL_NETWORK") + if value != "" { + //FIXME(shardy) add some validation here + return []byte(value), nil + } + + question := &survey.Question{ + Prompt: &survey.Input{ + Message: "ExternalNetwork", + Help: "The OpenStack external network to be used for installation.", + }, + Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { + //FIXME(shadower) add some validation here + return nil + }), + } + + var response string + err = survey.Ask([]*survey.Question{question}, &response) + if err != nil { + return nil, errors.Wrap(err, "ask") + } + + return 
[]byte(response), nil +} + +func init() { + installerassets.Defaults["openstack/external-network"] = getNetwork +} diff --git a/pkg/installerassets/openstack/image.go b/pkg/installerassets/openstack/image.go new file mode 100644 index 00000000000..31f720cae88 --- /dev/null +++ b/pkg/installerassets/openstack/image.go @@ -0,0 +1,42 @@ +package openstack + +import ( + "context" + "os" + + "github.com/openshift/installer/pkg/installerassets" + "github.com/pkg/errors" + survey "gopkg.in/AlecAivazis/survey.v1" +) + +func getImage(ctx context.Context) ([]byte, error) { + value := os.Getenv("OPENSHIFT_INSTALL_OPENSTACK_IMAGE") + if value != "" { + //FIXME(shardy) add some validation here + return []byte(value), nil + } + + question := &survey.Question{ + Prompt: &survey.Input{ + Message: "Image", + Help: "The OpenStack image to be used for installation.", + Default: "rhcos", + }, + Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { + //FIXME(shardy) add some validation here + return nil + }), + } + + var response string + err := survey.Ask([]*survey.Question{question}, &response) + if err != nil { + return nil, errors.Wrap(err, "ask") + } + + return []byte(response), nil +} + +func init() { + installerassets.Defaults["openstack/image"] = getImage +} diff --git a/pkg/installerassets/openstack/manifests.go b/pkg/installerassets/openstack/manifests.go new file mode 100644 index 00000000000..6ef19d5068a --- /dev/null +++ b/pkg/installerassets/openstack/manifests.go @@ -0,0 +1,34 @@ +package openstack + +import ( + "context" + + "github.com/ghodss/yaml" + "github.com/gophercloud/utils/openstack/clientconfig" + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" +) + +func cloudConfigRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + clouds, err := clientconfig.LoadCloudsYAML() + if err != nil { + return nil, err + } + + marshalled, err := yaml.Marshal(clouds) + if 
err != nil { + return nil, err + } + + return installerassets.TemplateRebuilder( + "files/opt/tectonic/tectonic/openstack/99_cloud-creds-secret.yaml", + nil, + map[string]interface{}{ + "Creds": marshalled, + }, + )(ctx, getByName) +} + +func init() { + installerassets.Rebuilders["files/opt/tectonic/tectonic/openstack/99_cloud-creds-secret.yaml"] = cloudConfigRebuilder +} diff --git a/pkg/installerassets/openstack/metadata.go b/pkg/installerassets/openstack/metadata.go new file mode 100644 index 00000000000..a8e9bdf28cb --- /dev/null +++ b/pkg/installerassets/openstack/metadata.go @@ -0,0 +1,51 @@ +package openstack + +import ( + "context" + "encoding/json" + + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" + "github.com/openshift/installer/pkg/types/openstack" +) + +func metadataRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "openstack/metadata.json", + RebuildHelper: metadataRebuilder, + } + + parents, err := asset.GetParents( + ctx, + getByName, + "openstack/cloud", + "openstack/region", + "cluster-id", + ) + if err != nil { + return nil, err + } + + cloud := string(parents["openstack/cloud"].Data) + region := string(parents["openstack/region"].Data) + clusterID := string(parents["cluster-id"].Data) + + metadata := &openstack.Metadata{ + Cloud: cloud, + Region: region, + Identifier: map[string]string{ + "tectonicClusterID": clusterID, + }, + } + + asset.Data, err = json.Marshal(metadata) + if err != nil { + return nil, err + } + + return asset, nil +} + +func init() { + installerassets.Rebuilders["openstack/metadata.json"] = metadataRebuilder +} diff --git a/pkg/installerassets/openstack/region.go b/pkg/installerassets/openstack/region.go new file mode 100644 index 00000000000..7f7a6467f14 --- /dev/null +++ b/pkg/installerassets/openstack/region.go @@ -0,0 +1,42 @@ +package openstack + +import ( + "context" + "os" + + 
"github.com/openshift/installer/pkg/installerassets" + "github.com/pkg/errors" + survey "gopkg.in/AlecAivazis/survey.v1" +) + +func getRegion(ctx context.Context) ([]byte, error) { + value := os.Getenv("OPENSHIFT_INSTALL_OPENSTACK_REGION") + if value != "" { + //FIXME(shardy) add some validation here + return []byte(value), nil + } + + question := &survey.Question{ + Prompt: &survey.Input{ + Message: "Region", + Help: "The OpenStack region to be used for installation.", + Default: "regionOne", + }, + Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { + //FIXME(shardy) add some validation here + return nil + }), + } + + var response string + err := survey.Ask([]*survey.Question{question}, &response) + if err != nil { + return nil, errors.Wrap(err, "ask") + } + + return []byte(response), nil +} + +func init() { + installerassets.Defaults["openstack/region"] = getRegion +} diff --git a/pkg/installerassets/platform.go b/pkg/installerassets/platform.go new file mode 100644 index 00000000000..56d312f59f6 --- /dev/null +++ b/pkg/installerassets/platform.go @@ -0,0 +1,49 @@ +package installerassets + +import ( + "context" + "os" + "sort" + + "github.com/openshift/installer/pkg/types" + "github.com/pkg/errors" + survey "gopkg.in/AlecAivazis/survey.v1" +) + +func getPlatform(ctx context.Context) (data []byte, err error) { + value := os.Getenv("OPENSHIFT_INSTALL_PLATFORM") + if value != "" { + i := sort.SearchStrings(types.PlatformNames, value) + if i == len(types.PlatformNames) || types.PlatformNames[i] != value { + return nil, errors.Errorf("invalid platform %q", value) + } + return []byte(value), nil + } + + question := &survey.Question{ + Prompt: &survey.Select{ + Message: "Platform", + Options: types.PlatformNames, + }, + Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { + choice := ans.(string) + i := sort.SearchStrings(types.PlatformNames, choice) + if i == len(types.PlatformNames) || types.PlatformNames[i] 
!= choice { + return errors.Errorf("invalid platform %q", choice) + } + return nil + }), + } + + var response string + err = survey.Ask([]*survey.Question{question}, &response) + if err != nil { + return nil, errors.Wrap(err, "ask") + } + + return []byte(response), nil +} + +func init() { + Defaults["platform"] = getPlatform +} diff --git a/pkg/installerassets/pull.go b/pkg/installerassets/pull.go new file mode 100644 index 00000000000..8400fc7325a --- /dev/null +++ b/pkg/installerassets/pull.go @@ -0,0 +1,96 @@ +package installerassets + +import ( + "context" + "encoding/json" + "io/ioutil" + "os" + + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/validate" + "github.com/pkg/errors" + survey "gopkg.in/AlecAivazis/survey.v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func getPullSecret(ctx context.Context) (data []byte, err error) { + value := os.Getenv("OPENSHIFT_INSTALL_PULL_SECRET") + if value != "" { + err := validate.JSON([]byte(value)) + if err != nil { + return nil, err + } + return []byte(value), nil + } + + path := os.Getenv("OPENSHIFT_INSTALL_PULL_SECRET_PATH") + if path != "" { + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + err = validate.JSON(data) + if err != nil { + return nil, err + } + return data, nil + } + + question := &survey.Question{ + Prompt: &survey.Input{ + Message: "Pull Secret", + Help: "The container registry pull secret for this cluster.", + }, + Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error { + return validate.JSON([]byte(ans.(string))) + }), + } + + var response string + err = survey.Ask([]*survey.Question{question}, &response) + if err != nil { + return nil, errors.Wrap(err, "ask") + } + + return []byte(response), nil +} + +func pullSecretRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "manifests/pull.json", + 
RebuildHelper: pullSecretRebuilder, + } + + parents, err := asset.GetParents(ctx, getByName, "pull-secret") + if err != nil { + return nil, err + } + + secret := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "coreos-pull-secret", + Namespace: metav1.NamespaceSystem, + }, + Data: map[string][]byte{ + corev1.DockerConfigJsonKey: parents["pull-secret"].Data, + }, + Type: corev1.SecretTypeDockerConfigJson, + } + + asset.Data, err = json.Marshal(secret) + if err != nil { + return nil, err + } + + return asset, nil +} + +func init() { + Defaults["pull-secret"] = getPullSecret + Rebuilders["manifests/pull.json"] = pullSecretRebuilder +} diff --git a/pkg/installerassets/scripts.go b/pkg/installerassets/scripts.go new file mode 100644 index 00000000000..5c6344ff3b5 --- /dev/null +++ b/pkg/installerassets/scripts.go @@ -0,0 +1,17 @@ +package installerassets + +func init() { + Rebuilders["files/usr/local/bin/bootkube.sh"] = TemplateRebuilder( + "files/usr/local/bin/bootkube.sh", + map[string]string{ + "BaseDomain": "base-domain", + "BootkubeImage": "image/bootkube", + "ClusterName": "cluster-name", + "EtcdCertSignerImage": "image/etcd-cert-signer", + "EtcdCtlImage": "image/etcdctl", + "ReleaseImage": "image/release", + "MasterCount": "machines/master-count", + }, + nil, + ) +} diff --git a/pkg/asset/installconfig/ssh.go b/pkg/installerassets/ssh.go similarity index 66% rename from pkg/asset/installconfig/ssh.go rename to pkg/installerassets/ssh.go index d792e402a61..a19421a2fe7 100644 --- a/pkg/asset/installconfig/ssh.go +++ b/pkg/installerassets/ssh.go @@ -1,34 +1,18 @@ -package installconfig +package installerassets import ( + "context" "fmt" "io/ioutil" "os" "path/filepath" "sort" + "github.com/openshift/installer/pkg/validate" "github.com/pkg/errors" survey "gopkg.in/AlecAivazis/survey.v1" - - "github.com/openshift/installer/pkg/asset" - "github.com/openshift/installer/pkg/validate" ) 
-const ( - none = "" -) - -type sshPublicKey struct { - Key string -} - -var _ asset.Asset = (*sshPublicKey)(nil) - -// Dependencies returns no dependencies. -func (a *sshPublicKey) Dependencies() []asset.Asset { - return nil -} - func readSSHKey(path string) (string, error) { keyAsBytes, err := ioutil.ReadFile(path) if err != nil { @@ -45,23 +29,22 @@ func readSSHKey(path string) (string, error) { return key, nil } -// Generate generates the SSH public key asset. -func (a *sshPublicKey) Generate(asset.Parents) error { +func sshDefaulter(ctx context.Context) ([]byte, error) { if value, ok := os.LookupEnv("OPENSHIFT_INSTALL_SSH_PUB_KEY"); ok { if value != "" { if err := validate.SSHPublicKey(value); err != nil { - return errors.Wrap(err, "failed to validate public key") + return nil, errors.Wrap(err, "failed to validate public key") } } - a.Key = value - return nil + return []byte(value), nil } + none := "" pubKeys := map[string]string{} if path, ok := os.LookupEnv("OPENSHIFT_INSTALL_SSH_PUB_KEY_PATH"); ok { key, err := readSSHKey(path) if err != nil { - return errors.Wrap(err, "failed to read public key file") + return nil, errors.Wrap(err, "failed to read public key file") } pubKeys[path] = key } else { @@ -70,7 +53,7 @@ func (a *sshPublicKey) Generate(asset.Parents) error { if home != "" { paths, err := filepath.Glob(filepath.Join(home, ".ssh", "*.pub")) if err != nil { - return errors.Wrap(err, "failed to glob for public key files") + return nil, errors.Wrap(err, "failed to glob for public key files") } for _, path := range paths { key, err := readSSHKey(path) @@ -84,9 +67,8 @@ func (a *sshPublicKey) Generate(asset.Parents) error { if len(pubKeys) == 1 { for _, value := range pubKeys { - a.Key = value + return []byte(value), nil } - return nil } var paths []string @@ -109,14 +91,12 @@ func (a *sshPublicKey) Generate(asset.Parents) error { } return nil }); err != nil { - return errors.Wrap(err, "failed UserInput for SSH public key") + return nil, errors.Wrap(err, 
"failed UserInput for SSH public key") } - a.Key = pubKeys[path] - return nil + return []byte(pubKeys[path]), nil } -// Name returns the human-friendly name of the asset. -func (a sshPublicKey) Name() string { - return "SSH Key" +func init() { + Defaults["ssh.pub"] = sshDefaulter } diff --git a/pkg/installerassets/terraform.go b/pkg/installerassets/terraform.go new file mode 100644 index 00000000000..513e3ce4a50 --- /dev/null +++ b/pkg/installerassets/terraform.go @@ -0,0 +1,66 @@ +package installerassets + +import ( + "context" + "encoding/json" + "strconv" + + "github.com/openshift/installer/pkg/assets" + "github.com/pkg/errors" +) + +type terraformConfig struct { + ClusterID string `json:"tectonic_cluster_id,omitempty"` + Name string `json:"tectonic_cluster_name,omitempty"` + BaseDomain string `json:"tectonic_base_domain,omitempty"` + Masters int `json:"tectonic_master_count,omitempty"` + + IgnitionBootstrap string `json:"ignition_bootstrap,omitempty"` + IgnitionMaster string `json:"ignition_master,omitempty"` +} + +func terraformRebuilder(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: "terraform/terraform.tfvars", + RebuildHelper: terraformRebuilder, + } + + parents, err := asset.GetParents( + ctx, + getByName, + "base-domain", + "cluster-id", + "cluster-name", + "ignition/bootstrap.ign", + "ignition/master.ign", + "machines/master-count", + ) + if err != nil { + return nil, err + } + + masterCount, err := strconv.ParseUint(string(parents["machines/master-count"].Data), 10, 0) + if err != nil { + return nil, errors.Wrap(err, "parse master count") + } + + config := &terraformConfig{ + ClusterID: string(parents["cluster-id"].Data), + Name: string(parents["cluster-name"].Data), + BaseDomain: string(parents["base-domain"].Data), + Masters: int(masterCount), + IgnitionBootstrap: string(parents["ignition/bootstrap.ign"].Data), + IgnitionMaster: string(parents["ignition/master.ign"].Data), + } + + 
asset.Data, err = json.Marshal(config) + if err != nil { + return nil, err + } + + return asset, nil +} + +func init() { + Rebuilders["terraform/terraform.tfvars"] = terraformRebuilder +} diff --git a/pkg/installerassets/tls/admin.go b/pkg/installerassets/tls/admin.go new file mode 100644 index 00000000000..ddddf73002e --- /dev/null +++ b/pkg/installerassets/tls/admin.go @@ -0,0 +1,31 @@ +package tls + +import ( + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "time" + + "github.com/openshift/installer/pkg/installerassets" +) + +func init() { + installerassets.Defaults["tls/admin-client.key"] = privateKey + installerassets.Rebuilders["tls/admin-client.crt"] = certificateRebuilder( + "tls/admin-client.crt", + "tls/admin-client.key", + "tls/kube-ca.crt", + "tls/kube-ca.key", + &x509.Certificate{ + BasicConstraintsValid: true, + IsCA: true, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth /* FIXME: why? 
*/, x509.ExtKeyUsageClientAuth}, + NotAfter: time.Now().Add(validityTenYears), + NotBefore: time.Now(), + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{CommonName: "system:admin", OrganizationalUnit: []string{"system:masters"}}, + }, + nil, + ) +} diff --git a/pkg/installerassets/tls/aggregatorca.go b/pkg/installerassets/tls/aggregatorca.go new file mode 100644 index 00000000000..c62fd2af1bc --- /dev/null +++ b/pkg/installerassets/tls/aggregatorca.go @@ -0,0 +1,30 @@ +package tls + +import ( + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "time" + + "github.com/openshift/installer/pkg/installerassets" +) + +func init() { + installerassets.Defaults["tls/aggregator-ca.key"] = privateKey + installerassets.Rebuilders["tls/aggregator-ca.crt"] = certificateRebuilder( + "tls/aggregator-ca.crt", + "tls/aggregator-ca.key", + "tls/root-ca.crt", + "tls/root-ca.key", + &x509.Certificate{ + BasicConstraintsValid: true, + IsCA: true, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + NotAfter: time.Now().Add(validityTenYears), + NotBefore: time.Now(), + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{CommonName: "aggregator", OrganizationalUnit: []string{"bootkube"}}, + }, + nil, + ) +} diff --git a/pkg/installerassets/tls/apiserver.go b/pkg/installerassets/tls/apiserver.go new file mode 100644 index 00000000000..f7ded810d5e --- /dev/null +++ b/pkg/installerassets/tls/apiserver.go @@ -0,0 +1,80 @@ +package tls + +import ( + "context" + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "math/big" + "net" + "time" + + "github.com/apparentlymart/go-cidr/cidr" + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" + "github.com/pkg/errors" +) + +func init() { + installerassets.Defaults["tls/api-server.key"] = privateKey + installerassets.Rebuilders["tls/api-server.crt"] = certificateRebuilder( + "tls/api-server.crt", + "tls/api-server.key", + "tls/kube-ca.crt", + 
"tls/kube-ca.key", + &x509.Certificate{ + BasicConstraintsValid: true, + IsCA: true, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, + NotAfter: time.Now().Add(validityTenYears), + NotBefore: time.Now(), + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{CommonName: "system:kube-apiserver", OrganizationalUnit: []string{"kube-master"}}, + }, + func(ctx context.Context, asset *assets.Asset, getByName assets.GetByString, template *x509.Certificate) (err error) { + parents, err := asset.GetParents( + ctx, + getByName, + "base-domain", + "cluster-name", + "network/service-cidr", + ) + if err != nil { + return err + } + + ip, ipnet, err := net.ParseCIDR(string(parents["network/service-cidr"].Data)) + if err != nil { + return errors.Wrap(err, "parse service CIDR") + } + ipnet.IP = ip + + apiServerAddress, err := cidr.Host(ipnet, 1) + if err != nil { + return errors.Wrap(err, "calculate API-server address") + } + + template.DNSNames = []string{ + fmt.Sprintf("%s-api.%s", string(parents["cluster-name"].Data), string(parents["base-domain"].Data)), + "kubernetes", + "kubernetes.default", + "kubernetes.default.svc", + "kubernetes.default.svc.cluster.local", + "localhost", + } + template.IPAddresses = []net.IP{ + apiServerAddress, + net.ParseIP("127.0.0.1"), + } + + return nil + }, + ) + + installerassets.Rebuilders["tls/api-server-chain.crt"] = certificateChainRebuilder( + "tls/api-server-chain.crt", + "tls/api-server.crt", + "tls/kube-ca.crt", + ) +} diff --git a/pkg/installerassets/tls/apiserverproxy.go b/pkg/installerassets/tls/apiserverproxy.go new file mode 100644 index 00000000000..96a877428be --- /dev/null +++ b/pkg/installerassets/tls/apiserverproxy.go @@ -0,0 +1,30 @@ +package tls + +import ( + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "time" + + "github.com/openshift/installer/pkg/installerassets" +) + +func init() { + 
installerassets.Defaults["tls/api-server-proxy.key"] = privateKey + installerassets.Rebuilders["tls/api-server-proxy.crt"] = certificateRebuilder( + "tls/api-server-proxy.crt", + "tls/api-server-proxy.key", + "tls/kube-ca.crt", + "tls/kube-ca.key", + &x509.Certificate{ + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + NotAfter: time.Now().Add(validityTenYears), + NotBefore: time.Now(), + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{CommonName: "system:kube-apiserver-proxy", OrganizationalUnit: []string{"kube-master"}}, + }, + nil, + ) +} diff --git a/pkg/installerassets/tls/etcdca.go b/pkg/installerassets/tls/etcdca.go new file mode 100644 index 00000000000..2d079d12d65 --- /dev/null +++ b/pkg/installerassets/tls/etcdca.go @@ -0,0 +1,30 @@ +package tls + +import ( + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "time" + + "github.com/openshift/installer/pkg/installerassets" +) + +func init() { + installerassets.Defaults["tls/etcd-ca.key"] = privateKey + installerassets.Rebuilders["tls/etcd-ca.crt"] = certificateRebuilder( + "tls/etcd-ca.crt", + "tls/etcd-ca.key", + "tls/root-ca.crt", + "tls/root-ca.key", + &x509.Certificate{ + BasicConstraintsValid: true, + IsCA: true, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + NotAfter: time.Now().Add(validityTenYears), + NotBefore: time.Now(), + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{CommonName: "etcd", OrganizationalUnit: []string{"etcd"}}, + }, + nil, + ) +} diff --git a/pkg/installerassets/tls/etcdclient.go b/pkg/installerassets/tls/etcdclient.go new file mode 100644 index 00000000000..107c868dd4d --- /dev/null +++ b/pkg/installerassets/tls/etcdclient.go @@ -0,0 +1,30 @@ +package tls + +import ( + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "time" + + "github.com/openshift/installer/pkg/installerassets" +) 
+ +func init() { + installerassets.Defaults["tls/etcd-client.key"] = privateKey + installerassets.Rebuilders["tls/etcd-client.crt"] = certificateRebuilder( + "tls/etcd-client.crt", + "tls/etcd-client.key", + "tls/etcd-ca.crt", + "tls/etcd-ca.key", + &x509.Certificate{ + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageKeyEncipherment, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + NotAfter: time.Now().Add(validityTenYears), + NotBefore: time.Now(), + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{CommonName: "etcd", OrganizationalUnit: []string{"etcd"}}, + }, + nil, + ) +} diff --git a/pkg/installerassets/tls/kubeca.go b/pkg/installerassets/tls/kubeca.go new file mode 100644 index 00000000000..c840c289f1f --- /dev/null +++ b/pkg/installerassets/tls/kubeca.go @@ -0,0 +1,30 @@ +package tls + +import ( + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "time" + + "github.com/openshift/installer/pkg/installerassets" +) + +func init() { + installerassets.Defaults["tls/kube-ca.key"] = privateKey + installerassets.Rebuilders["tls/kube-ca.crt"] = certificateRebuilder( + "tls/kube-ca.crt", + "tls/kube-ca.key", + "tls/root-ca.crt", + "tls/root-ca.key", + &x509.Certificate{ + BasicConstraintsValid: true, + IsCA: true, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + NotAfter: time.Now().Add(validityTenYears), + NotBefore: time.Now(), + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{CommonName: "kube-ca", OrganizationalUnit: []string{"bootkube"}}, + }, + nil, + ) +} diff --git a/pkg/installerassets/tls/kubelet.go b/pkg/installerassets/tls/kubelet.go new file mode 100644 index 00000000000..364d07df76c --- /dev/null +++ b/pkg/installerassets/tls/kubelet.go @@ -0,0 +1,32 @@ +package tls + +import ( + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "time" + + "github.com/openshift/installer/pkg/installerassets" +) + +func init() { + 
installerassets.Defaults["tls/kubelet-client.key"] = privateKey + installerassets.Rebuilders["tls/kubelet-client.crt"] = certificateRebuilder( + "tls/kubelet-client.crt", + "tls/kubelet-client.key", + "tls/kube-ca.crt", + "tls/kube-ca.key", + &x509.Certificate{ + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + NotAfter: time.Now().Add(validityThirtyMinutes), + NotBefore: time.Now(), + SerialNumber: new(big.Int).SetInt64(0), + // system:masters is a hack to get the kubelet up without kube-core + // TODO(node): make kubelet bootstrapping secure with minimal permissions eventually switching to system:node:* CommonName + Subject: pkix.Name{CommonName: "system:serviceaccount:kube-system:default", Organization: []string{"system:serviceaccounts:kube-system", "system:masters"}}, + }, + nil, + ) +} diff --git a/pkg/installerassets/tls/machineconfigserver.go b/pkg/installerassets/tls/machineconfigserver.go new file mode 100644 index 00000000000..17a90525cba --- /dev/null +++ b/pkg/installerassets/tls/machineconfigserver.go @@ -0,0 +1,48 @@ +package tls + +import ( + "context" + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "math/big" + "time" + + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" +) + +func init() { + installerassets.Defaults["tls/machine-config-server.key"] = privateKey + installerassets.Rebuilders["tls/machine-config-server.crt"] = certificateRebuilder( + "tls/machine-config-server.crt", + "tls/machine-config-server.key", + "tls/root-ca.crt", + "tls/root-ca.key", + &x509.Certificate{ + BasicConstraintsValid: true, + KeyUsage: x509.KeyUsageKeyEncipherment, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + NotAfter: time.Now().Add(validityTenYears), + NotBefore: time.Now(), + SerialNumber: new(big.Int).SetInt64(0), + }, + func(ctx context.Context, asset *assets.Asset, getByName 
assets.GetByString, template *x509.Certificate) (err error) { + parents, err := asset.GetParents( + ctx, + getByName, + "base-domain", + "cluster-name", + ) + if err != nil { + return err + } + + hostname := fmt.Sprintf("%s-api.%s", string(parents["cluster-name"].Data), string(parents["base-domain"].Data)) + template.Subject = pkix.Name{CommonName: hostname} + template.DNSNames = []string{hostname} + + return nil + }, + ) +} diff --git a/pkg/installerassets/tls/root.go b/pkg/installerassets/tls/root.go new file mode 100644 index 00000000000..e75184afbcb --- /dev/null +++ b/pkg/installerassets/tls/root.go @@ -0,0 +1,64 @@ +package tls + +import ( + "context" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "time" + + "github.com/openshift/installer/pkg/assets" + "github.com/openshift/installer/pkg/installerassets" + "github.com/pkg/errors" +) + +func rootCARebuilder(ctx context.Context, getByName assets.GetByString) (asset *assets.Asset, err error) { + asset = &assets.Asset{ + Name: "tls/root-ca.crt", + RebuildHelper: rootCARebuilder, + } + + cert := &x509.Certificate{ + BasicConstraintsValid: true, + IsCA: true, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + NotAfter: time.Now().Add(validityTenYears), + NotBefore: time.Now(), + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{CommonName: "root-ca", OrganizationalUnit: []string{"openshift"}}, + } + + parents, err := asset.GetParents(ctx, getByName, "tls/root-ca.key") + if err != nil { + return nil, err + } + + key, err := PEMToPrivateKey(parents["tls/root-ca.key"].Data) + if err != nil { + return nil, err + } + pub := key.Public() + + cert.SubjectKeyId, err = generateSubjectKeyID(pub) + if err != nil { + return nil, errors.Wrap(err, "failed to set subject key identifier") + } + + der, err := x509.CreateCertificate(rand.Reader, cert, cert, key.Public(), key) + if err != nil { + return nil, errors.Wrap(err, "failed to 
create certificate") + } + + asset.Data = pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: der, + }) + return asset, nil +} + +func init() { + installerassets.Defaults["tls/root-ca.key"] = privateKey + installerassets.Rebuilders["tls/root-ca.crt"] = rootCARebuilder +} diff --git a/pkg/installerassets/tls/serviceaccount.go b/pkg/installerassets/tls/serviceaccount.go new file mode 100644 index 00000000000..6c2c78b55f3 --- /dev/null +++ b/pkg/installerassets/tls/serviceaccount.go @@ -0,0 +1,9 @@ +package tls + +import ( + "github.com/openshift/installer/pkg/installerassets" +) + +func init() { + installerassets.Defaults["tls/service-account.key"] = privateKey +} diff --git a/pkg/installerassets/tls/serviceservingca.go b/pkg/installerassets/tls/serviceservingca.go new file mode 100644 index 00000000000..e152bbadb13 --- /dev/null +++ b/pkg/installerassets/tls/serviceservingca.go @@ -0,0 +1,30 @@ +package tls + +import ( + "crypto/x509" + "crypto/x509/pkix" + "math/big" + "time" + + "github.com/openshift/installer/pkg/installerassets" +) + +func init() { + installerassets.Defaults["tls/service-serving-ca.key"] = privateKey + installerassets.Rebuilders["tls/service-serving-ca.crt"] = certificateRebuilder( + "tls/service-serving-ca.crt", + "tls/service-serving-ca.key", + "tls/root-ca.crt", + "tls/root-ca.key", + &x509.Certificate{ + BasicConstraintsValid: true, + IsCA: true, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + NotAfter: time.Now().Add(validityTenYears), + NotBefore: time.Now(), + SerialNumber: new(big.Int).SetInt64(0), + Subject: pkix.Name{CommonName: "service-serving", OrganizationalUnit: []string{"bootkube"}}, + }, + nil, + ) +} diff --git a/pkg/installerassets/tls/tls.go b/pkg/installerassets/tls/tls.go new file mode 100644 index 00000000000..0a25aaf01dc --- /dev/null +++ b/pkg/installerassets/tls/tls.go @@ -0,0 +1,179 @@ +// Package tls installs installerassets.Rebuilders for TLS assets. 
+package tls + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/asn1" + "encoding/pem" + "math/big" + "time" + + "github.com/openshift/installer/pkg/assets" + "github.com/pkg/errors" +) + +const ( + keySize = 2048 + validityTenYears = time.Hour * 24 * 365 * 10 + validityThirtyMinutes = time.Minute * 30 +) + +type templateAdjuster func(ctx context.Context, asset *assets.Asset, getByName assets.GetByString, template *x509.Certificate) (err error) + +// rsaPublicKey reflects the ASN.1 structure of a PKCS#1 public key. +type rsaPublicKey struct { + N *big.Int + E int +} + +func privateKey(ctx context.Context) (data []byte, err error) { + key, err := rsa.GenerateKey(rand.Reader, keySize) + if err != nil { + return nil, err + } + + keyInBytes := x509.MarshalPKCS1PrivateKey(key) + keyinPem := pem.EncodeToMemory( + &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: keyInBytes, + }, + ) + return keyinPem, nil +} + +// PEMToPrivateKey converts PEM data to a rsa.PrivateKey. +func PEMToPrivateKey(data []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(data) + if block == nil { + return nil, errors.Errorf("could not find a PEM block in the private key") + } + return x509.ParsePKCS1PrivateKey(block.Bytes) +} + +// PublicKeyToPEM converts an rsa.PublicKey object to PEM. 
+func PublicKeyToPEM(key *rsa.PublicKey) ([]byte, error) { + keyInBytes, err := x509.MarshalPKIXPublicKey(key) + if err != nil { + return nil, errors.Wrap(err, "failed to MarshalPKIXPublicKey") + } + keyinPem := pem.EncodeToMemory( + &pem.Block{ + Type: "RSA PUBLIC KEY", + Bytes: keyInBytes, + }, + ) + return keyinPem, nil +} + +func pemToCertificate(data []byte) (*x509.Certificate, error) { + block, _ := pem.Decode(data) + if block == nil { + return nil, errors.Errorf("could not find a PEM block in the certificate") + } + return x509.ParseCertificate(block.Bytes) +} + +// generateSubjectKeyID generates a SHA-1 hash of the subject public key. +func generateSubjectKeyID(pub crypto.PublicKey) ([]byte, error) { + var publicKeyBytes []byte + var err error + + switch pub := pub.(type) { + case *rsa.PublicKey: + publicKeyBytes, err = asn1.Marshal(rsaPublicKey{N: pub.N, E: pub.E}) + if err != nil { + return nil, errors.Wrap(err, "failed to Marshal asn1 public key") + } + case *ecdsa.PublicKey: + publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) + default: + return nil, errors.New("only RSA and ECDSA public keys supported") + } + + hash := sha1.Sum(publicKeyBytes) + return hash[:], nil +} + +func certificateRebuilder(name string, key string, caCert string, caKey string, template *x509.Certificate, templateAdjust templateAdjuster) assets.Rebuild { + return func(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: name, + RebuildHelper: certificateRebuilder(name, key, caCert, caKey, template, templateAdjust), + } + + parents, err := asset.GetParents(ctx, getByName, key, caCert, caKey) + if err != nil { + return nil, err + } + + keys := make(map[string]*rsa.PrivateKey) + for _, keyName := range []string{key, caKey} { + keyPEM, err := PEMToPrivateKey(parents[keyName].Data) + if err != nil { + return nil, err + } + + keys[keyName] = keyPEM + } + + template.SubjectKeyId, err = 
generateSubjectKeyID(keys[key].Public()) + if err != nil { + return nil, errors.Wrap(err, "failed to set subject key identifier") + } + + cert, err := pemToCertificate(parents[caCert].Data) + if err != nil { + return nil, err + } + + if templateAdjust != nil { + err = templateAdjust(ctx, asset, getByName, template) + if err != nil { + return nil, err + } + } + + der, err := x509.CreateCertificate(rand.Reader, template, cert, keys[key].Public(), keys[caKey]) + if err != nil { + return nil, errors.Wrap(err, "failed to create certificate") + } + + asset.Data = pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: der, + }) + return asset, nil + } +} + +func certificateChainRebuilder(name string, parents ...string) assets.Rebuild { + return func(ctx context.Context, getByName assets.GetByString) (*assets.Asset, error) { + asset := &assets.Asset{ + Name: name, + RebuildHelper: certificateChainRebuilder(name, parents...), + } + + parentAssets, err := asset.GetParents(ctx, getByName, parents...) 
+		if err != nil {
+			return nil, err
+		}
+
+		data := make([][]byte, 0, len(parents))
+		for _, parentName := range parents {
+			data = append(data, parentAssets[parentName].Data)
+		}
+
+		asset.Data = bytes.Join(data, []byte("\n"))
+
+		return asset, nil
+	}
+}
diff --git a/pkg/installerassets/vfs.go b/pkg/installerassets/vfs.go
new file mode 100644
index 00000000000..5412c46745a
--- /dev/null
+++ b/pkg/installerassets/vfs.go
@@ -0,0 +1,64 @@
+package installerassets
+
+import (
+	"context"
+	"io/ioutil"
+	"path"
+
+	"github.com/openshift/installer/data"
+)
+
+func addAssetDefaults(defaults map[string]Defaulter, base string, rel string) error {
+	uri := path.Join(base, rel)
+	file, err := data.Assets.Open(uri)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	info, err := file.Stat()
+	if err != nil {
+		return err
+	}
+
+	if info.IsDir() {
+		children, err := file.Readdir(0)
+		if err != nil {
+			return err
+		}
+		file.Close()
+
+		for _, childInfo := range children {
+			name := childInfo.Name()
+			err = addAssetDefaults(defaults, base, path.Join(rel, name))
+			if err != nil {
+				return err
+			}
+		}
+
+		return nil
+	}
+
+	if path.Base(rel) == "OWNERS" {
+		return nil
+	}
+
+	defaults[rel] = func(ctx context.Context) ([]byte, error) {
+		file, err := data.Assets.Open(uri)
+		if err != nil {
+			return nil, err
+		}
+		defer file.Close()
+
+		return ioutil.ReadAll(file)
+	}
+
+	return nil
+}
+
+func init() {
+	err := addAssetDefaults(Defaults, "bootstrap", "")
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/pkg/terraform/executor.go b/pkg/terraform/executor.go
index 0d8779e4c6f..61b62d04bd0 100644
--- a/pkg/terraform/executor.go
+++ b/pkg/terraform/executor.go
@@ -64,7 +64,7 @@ func (ex *executor) execute(clusterDir string, args ...string) error {
 
 	logrus.Debugf("Running %#v...", cmd)
 
-	if logrus.GetLevel() == logrus.DebugLevel {
+	if logrus.GetLevel() == logrus.DebugLevel {
 		cmd.Stdin = os.Stdin
 		cmd.Stdout = os.Stdout
 		cmd.Stderr = os.Stderr
diff --git 
a/pkg/terraform/terraform.go b/pkg/terraform/terraform.go index 69d0ed797ff..341fa506693 100644 --- a/pkg/terraform/terraform.go +++ b/pkg/terraform/terraform.go @@ -2,6 +2,7 @@ package terraform import ( "fmt" + "path" "path/filepath" "github.com/openshift/installer/data" @@ -72,12 +73,12 @@ func Destroy(dir string, platform string, extraArgs ...string) (err error) { // unpack unpacks the platform-specific Terraform modules into the // given directory. func unpack(dir string, platform string) (err error) { - err = data.Unpack(dir, platform) + err = data.Unpack(dir, path.Join("terraform", platform)) if err != nil { return err } - err = data.Unpack(filepath.Join(dir, "config.tf"), "config.tf") + err = data.Unpack(filepath.Join(dir, "config.tf"), path.Join("terraform", "config.tf")) if err != nil { return err } diff --git a/pkg/tfvars/aws/aws.go b/pkg/tfvars/aws/aws.go deleted file mode 100644 index 70af9d8118c..00000000000 --- a/pkg/tfvars/aws/aws.go +++ /dev/null @@ -1,56 +0,0 @@ -package aws - -// Endpoints is the type of the AWS endpoints. -type Endpoints string - -const ( - // EndpointsAll represents the configuration for using both private and public endpoints. - EndpointsAll Endpoints = "all" - // EndpointsPrivate represents the configuration for using only private endpoints. - EndpointsPrivate Endpoints = "private" - // EndpointsPublic represents the configuration for using only public endpoints. - EndpointsPublic Endpoints = "public" -) - -// AWS converts AWS related config. 
-type AWS struct { - EC2AMIOverride string `json:"tectonic_aws_ec2_ami_override,omitempty"` - Endpoints Endpoints `json:"tectonic_aws_endpoints,omitempty"` - External `json:",inline"` - ExtraTags map[string]string `json:"tectonic_aws_extra_tags,omitempty"` - InstallerRole string `json:"tectonic_aws_installer_role,omitempty"` - Master `json:",inline"` - Region string `json:"tectonic_aws_region,omitempty"` - VPCCIDRBlock string `json:"tectonic_aws_vpc_cidr_block,omitempty"` - Worker `json:",inline"` -} - -// External converts external related config. -type External struct { - MasterSubnetIDs []string `json:"tectonic_aws_external_master_subnet_ids,omitempty"` - PrivateZone string `json:"tectonic_aws_external_private_zone,omitempty"` - VPCID string `json:"tectonic_aws_external_vpc_id,omitempty"` - WorkerSubnetIDs []string `json:"tectonic_aws_external_worker_subnet_ids,omitempty"` -} - -// Master converts master related config. -type Master struct { - CustomSubnets map[string]string `json:"tectonic_aws_master_custom_subnets,omitempty"` - EC2Type string `json:"tectonic_aws_master_ec2_type,omitempty"` - ExtraSGIDs []string `json:"tectonic_aws_master_extra_sg_ids,omitempty"` - IAMRoleName string `json:"tectonic_aws_master_iam_role_name,omitempty"` - MasterRootVolume `json:",inline"` -} - -// MasterRootVolume converts master rool volume related config. -type MasterRootVolume struct { - IOPS int `json:"tectonic_aws_master_root_volume_iops,omitempty"` - Size int `json:"tectonic_aws_master_root_volume_size,omitempty"` - Type string `json:"tectonic_aws_master_root_volume_type,omitempty"` -} - -// Worker converts worker related config. 
-type Worker struct { - CustomSubnets map[string]string `json:"tectonic_aws_worker_custom_subnets,omitempty"` - IAMRoleName string `json:"tectonic_aws_worker_iam_role_name,omitempty"` -} diff --git a/pkg/tfvars/libvirt/libvirt.go b/pkg/tfvars/libvirt/libvirt.go deleted file mode 100644 index 1a761e0c7a8..00000000000 --- a/pkg/tfvars/libvirt/libvirt.go +++ /dev/null @@ -1,66 +0,0 @@ -package libvirt - -import ( - "fmt" - "net" - - "github.com/apparentlymart/go-cidr/cidr" -) - -// Libvirt encompasses configuration specific to libvirt. -type Libvirt struct { - URI string `json:"tectonic_libvirt_uri,omitempty"` - Image string `json:"tectonic_os_image,omitempty"` - Network `json:",inline"` - MasterIPs []string `json:"tectonic_libvirt_master_ips,omitempty"` - BootstrapIP string `json:"tectonic_libvirt_bootstrap_ip,omitempty"` -} - -// Network describes a libvirt network configuration. -type Network struct { - IfName string `json:"tectonic_libvirt_network_if,omitempty"` - IPRange string `json:"tectonic_libvirt_ip_range,omitempty"` -} - -// TFVars fills in computed Terraform variables. 
-func (l *Libvirt) TFVars(masterCount int) error { - _, network, err := net.ParseCIDR(l.Network.IPRange) - if err != nil { - return fmt.Errorf("failed to parse libvirt network ipRange: %v", err) - } - - if l.BootstrapIP == "" { - ip, err := cidr.Host(network, 10) - if err != nil { - return fmt.Errorf("failed to generate bootstrap IP: %v", err) - } - l.BootstrapIP = ip.String() - } - - if len(l.MasterIPs) > 0 { - if len(l.MasterIPs) != masterCount { - return fmt.Errorf("length of MasterIPs doesn't match master count") - } - } else { - if ips, err := generateIPs("master", network, masterCount, 11); err == nil { - l.MasterIPs = ips - } else { - return err - } - } - - return nil -} - -func generateIPs(name string, network *net.IPNet, count int, offset int) ([]string, error) { - var ips []string - for i := 0; i < count; i++ { - ip, err := cidr.Host(network, offset+i) - if err != nil { - return nil, fmt.Errorf("failed to generate %s IPs: %v", name, err) - } - ips = append(ips, ip.String()) - } - - return ips, nil -} diff --git a/pkg/tfvars/openstack/OWNERS b/pkg/tfvars/openstack/OWNERS deleted file mode 100644 index ea6fcb46def..00000000000 --- a/pkg/tfvars/openstack/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md -# This file just uses aliases defined in OWNERS_ALIASES. - -approvers: - - openstack-approvers diff --git a/pkg/tfvars/openstack/openstack.go b/pkg/tfvars/openstack/openstack.go deleted file mode 100644 index c6f9a6b1e44..00000000000 --- a/pkg/tfvars/openstack/openstack.go +++ /dev/null @@ -1,49 +0,0 @@ -package openstack - -// OpenStack converts OpenStack related config. 
-type OpenStack struct { - BaseImage string `json:"tectonic_openstack_base_image,omitempty"` - Credentials `json:",inline"` - External `json:",inline"` - ExternalNetwork string `json:"tectonic_openstack_external_network,omitempty"` - ExtraTags map[string]string `json:"tectonic_openstack_extra_tags,omitempty"` - Master `json:",inline"` - Region string `json:"tectonic_openstack_region,omitempty"` - NetworkCIDRBlock string `json:"tectonic_openstack_network_cidr_block,omitempty"` -} - -// External converts external related config. -type External struct { - MasterSubnetIDs []string `json:"tectonic_openstack_external_master_subnet_ids,omitempty"` -} - -// Master converts master related config. -type Master struct { - FlavorName string `json:"tectonic_openstack_master_flavor_name,omitempty"` - ExtraSGIDs []string `json:"tectonic_openstack_master_extra_sg_ids,omitempty"` -} - -// Credentials converts credentials related config. -type Credentials struct { - AuthURL string `json:"tectonic_openstack_credentials_auth_url,omitempty"` - Cert string `json:"tectonic_openstack_credentials_cert,omitempty"` - Cloud string `json:"tectonic_openstack_credentials_cloud,omitempty"` - DomainID string `json:"tectonic_openstack_credentials_domain_id,omitempty"` - DomainName string `json:"tectonic_openstack_credentials_domain_name,omitempty"` - EndpointType string `json:"tectonic_openstack_credentials_endpoint_type,omitempty"` - Insecure bool `json:"tectonic_openstack_credentials_insecure,omitempty"` - Key string `json:"tectonic_openstack_credentials_key,omitempty"` - Password string `json:"tectonic_openstack_credentials_password,omitempty"` - ProjectDomainID string `json:"tectonic_openstack_credentials_project_domain_id,omitempty"` - ProjectDomainName string `json:"tectonic_openstack_credentials_project_domain_name,omitempty"` - Region string `json:"tectonic_openstack_credentials_region,omitempty"` - Swauth bool `json:"tectonic_openstack_credentials_swauth,omitempty"` - TenantID string 
`json:"tectonic_openstack_credentials_tenant_id,omitempty"` - TenantName string `json:"tectonic_openstack_credentials_tenant_name,omitempty"` - Token string `json:"tectonic_openstack_credentials_token,omitempty"` - UseOctavia bool `json:"tectonic_openstack_credentials_use_octavia,omitempty"` - UserDomainID string `json:"tectonic_openstack_credentials_user_domain_id,omitempty"` - UserDomainName string `json:"tectonic_openstack_credentials_user_domain_name,omitempty"` - UserID string `json:"tectonic_openstack_credentials_user_id,omitempty"` - UserName string `json:"tectonic_openstack_credentials_user_name,omitempty"` -} diff --git a/pkg/tfvars/tfvars.go b/pkg/tfvars/tfvars.go deleted file mode 100644 index d73f68be4a8..00000000000 --- a/pkg/tfvars/tfvars.go +++ /dev/null @@ -1,125 +0,0 @@ -// Package tfvars converts an InstallConfig to Terraform variables. -package tfvars - -import ( - "context" - "encoding/json" - "time" - - "github.com/openshift/installer/pkg/rhcos" - "github.com/openshift/installer/pkg/tfvars/aws" - "github.com/openshift/installer/pkg/tfvars/libvirt" - "github.com/openshift/installer/pkg/tfvars/openstack" - "github.com/openshift/installer/pkg/types" - "github.com/pkg/errors" -) - -type config struct { - ClusterID string `json:"tectonic_cluster_id,omitempty"` - Name string `json:"tectonic_cluster_name,omitempty"` - BaseDomain string `json:"tectonic_base_domain,omitempty"` - Masters int `json:"tectonic_master_count,omitempty"` - - IgnitionBootstrap string `json:"ignition_bootstrap,omitempty"` - IgnitionMaster string `json:"ignition_master,omitempty"` - - aws.AWS `json:",inline"` - libvirt.Libvirt `json:",inline"` - openstack.OpenStack `json:",inline"` -} - -// TFVars converts the InstallConfig and Ignition content to -// terraform.tfvar JSON. 
-func TFVars(cfg *types.InstallConfig, bootstrapIgn, masterIgn string) ([]byte, error) { - config := &config{ - ClusterID: cfg.ClusterID, - Name: cfg.ObjectMeta.Name, - BaseDomain: cfg.BaseDomain, - - IgnitionMaster: masterIgn, - IgnitionBootstrap: bootstrapIgn, - } - - for _, m := range cfg.Machines { - switch m.Name { - case "master": - var replicas int - if m.Replicas == nil { - replicas = 1 - } else { - replicas = int(*m.Replicas) - } - - config.Masters += replicas - if m.Platform.AWS != nil { - config.AWS.Master = aws.Master{ - EC2Type: m.Platform.AWS.InstanceType, - IAMRoleName: m.Platform.AWS.IAMRoleName, - MasterRootVolume: aws.MasterRootVolume{ - IOPS: m.Platform.AWS.EC2RootVolume.IOPS, - Size: m.Platform.AWS.EC2RootVolume.Size, - Type: m.Platform.AWS.EC2RootVolume.Type, - }, - } - } - case "worker": - if m.Platform.AWS != nil { - config.AWS.Worker = aws.Worker{ - IAMRoleName: m.Platform.AWS.IAMRoleName, - } - } - default: - return nil, errors.Errorf("unrecognized machine pool %q", m.Name) - } - } - - if cfg.Platform.AWS != nil { - ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) - defer cancel() - ami, err := rhcos.AMI(ctx, rhcos.DefaultChannel, cfg.Platform.AWS.Region) - if err != nil { - return nil, errors.Wrap(err, "failed to determine default AMI") - } - - config.AWS = aws.AWS{ - Endpoints: aws.EndpointsAll, // Default value for endpoints. 
- Region: cfg.Platform.AWS.Region, - ExtraTags: cfg.Platform.AWS.UserTags, - External: aws.External{ - VPCID: cfg.Platform.AWS.VPCID, - }, - VPCCIDRBlock: cfg.Platform.AWS.VPCCIDRBlock, - EC2AMIOverride: ami, - } - } else if cfg.Platform.Libvirt != nil { - masterIPs := make([]string, len(cfg.Platform.Libvirt.MasterIPs)) - for i, ip := range cfg.Platform.Libvirt.MasterIPs { - masterIPs[i] = ip.String() - } - config.Libvirt = libvirt.Libvirt{ - URI: cfg.Platform.Libvirt.URI, - Network: libvirt.Network{ - IfName: cfg.Platform.Libvirt.Network.IfName, - IPRange: cfg.Platform.Libvirt.Network.IPRange, - }, - Image: cfg.Platform.Libvirt.DefaultMachinePlatform.Image, - MasterIPs: masterIPs, - } - if err := config.Libvirt.TFVars(config.Masters); err != nil { - return nil, errors.Wrap(err, "failed to insert libvirt variables") - } - if err := config.Libvirt.UseCachedImage(); err != nil { - return nil, errors.Wrap(err, "failed to use cached libvirt image") - } - } else if cfg.Platform.OpenStack != nil { - config.OpenStack = openstack.OpenStack{ - Region: cfg.Platform.OpenStack.Region, - NetworkCIDRBlock: cfg.Platform.OpenStack.NetworkCIDRBlock, - BaseImage: cfg.Platform.OpenStack.BaseImage, - } - config.OpenStack.Credentials.Cloud = cfg.Platform.OpenStack.Cloud - config.OpenStack.ExternalNetwork = cfg.Platform.OpenStack.ExternalNetwork - } - - return json.MarshalIndent(config, "", " ") -} diff --git a/pkg/types/clustermetadata.go b/pkg/types/clustermetadata.go index d7482334fd5..06e06808a9c 100644 --- a/pkg/types/clustermetadata.go +++ b/pkg/types/clustermetadata.go @@ -1,6 +1,10 @@ package types import ( + "encoding/json" + "io/ioutil" + "path/filepath" + "github.com/openshift/installer/pkg/types/aws" "github.com/openshift/installer/pkg/types/libvirt" "github.com/openshift/installer/pkg/types/openstack" @@ -38,3 +42,17 @@ func (cpm *ClusterPlatformMetadata) Platform() string { } return "" } + +// LoadClusterMetadata loads the cluster metadata from an asset directory. 
+func LoadClusterMetadata(dir string) (cmetadata *ClusterMetadata, err error) { + raw, err := ioutil.ReadFile(filepath.Join(dir, "metadata.json")) + if err != nil { + return nil, err + } + + if err = json.Unmarshal(raw, &cmetadata); err != nil { + return nil, err + } + + return cmetadata, err +} diff --git a/vendor/github.com/ajeddeloh/go-json/decode.go b/vendor/github.com/ajeddeloh/go-json/decode.go deleted file mode 100644 index 1dc2fdf0d49..00000000000 --- a/vendor/github.com/ajeddeloh/go-json/decode.go +++ /dev/null @@ -1,1226 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Represents JSON data structure using native Go types: booleans, floats, -// strings, arrays, and maps. - -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "errors" - "fmt" - "reflect" - "runtime" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Unmarshal parses the JSON-encoded data and stores the result -// in the value pointed to by v. -// -// Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, -// with the following additional rules: -// -// To unmarshal JSON into a pointer, Unmarshal first handles the case of -// the JSON being the JSON literal null. In that case, Unmarshal sets -// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into -// the value pointed at by the pointer. If the pointer is nil, Unmarshal -// allocates a new value for it to point to. -// -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. 
-// -// To unmarshal JSON into an interface value, -// Unmarshal stores one of these in the interface value: -// -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null -// -// To unmarshal a JSON array into a slice, Unmarshal resets the slice to nil -// and then appends each element to the slice. -// -// To unmarshal a JSON object into a map, Unmarshal replaces the map -// with an empty map and then adds key-value pairs from the object to -// the map. -// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshalling as best it can. -// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. -// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -// -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -// Unmarshaler is the interface implemented by objects -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. 
-type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type. -type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to - Offset int64 // error occurred after reading Offset bytes -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) -type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) 
-type InvalidUnmarshalError struct { - Type reflect.Type -} - -type Node struct { - Start int - End int - KeyStart int // Only value if a member of a struct - KeyEnd int - Value interface{} -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -// decodeState represents the state while decoding a JSON value. -type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - useNumber bool -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. 
-var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. -func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. -func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. - if c == '{' { - d.scan.step(&d.scan, '}') - } else { - d.scan.step(&d.scan, ']') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. -func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := int(d.data[d.off]) - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -// value decodes a JSON value from d.data[d.off:] into the value. -// it updates d.off to point past the decoded value. -func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. 
- // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - } -} - -type unquotedValue struct{} - -// valueQuoted is like value but decodes a -// quoted string literal or literal null into an interface value. -// If it finds anything other than a quoted string literal or null, -// valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(reflect.Value{}) - - case scanBeginObject: - d.object(reflect.Value{}) - - case scanBeginLiteral: - switch v := d.literalInterface().(type) { - case nil, string: - return v - } - } - return unquotedValue{} -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. 
- if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - if v.Type() == reflect.TypeOf(Node{}) { - // Decoding to Node? Switch to that code - v.Set(reflect.ValueOf(d.arrayNode())) - return - } - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. 
- op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -var nullLiteral = []byte("null") - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte ('{') of the object has been read already. -func (d *decodeState) object(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. - if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } else if v.Type() == reflect.TypeOf(Node{}) { - // Decoding to Node? 
Switch to that code - v.Set(reflect.ValueOf(d.objectNode())) - return - } - - // Check type of target: struct or map[string]T - switch v.Kind() { - case reflect.Map: - // map must have string kind - t := v.Type() - if t.Key().Kind() != reflect.String { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquoteBytes(item) - if !ok { - d.error(errPhase) - } - - // Figure out field corresponding to key. - var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, key) { - f = ff - break - } - if f == nil && ff.equalFold(ff.nameBytes, key) { - f = ff - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. 
- if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kv := reflect.ValueOf(key).Convert(v.Type().Key()) - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// literal consumes a literal from d.data[d.off-1:], decoding into the value v. -// The first byte of the literal has been read already -// (that's how the caller knows it's a literal). -func (d *decodeState) literal(v reflect.Value) { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - - d.literalStore(d.data[start:d.off], v, false) -} - -// convertNumber converts the number literal s to a float64 or a Number -// depending on the setting of d.useNumber. -func (d *decodeState) convertNumber(s string) (interface{}, error) { - if d.useNumber { - return Number(s), nil - } - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - return f, nil -} - -var numberType = reflect.TypeOf(Number("")) - -// literalStore decodes a literal stored in item into v. -// -// fromQuoted indicates whether this literal came from unwrapping a -// string from the ",string" struct tag option. 
this is used only to -// produce more helpful error messages. -func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { - // Check for unmarshaler. - if len(item) == 0 { - //Empty string given - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - return - } - wantptr := item[0] == 'n' // null - u, ut, pv := d.indirect(v, wantptr) - if u != nil { - err := u.UnmarshalJSON(item) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - if item[0] != '"' { - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - return - } - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - err := ut.UnmarshalText(s) - if err != nil { - d.error(err) - } - return - } - - v = pv - - switch c := item[0]; c { - case 'n': // null - switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) - // otherwise, ignore null for primitives/string - } - case 't', 'f': // true, false - value := c == 't' - switch v.Kind() { - default: - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - case reflect.Bool: - v.SetBool(value) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(value)) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - } - - case '"': // string - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into 
%v", item, v.Type())) - } else { - d.error(errPhase) - } - } - switch v.Kind() { - default: - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - case reflect.Slice: - if v.Type().Elem().Kind() != reflect.Uint8 { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - break - } - b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) - n, err := base64.StdEncoding.Decode(b, s) - if err != nil { - d.saveError(err) - break - } - v.Set(reflect.ValueOf(b[0:n])) - case reflect.String: - v.SetString(string(s)) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(string(s))) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - } - - default: // number - if c != '-' && (c < '0' || c > '9') { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - s := string(item) - switch v.Kind() { - default: - if v.Kind() == reflect.String && v.Type() == numberType { - v.SetString(s) - break - } - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - } - case reflect.Interface: - n, err := d.convertNumber(s) - if err != nil { - d.saveError(err) - break - } - if v.NumMethod() != 0 { - d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - break - } - v.Set(reflect.ValueOf(n)) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(s, 10, 64) - if err != nil || v.OverflowInt(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetInt(n) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(s, 10, 64) - if err != nil || v.OverflowUint(n) { - 
d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetUint(n) - - case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) - if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetFloat(n) - } - } -} - -// The xxxInterface routines build up a value to be stored -// in an empty interface. They are not strictly necessary, -// but they avoid the weight of reflection in this common case. - -// valueInterface is like value but returns interface{} -func (d *decodeState) valueInterface() interface{} { - switch d.scanWhile(scanSkipSpace) { - default: - d.error(errPhase) - panic("unreachable") - case scanBeginArray: - return d.arrayInterface() - case scanBeginObject: - return d.objectInterface() - case scanBeginLiteral: - return d.literalInterface() - } -} - -// valueNode is like valueInterface but returns a wrapped version that -// contains metadata about where it decoded from -func (d *decodeState) valueNode() Node { - switch d.scanWhile(scanSkipSpace) { - default: - d.error(errPhase) - panic("unreachable") - case scanBeginArray: - return d.arrayNode() - case scanBeginObject: - return d.objectNode() - case scanBeginLiteral: - return d.literalNode() - } -} - -// arrayInterface is like array but returns []interface{}. -func (d *decodeState) arrayInterface() []interface{} { - var v = make([]interface{}, 0) - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueInterface()) - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - return v -} - -// arrayNode is like arrayInterface but returns Node. 
-func (d *decodeState) arrayNode() Node { - var v = make([]Node, 0) - node := Node{ - Start: d.off, - Value: v, - } - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueNode()) - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - node.Value = v - node.End = d.off - 1 - return node -} - -// objectInterface is like object but returns map[string]interface{}. -func (d *decodeState) objectInterface() map[string]interface{} { - m := make(map[string]interface{}) - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read string key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - m[key] = d.valueInterface() - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - return m -} - -// objectNode is like object but returns Node. -func (d *decodeState) objectNode() Node { - m := make(map[string]Node) - node := Node{ - Start: d.off, - } - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read string key. 
- start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - keyEnd := d.off - 1 - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - val := d.valueNode() - val.KeyStart = start - val.KeyEnd = keyEnd - m[key] = val - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - node.Value = m - node.End = d.off - 1 - return node -} - -// literalInterface is like literal but returns an interface value. -func (d *decodeState) literalInterface() interface{} { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - item := d.data[start:d.off] - - switch c := item[0]; c { - case 'n': // null - return nil - - case 't', 'f': // true, false - return c == 't' - - case '"': // string - s, ok := unquote(item) - if !ok { - d.error(errPhase) - } - return s - - default: // number - if c != '-' && (c < '0' || c > '9') { - d.error(errPhase) - } - n, err := d.convertNumber(string(item)) - if err != nil { - d.saveError(err) - } - return n - } -} - -func (d *decodeState) literalNode() Node { - start := d.off - 1 - // Can just use the interface version since this has no children - node := Node{ - Start: start, - Value: d.literalInterface(), - } - node.End = d.off - 1 - return node -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -// unquote converts a quoted JSON string literal s into an actual string t. 
-// The rules are different than for Go, so cannot use strconv.Unquote. -func unquote(s []byte) (t string, ok bool) { - s, ok = unquoteBytes(s) - t = string(s) - return -} - -func unquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { - return - } - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. - r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. - if w >= len(b)-2*utf8.UTFMax { - nb := make([]byte, (len(b)+utf8.UTFMax)*2) - copy(nb, b[0:w]) - b = nb - } - switch c := s[r]; { - case c == '\\': - r++ - if r >= len(s) { - return - } - switch s[r] { - default: - return - case '"', '\\', '/', '\'': - b[w] = s[r] - r++ - w++ - case 'b': - b[w] = '\b' - r++ - w++ - case 'f': - b[w] = '\f' - r++ - w++ - case 'n': - b[w] = '\n' - r++ - w++ - case 'r': - b[w] = '\r' - r++ - w++ - case 't': - b[w] = '\t' - r++ - w++ - case 'u': - r-- - rr := getu4(s[r:]) - if rr < 0 { - return - } - r += 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(s[r:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - // A valid pair; consume. - r += 6 - w += utf8.EncodeRune(b[w:], dec) - break - } - // Invalid surrogate; fall back to replacement rune. - rr = unicode.ReplacementChar - } - w += utf8.EncodeRune(b[w:], rr) - } - - // Quote, control characters are invalid. 
- case c == '"', c < ' ': - return - - // ASCII - case c < utf8.RuneSelf: - b[w] = c - r++ - w++ - - // Coerce to well-formed UTF-8. - default: - rr, size := utf8.DecodeRune(s[r:]) - r += size - w += utf8.EncodeRune(b[w:], rr) - } - } - return b[0:w], true -} diff --git a/vendor/github.com/ajeddeloh/go-json/encode.go b/vendor/github.com/ajeddeloh/go-json/encode.go deleted file mode 100644 index 90782deb70b..00000000000 --- a/vendor/github.com/ajeddeloh/go-json/encode.go +++ /dev/null @@ -1,1194 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package json implements encoding and decoding of JSON objects as defined in -// RFC 4627. The mapping between JSON objects and Go values is described -// in the documentation for the Marshal and Unmarshal functions. -// -// See "JSON and Go" for an introduction to this package: -// https://golang.org/doc/articles/json_and_go.html -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "math" - "reflect" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// Marshal returns the JSON encoding of v. -// -// Marshal traverses the value v recursively. -// If an encountered value implements the Marshaler interface -// and is not a nil pointer, Marshal calls its MarshalJSON method -// to produce JSON. The nil pointer exception is not strictly necessary -// but mimics a similar, necessary exception in the behavior of -// UnmarshalJSON. -// -// Otherwise, Marshal uses the following type-dependent default encodings: -// -// Boolean values encode as JSON booleans. -// -// Floating point, integer, and Number values encode as JSON numbers. -// -// String values encode as JSON strings coerced to valid UTF-8, -// replacing invalid bytes with the Unicode replacement rune. 
-// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" -// to keep some browsers from misinterpreting JSON output as HTML. -// Ampersand "&" is also escaped to "\u0026" for the same reason. -// -// Array and slice values encode as JSON arrays, except that -// []byte encodes as a base64-encoded string, and a nil slice -// encodes as the null JSON object. -// -// Struct values encode as JSON objects. Each exported struct field -// becomes a member of the object unless -// - the field's tag is "-", or -// - the field is empty and its tag specifies the "omitempty" option. -// The empty values are false, 0, any -// nil pointer or interface value, and any array, slice, map, or string of -// length zero. The object's default key string is the struct field name -// but can be specified in the struct field's tag value. The "json" key in -// the struct field's tag value is the key name, followed by an optional comma -// and options. Examples: -// -// // Field is ignored by this package. -// Field int `json:"-"` -// -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` -// -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` -// -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` -// -// The "string" option signals that a field is stored as JSON inside a -// JSON-encoded string. It applies only to fields of string, floating point, -// integer, or boolean types. This extra level of encoding is sometimes used -// when communicating with JavaScript programs: -// -// Int64String int64 `json:",string"` -// -// The key name will be used if it's a non-empty string consisting of -// only Unicode letters, digits, dollar signs, percent signs, hyphens, -// underscores and slashes. 
-// -// Anonymous struct fields are usually marshaled as if their inner exported fields -// were fields in the outer struct, subject to the usual Go visibility rules amended -// as described in the next paragraph. -// An anonymous struct field with a name given in its JSON tag is treated as -// having that name, rather than being anonymous. -// An anonymous struct field of interface type is treated the same as having -// that type as its name, rather than being anonymous. -// -// The Go visibility rules for struct fields are amended for JSON when -// deciding which field to marshal or unmarshal. If there are -// multiple fields at the same level, and that level is the least -// nested (and would therefore be the nesting level selected by the -// usual Go rules), the following extra rules apply: -// -// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, -// even if there are multiple untagged fields that would otherwise conflict. -// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. -// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. -// -// Handling of anonymous struct fields is new in Go 1.1. -// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of -// an anonymous struct field in both current and earlier versions, give the field -// a JSON tag of "-". -// -// Map values encode as JSON objects. -// The map's key type must be string; the map keys are used as JSON object -// keys, subject to the UTF-8 coercion described for string values above. -// -// Pointer values encode as the value pointed to. -// A nil pointer encodes as the null JSON object. -// -// Interface values encode as the value contained in the interface. -// A nil interface value encodes as the null JSON object. -// -// Channel, complex, and function values cannot be encoded in JSON. -// Attempting to encode such a value causes Marshal to return -// an UnsupportedTypeError. 
-// -// JSON cannot represent cyclic data structures and Marshal does not -// handle them. Passing cyclic structures to Marshal will result in -// an infinite recursion. -// -func Marshal(v interface{}) ([]byte, error) { - e := &encodeState{} - err := e.marshal(v) - if err != nil { - return nil, err - } - return e.Bytes(), nil -} - -// MarshalIndent is like Marshal but applies Indent to format the output. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - b, err := Marshal(v) - if err != nil { - return nil, err - } - var buf bytes.Buffer - err = Indent(&buf, b, prefix, indent) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 -// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 -// so that the JSON will be safe to embed inside HTML