This repository has been archived by the owner on Jun 29, 2022. It is now read-only.

(WIP) Refactoring loading of configuration, cluster operations management. #359

Closed
wants to merge 43 commits into from

43 commits
415eb0a
Renaming RootConfig to ClusterConfig as part of refactoring
ipochi Mar 23, 2020
639640d
Rename Config to HCLConfig
ipochi Apr 24, 2020
638b3a3
Refactor LoadConfig into load and parse functions
ipochi Apr 24, 2020
c184dec
Remove usage of anonymous struct in Packet.
ipochi Apr 24, 2020
c8f1603
Remove usage of anonymous struct in AWS.
ipochi Apr 24, 2020
581807c
Remove usage of anonymous struct in Baremetal.
ipochi Apr 24, 2020
f8d7a12
Move verify.go to lokomotive package.
ipochi Apr 24, 2020
15e4814
Move install.go to terraform package.
ipochi Apr 24, 2020
c3cf386
Make unnecessarily exported functions unexported.
ipochi Apr 24, 2020
82e1a55
Move template.go to util package
ipochi Apr 16, 2020
70c9f2f
Add Render and Validate to Platform interface.
ipochi Apr 24, 2020
faaf2d8
LokomotiveConfig struct for managing configuration.
ipochi Apr 24, 2020
d0652dc
Centralize loading of HCL files in one place.
ipochi Apr 24, 2020
1716379
Add InitializeExecutor for terraform.Executor
ipochi Apr 24, 2020
e88b944
Add Manager interface for Lokomotive operations.
ipochi Apr 24, 2020
cf17899
Add implementation of Manager interface.
ipochi Apr 24, 2020
6b2c9cc
Add method to create terraform cluster file.
ipochi Apr 25, 2020
1c0aa96
Refactor GetComponentBody
ipochi Apr 16, 2020
b51b5b5
Move utility functions to lokomotive package.
ipochi Apr 25, 2020
fe5abb9
Update lokomotive methods.
ipochi Apr 25, 2020
2e73fea
Remove not needed functions from cmd/cluster.go
ipochi Apr 25, 2020
11049a7
Update initialize method to use new methods.
ipochi Apr 25, 2020
f2b5e3c
Update cluster-apply to use new changes.
ipochi Apr 25, 2020
4c2fea2
Update cluster-destroy to use new changes.
ipochi Apr 25, 2020
aa48e04
Update component-apply to use new changes.
ipochi Apr 25, 2020
4ead062
Update component-render-manifest to use new changes.
ipochi Apr 25, 2020
4ec3146
Update health to use new changes.
ipochi Apr 25, 2020
bbf785d
Change const to reflect consistency
ipochi Apr 16, 2020
064f9d0
Update component-delete to use new changes.
ipochi Apr 26, 2020
5ddfb39
Rename ContextLogger to Logger.
ipochi Apr 26, 2020
a397c04
Move verifyCluster to utils.go
ipochi Apr 26, 2020
0020b44
Delete not needed utils.go
ipochi Apr 26, 2020
9e4687c
Update Platform interface to remove LoadConfig
ipochi Apr 26, 2020
5e38dce
Update Platform.Render method
ipochi Apr 26, 2020
de469e4
Remove Initialize method from Platform interface.
ipochi Apr 26, 2020
e19d449
Add validation of LokomotiveConfig
ipochi Apr 26, 2020
3d14689
Make methods unexported if they are not needed
ipochi Apr 26, 2020
09ba186
Add validation to all platforms.
ipochi Apr 26, 2020
b01bbac
Add unit tests for loading HCL configurations.
ipochi Apr 27, 2020
2377691
Add unit tests for HCLLoader.
ipochi Apr 27, 2020
9dff2ce
Add Unit tests for Platform.
ipochi Apr 27, 2020
e8a0019
Disabling the 'gomnd' linter.
ipochi Apr 27, 2020
b13a203
Resolve linter errors.
ipochi Apr 27, 2020
2 changes: 1 addition & 1 deletion Makefile
@@ -63,7 +63,7 @@ test: run-unit-tests
lint: build-slim build-test
# Note: Make sure that you run `git config diff.noprefix false` in this repo
# See this issue for more details: https://github.com/golangci/golangci-lint/issues/948
golangci-lint run --enable-all --disable=godox,gochecknoglobals --max-same-issues=0 --max-issues-per-linter=0 --build-tags $(ALL_BUILD_TAGS) --new-from-rev=$$(git merge-base $$(cat .git/resource/base_sha 2>/dev/null || echo "origin/master") HEAD) --modules-download-mode=$(MOD) --timeout=5m --exclude-use-default=false ./...
golangci-lint run --enable-all --disable=gomnd,godox,gochecknoglobals --max-same-issues=0 --max-issues-per-linter=0 --build-tags $(ALL_BUILD_TAGS) --new-from-rev=$$(git merge-base $$(cat .git/resource/base_sha 2>/dev/null || echo "master") HEAD) --modules-download-mode=$(MOD) --timeout=5m --exclude-use-default=false ./...

.PHONY: lint-docker
lint-docker:
90 changes: 3 additions & 87 deletions cli/cmd/cluster-apply.go
@@ -11,19 +11,11 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
"fmt"

"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"

"github.com/kinvolk/lokomotive/pkg/install"
"github.com/kinvolk/lokomotive/pkg/k8sutil"
"github.com/kinvolk/lokomotive/pkg/lokomotive"
)

var (
@@ -57,83 +49,7 @@ func runClusterApply(cmd *cobra.Command, args []string) {
"args": args,
})

ex, p, lokoConfig, assetDir := initialize(ctxLogger)

exists := clusterExists(ctxLogger, ex)
if exists && !confirm {
// TODO: We could plan to a file and use it when installing.
if err := ex.Plan(); err != nil {
ctxLogger.Fatalf("Failed to reconsile cluster state: %v", err)
}

if !askForConfirmation("Do you want to proceed with cluster apply?") {
ctxLogger.Println("Cluster apply cancelled")

return
}
}

if err := p.Apply(ex); err != nil {
ctxLogger.Fatalf("error applying cluster: %v", err)
}

fmt.Printf("\nYour configurations are stored in %s\n", assetDir)

kubeconfigPath := assetsKubeconfig(assetDir)
if err := verifyCluster(kubeconfigPath, p.GetExpectedNodes()); err != nil {
ctxLogger.Fatalf("Verify cluster: %v", err)
}

// Do controlplane upgrades only if cluster already exists.
if exists {
fmt.Printf("\nEnsuring that cluster controlplane is up to date.\n")

cu := controlplaneUpdater{
kubeconfigPath: kubeconfigPath,
assetDir: assetDir,
ctxLogger: *ctxLogger,
ex: *ex,
}

releases := []string{"pod-checkpointer", "kube-apiserver", "kubernetes", "calico"}

if upgradeKubelets {
releases = append(releases, "kubelet")
}

for _, c := range releases {
cu.upgradeComponent(c)
}
}

if skipComponents {
return
}

componentsToApply := []string{}
for _, component := range lokoConfig.RootConfig.Components {
componentsToApply = append(componentsToApply, component.Name)
}

ctxLogger.Println("Applying component configuration")

if len(componentsToApply) > 0 {
if err := applyComponents(lokoConfig, kubeconfigPath, componentsToApply...); err != nil {
ctxLogger.Fatalf("Applying component configuration failed: %v", err)
}
}
}

func verifyCluster(kubeconfigPath string, expectedNodes int) error {
client, err := k8sutil.NewClientset(kubeconfigPath)
if err != nil {
return errors.Wrapf(err, "failed to set up clientset")
}

cluster, err := lokomotive.NewCluster(client, expectedNodes)
if err != nil {
return errors.Wrapf(err, "failed to set up cluster client")
}

return install.Verify(cluster)
l, options := initialize(ctxLogger)
// Apply the user configuration to create a Lokomotive cluster.
l.Apply(options)
}
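
Note: the plan/confirm check, cluster verification, controlplane upgrade and component application removed above presumably now run behind the new Manager's Apply. Below is a minimal sketch of that assumed flow, reconstructed only from the removed code in this file; the package name, stub interfaces and helper parameters (applyFlow, executor, platform, confirm, verify, upgradeComponent, applyComponents) are hypothetical and not the PR's actual implementation.

package sketch

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

type executor interface{ Plan() error }

type platform interface {
	Apply(ex executor) error
	GetExpectedNodes() int
}

type options struct {
	Confirm         bool
	SkipComponents  bool
	UpgradeKubelets bool
}

// applyFlow restates the steps that runClusterApply used to perform and that
// the refactor presumably folds into Manager.Apply.
func applyFlow(logger *logrus.Entry, ex executor, p platform, opts options, exists bool,
	confirm func(string) bool, verify func(expectedNodes int) error,
	upgradeComponent func(name string), applyComponents func() error) {
	// Reconcile state and ask for confirmation before touching an existing cluster.
	if exists && !opts.Confirm {
		if err := ex.Plan(); err != nil {
			logger.Fatalf("Failed to reconcile cluster state: %v", err)
		}

		if !confirm("Do you want to proceed with cluster apply?") {
			logger.Println("Cluster apply cancelled")

			return
		}
	}

	if err := p.Apply(ex); err != nil {
		logger.Fatalf("Error applying cluster: %v", err)
	}

	if err := verify(p.GetExpectedNodes()); err != nil {
		logger.Fatalf("Verify cluster: %v", err)
	}

	// Controlplane upgrades are only performed when the cluster already existed.
	if exists {
		fmt.Println("Ensuring that cluster controlplane is up to date.")

		releases := []string{"pod-checkpointer", "kube-apiserver", "kubernetes", "calico"}
		if opts.UpgradeKubelets {
			releases = append(releases, "kubelet")
		}

		for _, c := range releases {
			upgradeComponent(c)
		}
	}

	if opts.SkipComponents {
		return
	}

	if err := applyComponents(); err != nil {
		logger.Fatalf("Applying component configuration failed: %v", err)
	}
}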
22 changes: 2 additions & 20 deletions cli/cmd/cluster-destroy.go
@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
@@ -40,25 +39,8 @@ func runClusterDestroy(cmd *cobra.Command, args []string) {
"args": args,
})

ex, p, _, _ := initialize(ctxLogger)

if !clusterExists(ctxLogger, ex) {
ctxLogger.Println("Cluster already destroyed, nothing to do")

return
}

if !confirm {
confirmation := askForConfirmation("WARNING: This action cannot be undone. Do you really want to destroy the cluster?")
if !confirmation {
ctxLogger.Println("Cluster destroy canceled")
return
}
}

if err := p.Destroy(ex); err != nil {
ctxLogger.Fatalf("error destroying cluster: %v", err)
}
l, options := initialize(ctxLogger)
l.Destroy(options)

ctxLogger.Println("Cluster destroyed successfully")
ctxLogger.Println("You can safely remove the assets directory now")
211 changes: 21 additions & 190 deletions cli/cmd/cluster.go
@@ -11,27 +11,15 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
"fmt"
"path/filepath"

"github.com/mitchellh/go-homedir"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chart/loader"
"sigs.k8s.io/yaml"

"github.com/kinvolk/lokomotive/pkg/backend"
"github.com/kinvolk/lokomotive/pkg/backend/local"
"github.com/kinvolk/lokomotive/pkg/components/util"
"github.com/kinvolk/lokomotive/pkg/config"
"github.com/kinvolk/lokomotive/pkg/platform"
"github.com/kinvolk/lokomotive/pkg/terraform"
"github.com/kinvolk/lokomotive/pkg/lokomotive"
"github.com/spf13/viper"
)

var clusterCmd = &cobra.Command{
@@ -43,198 +31,41 @@ func init() {
RootCmd.AddCommand(clusterCmd)
}

// initialize does common initialization actions between cluster operations
// and returns created objects to the caller for further use.
func initialize(ctxLogger *logrus.Entry) (*terraform.Executor, platform.Platform, *config.Config, string) {
lokoConfig, diags := getLokoConfig()
if len(diags) > 0 {
ctxLogger.Fatal(diags)
func initialize(ctxLogger *logrus.Entry) (lokomotive.Manager, *lokomotive.Options) {
// Get the lokocfg configuration and variables file paths.
lokocfgPath := viper.GetString("lokocfg")
variablesPath := viper.GetString("lokocfg-vars")
// HCLLoader loads the user configuration from lokocfg files into a concrete
// LokomotiveConfig struct, which is passed around for further operations.
hclLoader := &config.HCLLoader{
ConfigPath: lokocfgPath,
VariablesPath: variablesPath,
}

p, diags := getConfiguredPlatform()
cfg, diags := hclLoader.Load()
if diags.HasErrors() {
for _, diagnostic := range diags {
ctxLogger.Error(diagnostic.Error())
}

ctxLogger.Fatal("Errors found while loading cluster configuration")
ctxLogger.Fatal("Errors found while loading configuration")
}

if p == nil {
ctxLogger.Fatal("No cluster configured")
options := &lokomotive.Options{
Verbose: verbose,
SkipComponents: skipComponents,
UpgradeKubelets: upgradeKubelets,
Confirm: confirm,
}

// Get the configured backend for the cluster. Backend types currently supported: local, s3.
b, diags := getConfiguredBackend(lokoConfig)
lokomotive, diags := lokomotive.NewLokomotive(ctxLogger, cfg, options)
if diags.HasErrors() {
for _, diagnostic := range diags {
ctxLogger.Error(diagnostic.Error())
}

ctxLogger.Fatal("Errors found while loading cluster configuration")
}

// Use a local backend if no backend is configured.
if b == nil {
b = local.NewLocalBackend()
}

assetDir, err := homedir.Expand(p.GetAssetDir())
if err != nil {
ctxLogger.Fatalf("Error expanding path: %v", err)
}

// Validate backend configuration.
if err = b.Validate(); err != nil {
ctxLogger.Fatalf("Failed to validate backend configuration: %v", err)
}

ex := initializeTerraform(ctxLogger, p, b)

return ex, p, lokoConfig, assetDir
}

// initializeTerraform initializes the Terraform directory using the given backend and platform
// and returns a configured executor.
func initializeTerraform(ctxLogger *logrus.Entry, p platform.Platform, b backend.Backend) *terraform.Executor {
assetDir, err := homedir.Expand(p.GetAssetDir())
if err != nil {
ctxLogger.Fatalf("Error expanding path: %v", err)
}

// Render backend configuration.
renderedBackend, err := b.Render()
if err != nil {
ctxLogger.Fatalf("Failed to render backend configuration file: %v", err)
}

// Configure Terraform directory, module and backend.
if err := terraform.Configure(assetDir, renderedBackend); err != nil {
ctxLogger.Fatalf("Failed to configure Terraform : %v", err)
}

conf := terraform.Config{
WorkingDir: terraform.GetTerraformRootDir(assetDir),
Verbose: verbose,
}

ex, err := terraform.NewExecutor(conf)
if err != nil {
ctxLogger.Fatalf("Failed to create Terraform executor: %v", err)
}

if err := p.Initialize(ex); err != nil {
ctxLogger.Fatalf("Failed to initialize Platform: %v", err)
}

if err := ex.Init(); err != nil {
ctxLogger.Fatalf("Failed to initialize Terraform: %v", err)
}

return ex
}

// clusterExists determines if the cluster has already been created by getting all
// outputs from Terraform. If there is any output defined, it means 'terraform apply'
// ran at least once.
func clusterExists(ctxLogger *logrus.Entry, ex *terraform.Executor) bool {
o := map[string]interface{}{}

if err := ex.Output("", &o); err != nil {
ctxLogger.Fatalf("Failed to check if cluster exists: %v", err)
}

return len(o) != 0
}

type controlplaneUpdater struct {
kubeconfigPath string
assetDir string
ctxLogger logrus.Entry
ex terraform.Executor
}

func (c controlplaneUpdater) getControlplaneChart(name string) (*chart.Chart, error) {
helmChart, err := loader.Load(filepath.Join(c.assetDir, "/lokomotive-kubernetes/bootkube/resources/charts", name))
if err != nil {
return nil, fmt.Errorf("loading chart from assets failed: %w", err)
}

if err := helmChart.Validate(); err != nil {
return nil, fmt.Errorf("chart is invalid: %w", err)
}

return helmChart, nil
}

func (c controlplaneUpdater) getControlplaneValues(name string) (map[string]interface{}, error) {
valuesRaw := ""
if err := c.ex.Output(fmt.Sprintf("%s_values", name), &valuesRaw); err != nil {
return nil, fmt.Errorf("failed to get controlplane component values.yaml from Terraform: %w", err)
}

values := map[string]interface{}{}
if err := yaml.Unmarshal([]byte(valuesRaw), &values); err != nil {
return nil, fmt.Errorf("failed to parse values.yaml for controlplane component: %w", err)
}

return values, nil
}

func (c controlplaneUpdater) upgradeComponent(component string) {
ctxLogger := c.ctxLogger.WithFields(logrus.Fields{
"action": "controlplane-upgrade",
"component": component,
})

actionConfig, err := util.HelmActionConfig("kube-system", c.kubeconfigPath)
if err != nil {
ctxLogger.Fatalf("Failed initializing helm: %v", err)
}

helmChart, err := c.getControlplaneChart(component)
if err != nil {
ctxLogger.Fatalf("Loading chart from assets failed: %v", err)
}

values, err := c.getControlplaneValues(component)
if err != nil {
ctxLogger.Fatalf("Failed to get kubernetes values.yaml from Terraform: %v", err)
}

exists, err := util.ReleaseExists(*actionConfig, component)
if err != nil {
ctxLogger.Fatalf("Failed checking if controlplane component is installed: %v", err)
}

if !exists {
fmt.Printf("Controlplane component '%s' is missing, reinstalling...", component)

install := action.NewInstall(actionConfig)
install.ReleaseName = component
install.Namespace = "kube-system"
install.Atomic = true

if _, err := install.Run(helmChart, map[string]interface{}{}); err != nil {
fmt.Println("Failed!")

ctxLogger.Fatalf("Installing controlplane component failed: %v", err)
}

fmt.Println("Done.")
}

update := action.NewUpgrade(actionConfig)

update.Atomic = true

fmt.Printf("Ensuring controlplane component '%s' is up to date... ", component)

if _, err := update.Run(component, helmChart, values); err != nil {
fmt.Println("Failed!")

ctxLogger.Fatalf("Updating chart failed: %v", err)
ctxLogger.Fatal("Errors found while initializing Lokomotive")
}

fmt.Println("Done.")
return lokomotive, options
}
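
Taken together, the call sites added in this file — config.HCLLoader{ConfigPath, VariablesPath}.Load(), lokomotive.NewLokomotive(ctxLogger, cfg, options), and the l.Apply(options) / l.Destroy(options) calls in the cluster commands — suggest roughly the following shapes for the new pieces. This is a hedged reconstruction from the diff only, flattened into a single package for readability; field and method sets beyond what the diff shows, and the package placement of LokomotiveConfig, are assumptions rather than the PR's actual code.

package sketch // flattened reconstruction; the real types live under pkg/config and pkg/lokomotive

import (
	"github.com/hashicorp/hcl/v2"
	"github.com/sirupsen/logrus"
)

// LokomotiveConfig is the parsed user configuration produced by HCLLoader.Load;
// its fields are not visible in this diff.
type LokomotiveConfig struct{}

// HCLLoader reads the lokocfg and variables files. Field names match the
// struct literal used in initialize().
type HCLLoader struct {
	ConfigPath    string
	VariablesPath string
}

// Load parses the HCL files into a LokomotiveConfig, reporting problems as
// HCL diagnostics (the diff iterates over them and calls HasErrors()).
func (l *HCLLoader) Load() (*LokomotiveConfig, hcl.Diagnostics) {
	return &LokomotiveConfig{}, nil // stub; only the shape is of interest here
}

// Options carries the CLI flags that the cluster commands pass through,
// matching the fields populated in initialize().
type Options struct {
	Verbose         bool
	SkipComponents  bool
	UpgradeKubelets bool
	Confirm         bool
}

// Manager is the interface the cluster commands drive; Apply and Destroy are
// the only methods visible in this diff.
type Manager interface {
	Apply(options *Options)
	Destroy(options *Options)
}

// NewLokomotive wires the logger, parsed configuration and options into a
// Manager implementation; the signature is inferred from its call site above.
func NewLokomotive(logger *logrus.Entry, cfg *LokomotiveConfig, options *Options) (Manager, hcl.Diagnostics) {
	return nil, nil // stub; backend selection and terraform.Executor setup live in the real implementation
}

Since Apply and Destroy return nothing at their call sites, error handling presumably happens inside the Manager implementation, for example via the injected logger.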