Release/0 5 1 (#73)
* Modified docs

Signed-off-by: niki-1905 <nikkikokitkar@gmail.com>

* Remove jmeter submodule, literatebee key, and empty vendor folder.

Signed-off-by: Arush Salil <me@aru.sh>

* Update Dockerfile

Signed-off-by: Arush Salil <me@aru.sh>

* Update Readme

Signed-off-by: Arush Salil <me@aru.sh>

* Update AWS documentation

Signed-off-by: Arush Salil <me@aru.sh>

* Add lifecycle documentation.

Signed-off-by: Arush Salil <me@aru.sh>

* Fix typo

Signed-off-by: Arush Salil <me@aru.sh>

* Refactor and add tests

Signed-off-by: Manuel Müller <mueller.m.h@gmail.com>

* Update documentation

* Update .gitignore

* add pkg provisioner

* Update Makefile

* TK8-47: add infrastructure-only flag

https://kubernauts.atlassian.net/browse/TK8-47
MuellerMH committed Oct 21, 2018
1 parent b67a266 commit 73f114b
Showing 39 changed files with 409 additions and 1,350 deletions.
5 changes: 4 additions & 1 deletion .gitignore
@@ -32,4 +32,7 @@ golint

# Ignore personal configs
config.yml
config.yaml
config.yaml
.scannerwork
report.xml
coverage.xml
16 changes: 14 additions & 2 deletions Makefile
@@ -7,6 +7,7 @@ default: bin

.PHONY: bin
bin:
go get -u ./...
go build ${BUILD_FLAGS} -o tk8 main.go

.PHONY: install
@@ -28,10 +29,21 @@ lint:
vet:
go vet $(PKGS)

.PHONY: test
test:
gocov test ./... | gocov-xml > coverage.xml
gometalinter.v1 --checkstyle > report.xml
sonar-scanner \
-Dsonar.projectKey=mmmac \
-Dsonar.host.url=http://localhost:9000 \
-Dsonar.login=616782f26ee441b650bd709eff9f8acee0a0fd75 \
-X

.PHONY: release
release:
go get -u ./...
./scripts/check-gofmt.sh
go build -o golint github.com/golang/lint/golint
./golint $(PKGS)
golint $(PKGS)
go vet $(PKGS)
go build ${BUILD_FLAGS} -o tk8 main.go
GOOS=darwin GOARCH=amd64 CGO_ENABLED=0 go build ${BUILD_FLAGS} -o tk8-darwin-amd64 main.go
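The new `test` target above assumes `gocov`, `gocov-xml`, `gometalinter.v1`, and `sonar-scanner` are already on the PATH and that a SonarQube server is reachable at the hard-coded URL; a minimal sketch of invoking it under those assumptions:

```shell
# Assumes gocov, gocov-xml, gometalinter.v1 and sonar-scanner are installed,
# and a SonarQube instance is listening on http://localhost:9000.
make test   # writes coverage.xml and report.xml, then uploads the results to SonarQube
```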
87 changes: 0 additions & 87 deletions README_old.md

This file was deleted.

28 changes: 15 additions & 13 deletions cmd/addon.go → cmd/cli/addon.go
@@ -24,7 +24,7 @@ import (
"github.com/spf13/cobra"
)

var monitor, rancher bool
var Addon addon.Addon

// addonCmd represents the addon command
var addonCmd = &cobra.Command{
@@ -51,7 +51,7 @@ var addonInstallCmd = &cobra.Command{
cmd.Help()
os.Exit(1)
}
addon.InstallAddon(args[0])
Addon.Install(args[0])
},
}

@@ -66,23 +66,23 @@ var addonDestroyCmd = &cobra.Command{
cmd.Help()
os.Exit(1)
}
addon.DestroyAddon(args[0])
Addon.Destroy(args[0])
},
}

// addonCmd represents the addon command
var addonCreateCmd = &cobra.Command{
Use: "create [addon name]",
Short: "create a new kubernetes addon packages on your local machine for development",
Long: `Create your own addons for your kubernetes cluster.
Long: `Create your own addons for your kubernetes cluster.
This command will prepare a example package in a folder with the addon name`,
Args: cobra.MinimumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
cmd.Help()
os.Exit(1)
}
addon.PrepareExample(args[0])
Addon.Create(args[0])
},
}

@@ -97,15 +97,16 @@ var addonGetCmd = &cobra.Command{
cmd.Help()
os.Exit(1)
}
addon.GetAddon(args[0])
Addon.Get(args[0])
},
}

/*
* This function gets the path to the kubeconfig, cluster details and auth
* for using with the kubectl.
* Then use this to install the addon on this cluster
*/
func getKubeConfig() string {
/* This function gets the path to the kubeconfig, cluster details and auth
for using with the kubectl.
Then use this to install the addon on this cluster
*/
fmt.Println("Please enter the path to your kubeconfig:")
var kubeConfig string
fmt.Scanln(&kubeConfig)
@@ -117,10 +118,11 @@ func getKubeConfig() string {
return kubeConfig
}

/*
* This function is used to check the whether kubectl command is installed &
* it works with the kubeConfig provided
*/
func checkKubectl(kubeConfig string) {
/*This function is used to check the whether kubectl command is installed &
it works with the kubeConfig provided
*/
kerr, err := exec.LookPath("kubectl")
if err != nil {
log.Fatal("kubectl command not found, kindly check")
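The addon.go hunks above replace the package-level helpers (`addon.InstallAddon`, `addon.DestroyAddon`, `addon.PrepareExample`, `addon.GetAddon`) with methods on the new `Addon` value. A hedged sketch of the corresponding CLI calls, with the addon name chosen purely for illustration:

```shell
# "my-addon" is a placeholder; any real addon package name would go here.
tk8 addon get my-addon       # fetches the addon package            -> Addon.Get
tk8 addon install my-addon   # installs it on the target cluster    -> Addon.Install
tk8 addon create my-addon    # scaffolds an example addon package   -> Addon.Create
tk8 addon destroy my-addon   # removes it from the cluster          -> Addon.Destroy
```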
File renamed without changes.
10 changes: 5 additions & 5 deletions cmd/completion.go → cmd/cli/completion.go
@@ -18,7 +18,7 @@ import (
"fmt"
"os"

"github.com/kubernauts/tk8/internal/cluster"
"github.com/kubernauts/tk8/pkg/common"
"github.com/spf13/cobra"
)

@@ -47,11 +47,11 @@ var bashCompletion = &cobra.Command{
Long: `It will produce the bash completion script which can later be used for the autocompletion of commands in Bash.`,
Run: func(cmd *cobra.Command, args []string) {
script, err := os.OpenFile("tk8.sh", os.O_CREATE|os.O_WRONLY, 0600)
cluster.ErrorCheck("Error creating autocompletion script file.", err)
common.ErrorCheck("Error creating autocompletion script file.", err)
defer script.Close()

err = rootCmd.GenBashCompletion(script)
cluster.ErrorCheck("Error writing to Bash script file", err)
common.ErrorCheck("Error writing to Bash script file", err)
fmt.Printf("Successfully created the Bash completion script. Move the 'tk8.sh' file under /etc/bash_completion.d/ folder and login again.")
},
}
@@ -63,12 +63,12 @@ var zshCompletion = &cobra.Command{
Long: `It will produce the bash completion script which can later be used for the autocompletion of commands in Zsh.`,
Run: func(cmd *cobra.Command, args []string) {
script, err := os.OpenFile("tk8.plugin.zsh", os.O_CREATE|os.O_WRONLY, 0600)
cluster.ErrorCheck("Error creating autocompletion script file.", err)
common.ErrorCheck("Error creating autocompletion script file.", err)
defer script.Close()

fmt.Fprintf(script, "__tk8_tool_complete() {\n")
err = rootCmd.GenZshCompletion(script)
cluster.ErrorCheck("Error writing to Zsh plugin file", err)
common.ErrorCheck("Error writing to Zsh plugin file", err)
fmt.Fprintf(script, "}\ncompdef __tk8_tool_complete tk8")
fmt.Printf("Successfully created the Zsh plugin. Move the 'tk8.plugin.zsh' file under your plugins folder and login again.")
},
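For reference, a sketch of how the generated Bash completion script would be installed, following the success message printed above; the exact `tk8` subcommand that wires up `bashCompletion` is outside this hunk and is assumed here:

```shell
# Assumed invocation; the subcommand wiring is not shown in this diff.
tk8 completion bash                        # writes tk8.sh to the current directory
sudo mv tk8.sh /etc/bash_completion.d/     # then log in again, per the message above
```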
24 changes: 13 additions & 11 deletions cmd/provisioner.go → cmd/cli/provisioner.go
@@ -19,21 +19,20 @@ import (
"os"
"strings"

"github.com/kubernauts/tk8/internal"

aws "github.com/kubernauts/tk8-provisioner-aws"
azure "github.com/kubernauts/tk8-provisioner-azure"
baremetal "github.com/kubernauts/tk8-provisioner-baremetal"
eks "github.com/kubernauts/tk8-provisioner-eks"
nutanix "github.com/kubernauts/tk8-provisioner-nutanix"
openstack "github.com/kubernauts/tk8-provisioner-openstack"
"github.com/kubernauts/tk8/internal/cluster"
"github.com/kubernauts/tk8/pkg/common"
"github.com/kubernauts/tk8/pkg/provisioner"

"github.com/spf13/cobra"
)

var name string
var provisioners = map[string]cluster.Provisioner{
var provisioners = map[string]provisioner.Provisioner{
"aws": aws.NewAWS(),
"azure": azure.NewAzure(),
"baremetal": baremetal.NewBaremetal(),
@@ -52,7 +51,9 @@ var provisionerInstallCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) {
if val, ok := provisioners[args[0]]; ok {
val.Init(args[1:])
val.Setup(args[1:])
if !provisioner.IOnly {
val.Setup(args[1:])
}
}
},
}
@@ -160,10 +161,11 @@ func init() {
clusterCmd.AddCommand(provisionerUpgradeCmd)
clusterCmd.AddCommand(provisionerDestroyCmd)

provisionerInstallCmd.Flags().StringVar(&cluster.Name, "name", cluster.Name, "name of the cluster workspace")
provisionerScaleCmd.Flags().StringVar(&cluster.Name, "name", cluster.Name, "name of the cluster workspace")
provisionerResetCmd.Flags().StringVar(&cluster.Name, "name", cluster.Name, "name of the cluster workspace")
provisionerRemoveCmd.Flags().StringVar(&cluster.Name, "name", cluster.Name, "name of the cluster workspace")
provisionerUpgradeCmd.Flags().StringVar(&cluster.Name, "name", cluster.Name, "name of the cluster workspace")
provisionerDestroyCmd.Flags().StringVar(&cluster.Name, "name", cluster.Name, "name of the cluster workspace")
provisionerInstallCmd.Flags().StringVar(&common.Name, "name", common.Name, "name of the cluster workspace")
provisionerInstallCmd.Flags().BoolVarP(&provisioner.IOnly, "ionly", "i", provisioner.IOnly, "setup only the infrastructure")
provisionerScaleCmd.Flags().StringVar(&common.Name, "name", common.Name, "name of the cluster workspace")
provisionerResetCmd.Flags().StringVar(&common.Name, "name", common.Name, "name of the cluster workspace")
provisionerRemoveCmd.Flags().StringVar(&common.Name, "name", common.Name, "name of the cluster workspace")
provisionerUpgradeCmd.Flags().StringVar(&common.Name, "name", common.Name, "name of the cluster workspace")
provisionerDestroyCmd.Flags().StringVar(&common.Name, "name", common.Name, "name of the cluster workspace")
}
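With the flag registered above, an install can now stop after the infrastructure step (`val.Setup()` is skipped when `provisioner.IOnly` is set); a hedged usage sketch, with the workspace name chosen for illustration:

```shell
# --ionly / -i provisions the infrastructure only and skips the Kubernetes setup step.
tk8 cluster install aws --name demo --ionly
```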
File renamed without changes.
File renamed without changes.
16 changes: 15 additions & 1 deletion docs/en/provisioner/aws/lifecycle.md
@@ -33,4 +33,18 @@ To reset the provisioned cluster run:
tk8 cluster reset aws
```

Once executed a confirmation would be needed to remove Kubernetes from your infrastructure..
Once executed, the current Kubernetes installation is removed and a new setup will run.

## Remove the cluster

Make sure you are in the same directory, containing the inventory directory, where you executed `tk8 cluster install aws`.
If you used a different workspace name with the `--name` flag, please provide it when removing the cluster too.

To remove the provisioned cluster run:

```shell
tk8 cluster remove aws
```

Once executed, the current Kubernetes installation is removed from the infrastructure.

33 changes: 0 additions & 33 deletions internal/addon/common.go

This file was deleted.

24 changes: 0 additions & 24 deletions internal/addon/create.go

This file was deleted.
