
Commit

Merge pull request kubernetes-sigs#36 from lreciomelero/feature/EOS-10927_Create-kind-without-apply-manifest

[EOS-10927] Feature/eos 10927 create kind without apply manifest
lreciomelero committed Feb 13, 2023
2 parents 22199c0 + 63e83c1 commit 91f831c
Showing 6 changed files with 141 additions and 118 deletions.
4 changes: 4 additions & 0 deletions pkg/cluster/createoption.go
@@ -84,6 +84,10 @@ func CreateWithMove(move bool) CreateOption {
return CreateWithRetain(move)
}

func CreateWithAvoidCreation(avoidCreation bool) CreateOption {
return CreateWithRetain(avoidCreation)
}

// CreateWithWaitForReady configures a maximum wait time for the control plane
// node(s) to be ready. By default no waiting is performed
func CreateWithWaitForReady(waitTime time.Duration) CreateOption {
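Note that the new CreateWithAvoidCreation, like CreateWithMove just above it, simply delegates to CreateWithRetain, so it presumably only toggles the retain flag rather than the AvoidCreation field this PR adds to ClusterOptions. A minimal sketch of a variant that sets the new field directly, assuming the createOptionAdapter helper that kind uses for its other option constructors (an assumption; it is not shown in this diff):

func CreateWithAvoidCreation(avoidCreation bool) CreateOption {
	// Sketch only: createOptionAdapter and ClusterOptions.AvoidCreation are taken
	// from the rest of this PR / kind's existing option pattern, not from this hunk.
	return createOptionAdapter(func(c *internalcreate.ClusterOptions) error {
		c.AvoidCreation = avoidCreation
		return nil
	})
}

In practice the flag is also threaded straight through Provider.Create (see pkg/cluster/provider.go below), so this constructor is not on the critical path for the feature.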
199 changes: 102 additions & 97 deletions pkg/cluster/internal/create/actions/createworker/createworker.go
@@ -31,6 +31,7 @@ type action struct {
vaultPassword string
descriptorName string
moveManagement bool
avoidCreation bool
}

// // SecretsFile represents the YAML structure in the secrets.yml file
@@ -56,11 +57,12 @@ spec:
const kubeconfigPath = "/kind/worker-cluster.kubeconfig"

// NewAction returns a new action for installing default CAPI
func NewAction(vaultPassword string, descriptorName string, moveManagement bool) actions.Action {
func NewAction(vaultPassword string, descriptorName string, moveManagement bool, avoidCreation bool) actions.Action {
return &action{
vaultPassword: vaultPassword,
descriptorName: descriptorName,
moveManagement: moveManagement,
avoidCreation: avoidCreation,
}
}

@@ -154,26 +156,15 @@ func (a *action) Execute(ctx *actions.ActionContext) error {
return err
}

//rewriteDescriptorFile(descriptorFile)
defer ctx.Status.End(true)

ctx.Status.Start("Creating the worker cluster 💥")
defer ctx.Status.End(false)

// Create namespace for CAPI clusters (it must exists)
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "create", "ns", capiClustersNamespace)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to create cluster's Namespace")
}

// Apply cluster manifests
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "create", "-n", capiClustersNamespace, "-f", descriptorPath)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to apply manifests")
}

var machineHealthCheck = `
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineHealthCheck
@@ -194,39 +185,13 @@ spec:
timeout: 60s`

// Create the MachineHealthCheck manifest file in the container
machineHealthCheckPath := "/kind/machinehealthcheck.yaml"
machineHealthCheckPath := "/kind/manifests/machinehealthcheck.yaml"
raw = bytes.Buffer{}
cmd = node.Command("sh", "-c", "echo \""+machineHealthCheck+"\" > "+machineHealthCheckPath)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to write the MachineHealthCheck manifest")
}

// Enable the cluster's self-healing
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "-n", capiClustersNamespace, "apply", "-f", machineHealthCheckPath)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to apply the MachineHealthCheck manifest")
}

// Wait for the worker cluster creation
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "-n", capiClustersNamespace, "wait", "--for=condition=ready", "--timeout", "25m", "cluster", descriptorFile.ClusterID)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to create the worker Cluster")
}

// Wait for machines creation
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "-n", capiClustersNamespace, "wait", "--for=condition=ready", "--timeout", "20m", "--all", "md")
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to create the Machines")
}

ctx.Status.End(true) // End Creating the worker cluster

ctx.Status.Start("Installing CAPx in EKS 🎖️")
defer ctx.Status.End(false)

// Create the allow-all-egress network policy file in the container
allowAllEgressNetPolPath := "/kind/allow-all-egress_netpol.yaml"
raw = bytes.Buffer{}
@@ -235,67 +200,42 @@ spec:
return errors.Wrap(err, "failed to write the allow-all-egress network policy")
}

// Get worker cluster's kubeconfig file (in EKS the token last 10m, which should be enough)
raw = bytes.Buffer{}
cmd = node.Command("sh", "-c", "clusterctl -n "+capiClustersNamespace+" get kubeconfig "+descriptorFile.ClusterID+" > "+kubeconfigPath)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to get the kubeconfig file")
}

// AWS/EKS specific
err = installCAPAWorker(aws, github_token, node, kubeconfigPath, allowAllEgressNetPolPath)
if err != nil {
return err
}

//Scale CAPI to 2 replicas
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "--kubeconfig", kubeconfigPath, "-n", "capi-system", "scale", "--replicas", "2", "deploy", "capi-controller-manager")
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to scale the CAPI Deployment")
}
if !a.avoidCreation {

// Allow egress in CAPI's Namespaces
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "--kubeconfig", kubeconfigPath, "-n", "capi-system", "apply", "-f", allowAllEgressNetPolPath)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to apply CAPI's NetworkPolicy")
}
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "--kubeconfig", kubeconfigPath, "-n", "capi-kubeadm-bootstrap-system", "apply", "-f", allowAllEgressNetPolPath)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to apply CAPI's NetworkPolicy")
}
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "--kubeconfig", kubeconfigPath, "-n", "capi-kubeadm-control-plane-system", "apply", "-f", allowAllEgressNetPolPath)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to apply CAPI's NetworkPolicy")
}
ctx.Status.Start("Creating the worker cluster 💥")
defer ctx.Status.End(false)

// Allow egress in cert-manager Namespace
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "--kubeconfig", kubeconfigPath, "-n", "cert-manager", "apply", "-f", allowAllEgressNetPolPath)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to apply cert-manager's NetworkPolicy")
}
// Apply cluster manifests
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "create", "-n", capiClustersNamespace, "-f", descriptorPath)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to apply manifests")
}

ctx.Status.End(true) // End Installing CAPx in worker cluster
// Enable the cluster's self-healing
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "-n", capiClustersNamespace, "apply", "-f", machineHealthCheckPath)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to apply the MachineHealthCheck manifest")
}

if descriptorFile.DeployAutoscaler {
ctx.Status.Start("Adding Cluster-Autoescaler 🗚")
defer ctx.Status.End(false)
// Wait for the worker cluster creation
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "-n", capiClustersNamespace, "wait", "--for=condition=ready", "--timeout", "25m", "cluster", descriptorFile.ClusterID)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to create the worker Cluster")
}

// Wait for machines creation
raw = bytes.Buffer{}
cmd = integrateClusterAutoscaler(node, kubeconfigPath, descriptorFile.ClusterID, "clusterapi")
cmd = node.Command("kubectl", "-n", capiClustersNamespace, "wait", "--for=condition=ready", "--timeout", "20m", "--all", "md")
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to install chart cluster-autoscaler")
return errors.Wrap(err, "failed to create the Machines")
}

ctx.Status.End(true)
}
ctx.Status.End(true) // End Creating the worker cluster

if !a.moveManagement {
ctx.Status.Start("Moving the management role 🗝️")
ctx.Status.Start("Installing CAPx in EKS 🎖️")
defer ctx.Status.End(false)

// Get worker cluster's kubeconfig file (in EKS the token last 10m, which should be enough)
Expand All @@ -305,22 +245,87 @@ spec:
return errors.Wrap(err, "failed to get the kubeconfig file")
}

// Create namespace for CAPI clusters (it must exists) in worker cluster
// AWS/EKS specific
err = installCAPAWorker(aws, github_token, node, kubeconfigPath, allowAllEgressNetPolPath)
if err != nil {
return err
}

//Scale CAPI to 2 replicas
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "--kubeconfig", kubeconfigPath, "create", "ns", capiClustersNamespace)
cmd = node.Command("kubectl", "--kubeconfig", kubeconfigPath, "-n", "capi-system", "scale", "--replicas", "2", "deploy", "capi-controller-manager")
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to create manifests Namespace")
return errors.Wrap(err, "failed to scale the CAPI Deployment")
}

// EKS specific: Pivot management role to worker cluster
// Allow egress in CAPI's Namespaces
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "--kubeconfig", kubeconfigPath, "-n", "capi-system", "apply", "-f", allowAllEgressNetPolPath)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to apply CAPI's NetworkPolicy")
}
raw = bytes.Buffer{}
cmd = node.Command("sh", "-c", "clusterctl move -n "+capiClustersNamespace+" --to-kubeconfig "+kubeconfigPath)
cmd = node.Command("kubectl", "--kubeconfig", kubeconfigPath, "-n", "capi-kubeadm-bootstrap-system", "apply", "-f", allowAllEgressNetPolPath)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to apply CAPI's NetworkPolicy")
}
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "--kubeconfig", kubeconfigPath, "-n", "capi-kubeadm-control-plane-system", "apply", "-f", allowAllEgressNetPolPath)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to apply CAPI's NetworkPolicy")
}

// Allow egress in cert-manager Namespace
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "--kubeconfig", kubeconfigPath, "-n", "cert-manager", "apply", "-f", allowAllEgressNetPolPath)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to pivot management role to worker cluster")
return errors.Wrap(err, "failed to apply cert-manager's NetworkPolicy")
}

ctx.Status.End(true) // End Installing CAPx in worker cluster

if descriptorFile.DeployAutoscaler {
ctx.Status.Start("Adding Cluster-Autoescaler 🗚")
defer ctx.Status.End(false)

raw = bytes.Buffer{}
cmd = integrateClusterAutoscaler(node, kubeconfigPath, descriptorFile.ClusterID, "clusterapi")
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to install chart cluster-autoscaler")
}

ctx.Status.End(true)
}

if !a.moveManagement {
ctx.Status.Start("Moving the management role 🗝️")
defer ctx.Status.End(false)

// Get worker cluster's kubeconfig file (in EKS the token last 10m, which should be enough)
raw = bytes.Buffer{}
cmd = node.Command("sh", "-c", "clusterctl -n "+capiClustersNamespace+" get kubeconfig "+descriptorFile.ClusterID+" > "+kubeconfigPath)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to get the kubeconfig file")
}

// Create namespace for CAPI clusters (it must exists) in worker cluster
raw = bytes.Buffer{}
cmd = node.Command("kubectl", "--kubeconfig", kubeconfigPath, "create", "ns", capiClustersNamespace)
if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to create manifests Namespace")
}

// EKS specific: Pivot management role to worker cluster
raw = bytes.Buffer{}
cmd = node.Command("sh", "-c", "clusterctl move -n "+capiClustersNamespace+" --to-kubeconfig "+kubeconfigPath)

if err := cmd.SetStdout(&raw).Run(); err != nil {
return errors.Wrap(err, "failed to pivot management role to worker cluster")
}

ctx.Status.End(true)
}

ctx.Status.End(true) // End Transfering the management role
}

return nil
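The net effect of the restructuring above: the secrets/descriptor handling and the manifest files (the MachineHealthCheck and the allow-all-egress NetworkPolicy) are still written into the kind node unconditionally, while everything that actually creates and configures the worker cluster — applying the cluster manifests, waiting for the Cluster and the MachineDeployments, fetching the kubeconfig, installCAPAWorker, scaling capi-controller-manager, the egress NetworkPolicies, the optional cluster-autoscaler chart and the optional clusterctl move — now runs only when avoidCreation is false.

The raw/cmd/SetStdout/Run/errors.Wrap sequence is repeated for nearly every kubectl and clusterctl invocation in this file. A small helper along these lines (not part of this commit, and assuming the kind nodes/exec/errors packages already imported here) could cut that down:

// runOnNode runs a command on the kind node, capturing stdout and wrapping any error.
func runOnNode(node nodes.Node, wrapMsg string, name string, args ...string) (string, error) {
	var raw bytes.Buffer
	cmd := node.Command(name, args...)
	if err := cmd.SetStdout(&raw).Run(); err != nil {
		return "", errors.Wrap(err, wrapMsg)
	}
	return raw.String(), nil
}

With it, the manifest apply above becomes a one-liner: _, err := runOnNode(node, "failed to apply manifests", "kubectl", "create", "-n", capiClustersNamespace, "-f", descriptorPath).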
41 changes: 22 additions & 19 deletions pkg/cluster/internal/create/actions/createworker/utils.go
@@ -164,29 +164,32 @@ func rewriteDescriptorFile(descriptorName string) error {
return err
}

deleteKey("aws", descriptorMap)
deleteKey("github_token", descriptorMap)
if descriptorMap["aws"] != nil || descriptorMap["github_token"] != nil {
deleteKey("aws", descriptorMap)
deleteKey("github_token", descriptorMap)

d, err := yaml.Marshal(&descriptorMap)
if err != nil {
fmt.Println("error: %v", err)
return err
}
d, err := yaml.Marshal(&descriptorMap)
if err != nil {
fmt.Println("error: %v", err)
return err
}

// write to file
f, err := os.Create(currentDir + descriptorName)
if err != nil {
fmt.Println(err)
return nil
}
// write to file
f, err := os.Create(currentDir + descriptorName)
if err != nil {
fmt.Println(err)
return nil
}

err = ioutil.WriteFile(descriptorName, d, 0755)
if err != nil {
fmt.Println("error: %v", err)
return err
}
err = ioutil.WriteFile(descriptorName, d, 0755)
if err != nil {
fmt.Println("error: %v", err)
return err
}

f.Close()
f.Close()

}

return nil

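The rewritten block above keeps a few rough edges from the original: fmt.Println("error: %v", err) prints the %v verb literally (Printf would be needed), a failing os.Create returns nil and is silently swallowed, the file is created at currentDir + descriptorName but then written with ioutil.WriteFile(descriptorName, ...), and f.Close() is only reached on the happy path. A possible tightening, as a sketch rather than the committed code (it assumes the joined path is the intended destination, that a 0644 mode is acceptable for the descriptor, and that the path/filepath import is added):

if descriptorMap["aws"] != nil || descriptorMap["github_token"] != nil {
	deleteKey("aws", descriptorMap)
	deleteKey("github_token", descriptorMap)

	d, err := yaml.Marshal(&descriptorMap)
	if err != nil {
		return err
	}

	// os.WriteFile (Go 1.16+; ioutil.WriteFile on older toolchains) creates or
	// truncates the file itself, so the separate os.Create / f.Close pair goes away.
	if err := os.WriteFile(filepath.Join(currentDir, descriptorName), d, 0o644); err != nil {
		return err
	}
}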
3 changes: 2 additions & 1 deletion pkg/cluster/internal/create/create.go
@@ -59,6 +59,7 @@ type ClusterOptions struct {
VaultPassword string
DescriptorName string
MoveManagement bool
AvoidCreation bool
// NodeImage overrides the nodes' images in Config if non-zero
NodeImage string
Retain bool
@@ -137,7 +138,7 @@ func Cluster(logger log.Logger, p providers.Provider, opts *ClusterOptions) error {

// add Stratio step
actionsToRun = append(actionsToRun,
createworker.NewAction(opts.VaultPassword, opts.DescriptorName, opts.MoveManagement), // create worker k8s cluster
createworker.NewAction(opts.VaultPassword, opts.DescriptorName, opts.MoveManagement, opts.AvoidCreation), // create worker k8s cluster
)
}

3 changes: 2 additions & 1 deletion pkg/cluster/provider.go
@@ -169,13 +169,14 @@ func ProviderWithPodman() ProviderOption {
}

// Create provisions and starts a kubernetes-in-docker cluster
func (p *Provider) Create(name string, vaultPassword string, descriptorName string, moveManagement bool, options ...CreateOption) error {
func (p *Provider) Create(name string, vaultPassword string, descriptorName string, moveManagement bool, avoidCreation bool, options ...CreateOption) error {
// apply options
opts := &internalcreate.ClusterOptions{
NameOverride: name,
VaultPassword: vaultPassword,
DescriptorName: descriptorName,
MoveManagement: moveManagement,
AvoidCreation: avoidCreation,
}
for _, o := range options {
if err := o.apply(opts); err != nil {
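For callers, the extra boolean in Provider.Create is the visible part of this PR. A hypothetical invocation (the provider options and the literal values below are illustrative assumptions, not part of the diff):

provider := cluster.NewProvider(cluster.ProviderWithDocker())
if err := provider.Create(
	"example",        // cluster name
	"changeme",       // vaultPassword for the encrypted secrets file
	"cluster.yaml",   // descriptorName
	false,            // moveManagement, passed through to the createworker action
	true,             // avoidCreation: prepare the manifests but skip worker-cluster creation
	cluster.CreateWithWaitForReady(5*time.Minute),
); err != nil {
	log.Fatal(err)
}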
(The diff for the remaining changed file did not load on this page.)
