Skip to content

Commit

Permalink
Re-added the ability to delete a cluster before it's finished creating
Browse files Browse the repository at this point in the history
A previous PR (#1010) made it a prerequisite that an EKS cluster must
be in an ACTIVE state before `eksctl delete cluster` would be able to
delete the cluster.  This was done to find all the ELBs that were
created due to services in the cluster.  However, if a cluster isn't
created, there can be no ELBs created for the cluster, so this
PR removes that restriction and adds some integration tests for
this use case.
  • Loading branch information
D3nn committed Aug 20, 2019
1 parent 89b5258 commit 6c40445
Show file tree
Hide file tree
Showing 4 changed files with 109 additions and 12 deletions.
13 changes: 13 additions & 0 deletions integration/common_test.go
Expand Up @@ -74,3 +74,16 @@ func eksctlFail(args ...string) *gexec.Session {
Expect(session.ExitCode()).To(Not(Equal(0)))
return session
}

// eksctlStart starts running an eksctl command and waits 45 seconds, but does
// not wait for the command to finish. This is primarily so that we can run
// `eksctl create ...` and then subsequently call `eksctl delete` on the same
// cluster while the create is still in progress.
// It returns an error only if the command could not be started.
func eksctlStart(args ...string) error {
	cmd := exec.Command(eksctlPath, args...)
	fmt.Fprintf(GinkgoWriter, "calling %q with %v\n", eksctlPath, args)
	if err := cmd.Start(); err != nil {
		return err
	}
	// Reap the child in the background so it doesn't linger as a zombie after
	// it exits; its exit status is deliberately ignored here, since the point
	// of this helper is to fire-and-forget the command.
	go func() {
		_ = cmd.Wait()
	}()
	time.Sleep(45 * time.Second)
	return nil
}
83 changes: 83 additions & 0 deletions integration/createdeletebeforeactive_test.go
@@ -0,0 +1,83 @@
// +build integration

package integration_test

import (
"fmt"

awseks "github.com/aws/aws-sdk-go/service/eks"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

"github.com/weaveworks/eksctl/pkg/ctl/cmdutils"
"github.com/weaveworks/eksctl/pkg/testutils/aws"
. "github.com/weaveworks/eksctl/pkg/testutils/matchers"
)

// This suite verifies that `eksctl delete cluster` works on a cluster that is
// still being created (i.e. not yet in the ACTIVE state), and that a second
// delete of the same cluster fails cleanly.
var _ = Describe("(Integration) Create & Delete before Active", func() {
	const initNG = "ng-0"

	Describe("when creating a cluster with 1 node", func() {
		var clName string

		Context("for deleting a creating cluster", func() {
			It("should run eksctl and not wait for it to finish", func() {

				fmt.Fprintf(GinkgoWriter, "Using kubeconfig: %s\n", kubeconfigPath)

				if clName == "" {
					clName = cmdutils.ClusterName("", "") + "-delb4active"
				}

				// Start the create in the background; the whole point of this
				// suite is to delete the cluster while creation is in flight.
				// Fail fast if the command could not even be started.
				err := eksctlStart("create", "cluster",
					"--verbose", "4",
					"--name", clName,
					"--tags", "alpha.eksctl.io/description=eksctl delete before active test",
					"--nodegroup-name", initNG,
					"--node-labels", "ng-name="+initNG,
					"--node-type", "t2.medium",
					"--nodes", "1",
					"--region", region,
					"--version", version,
				)
				Expect(err).NotTo(HaveOccurred())
			})
		})

		Context("when deleting the (creating) cluster", func() {

			It("should not return an error", func() {

				eksctlSuccess("delete", "cluster",
					"--verbose", "4",
					"--name", clName,
					"--region", region,
					"--wait",
				)
			})

			It("and should have deleted the EKS cluster and both CloudFormation stacks", func() {

				awsSession := aws.NewSession(region)

				Expect(awsSession).ToNot(HaveExistingCluster(clName, awseks.ClusterStatusActive, version))

				Expect(awsSession).ToNot(HaveExistingStack(fmt.Sprintf("eksctl-%s-cluster", clName)))
				Expect(awsSession).ToNot(HaveExistingStack(fmt.Sprintf("eksctl-%s-nodegroup-ng-%d", clName, 0)))
			})
		})

		Context("when trying to delete the cluster again", func() {

			It("should return a non-zero exit code", func() {

				eksctlFail("delete", "cluster",
					"--verbose", "4",
					"--name", clName,
					"--region", region,
				)
			})
		})
	})
})
23 changes: 11 additions & 12 deletions pkg/ctl/delete/cluster.go
Expand Up @@ -108,19 +108,18 @@ func doDeleteCluster(cmd *cmdutils.Cmd) error {
{

logger.Info("cleaning up LoadBalancer services")
if err := ctl.RefreshClusterConfig(cfg); err != nil {
return err
}
cs, err := ctl.NewStdClientSet(cfg)
if err != nil {
return err
}
ctx, cleanup := context.WithTimeout(context.Background(), 10*time.Minute)
defer cleanup()
if err := elb.Cleanup(ctx, ctl.Provider.EC2(), ctl.Provider.ELB(), ctl.Provider.ELBV2(), cs, cfg); err != nil {
return err
// only need to cleanup ELBs if the cluster has already been created.
if err := ctl.RefreshClusterConfig(cfg); err == nil {
cs, err := ctl.NewStdClientSet(cfg)
if err != nil {
return err
}
ctx, cleanup := context.WithTimeout(context.Background(), 10*time.Minute)
defer cleanup()
if err := elb.Cleanup(ctx, ctl.Provider.EC2(), ctl.Provider.ELB(), ctl.Provider.ELBV2(), cs, cfg); err != nil {
return err
}
}

tasks, err := stackManager.NewTasksToDeleteClusterWithNodeGroups(cmd.Wait, func(errs chan error, _ string) error {
logger.Info("trying to cleanup dangling network interfaces")
if err := ctl.LoadClusterVPC(cfg); err != nil {
Expand Down
2 changes: 2 additions & 0 deletions pkg/eks/eks.go
Expand Up @@ -33,6 +33,7 @@ func (c *ClusterProvider) DescribeControlPlane(cl *api.ClusterMeta) (*awseks.Clu
}

// DescribeControlPlaneMustBeActive describes the cluster control plane and checks if status is active
// If the status isn't active, an error will be returned unless strict is set to false.
func (c *ClusterProvider) DescribeControlPlaneMustBeActive(cl *api.ClusterMeta) (*awseks.Cluster, error) {
cluster, err := c.DescribeControlPlane(cl)
if err != nil {
Expand All @@ -56,6 +57,7 @@ func (c *ClusterProvider) RefreshClusterConfig(spec *api.ClusterConfig) error {
if err != nil {
return err
}

logger.Debug("cluster = %#v", cluster)

data, err := base64.StdEncoding.DecodeString(*cluster.CertificateAuthority.Data)
Expand Down

0 comments on commit 6c40445

Please sign in to comment.