
Commit

use autoscaling group/EKS nodegroup summary as source of truth for nodegroups scaling config
Jake committed Apr 9, 2021
1 parent cf6d0ed commit 58916cb
Showing 8 changed files with 308 additions and 645 deletions.
6 changes: 6 additions & 0 deletions pkg/actions/nodegroup/export_test.go
@@ -1,5 +1,11 @@
package nodegroup

import "github.com/weaveworks/eksctl/pkg/cfn/manager"

func (m *Manager) SetWaiter(wait WaitFunc) {
m.wait = wait
}

func (m *Manager) SetStackManager(stackManager manager.StackManager) {
m.stackManager = stackManager
}
24 changes: 5 additions & 19 deletions pkg/actions/nodegroup/get.go
@@ -10,36 +10,22 @@ import (
)

func (m *Manager) GetAll() ([]*manager.NodeGroupSummary, error) {
summaries, err := m.stackManager.GetNodeGroupSummaries("")
summaries, err := m.stackManager.GetUnmanagedNodeGroupSummaries("")
if err != nil {
return nil, errors.Wrap(err, "getting nodegroup stack summaries")
}

nodeGroups, err := m.ctl.Provider.EKS().ListNodegroups(&eks.ListNodegroupsInput{
managedNodeGroups, err := m.ctl.Provider.EKS().ListNodegroups(&eks.ListNodegroupsInput{
ClusterName: &m.cfg.Metadata.Name,
})
if err != nil {
return nil, err
}

var nodeGroupsWithoutStacks []string
for _, ng := range nodeGroups.Nodegroups {
found := false
for _, summary := range summaries {
if summary.Name == *ng {
found = true
}
}

if !found {
nodeGroupsWithoutStacks = append(nodeGroupsWithoutStacks, *ng)
}
}

for _, nodeGroupWithoutStack := range nodeGroupsWithoutStacks {
for _, managedNodeGroup := range managedNodeGroups.Nodegroups {
describeOutput, err := m.ctl.Provider.EKS().DescribeNodegroup(&eks.DescribeNodegroupInput{
ClusterName: &m.cfg.Metadata.Name,
NodegroupName: &nodeGroupWithoutStack,
NodegroupName: managedNodeGroup,
})
if err != nil {
return nil, err
@@ -72,7 +58,7 @@ func (m *Manager) GetAll() ([]*manager.NodeGroupSummary, error) {
}

func (m *Manager) Get(name string) (*manager.NodeGroupSummary, error) {
summaries, err := m.stackManager.GetNodeGroupSummaries(name)
summaries, err := m.stackManager.GetUnmanagedNodeGroupSummaries(name)
if err != nil {
return nil, errors.Wrap(err, "getting nodegroup stack summaries")
}
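
Note: with this change, CloudFormation stack summaries are the source of truth only for unmanaged nodegroups, while every managed nodegroup is described through the EKS API. For reference, a minimal standalone sketch of the ListNodegroups/DescribeNodegroup pattern the new GetAll leans on (illustrative aws-sdk-go usage, not eksctl code; the cluster name and error handling are placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/eks"
)

func main() {
	// Illustrative only: list each managed nodegroup in a cluster and read
	// its scaling config straight from the EKS API.
	sess := session.Must(session.NewSession())
	eksAPI := eks.New(sess)
	clusterName := aws.String("my-cluster") // placeholder

	list, err := eksAPI.ListNodegroups(&eks.ListNodegroupsInput{ClusterName: clusterName})
	if err != nil {
		panic(err)
	}
	for _, name := range list.Nodegroups {
		out, err := eksAPI.DescribeNodegroup(&eks.DescribeNodegroupInput{
			ClusterName:   clusterName,
			NodegroupName: name,
		})
		if err != nil {
			panic(err)
		}
		sc := out.Nodegroup.ScalingConfig
		if sc == nil {
			continue
		}
		fmt.Printf("%s: min=%d desired=%d max=%d\n",
			aws.StringValue(name),
			aws.Int64Value(sc.MinSize), aws.Int64Value(sc.DesiredSize), aws.Int64Value(sc.MaxSize))
	}
}
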
62 changes: 56 additions & 6 deletions pkg/actions/nodegroup/scale.go
@@ -4,7 +4,9 @@ import (
"fmt"

"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/kris-nova/logger"
"github.com/weaveworks/eksctl/pkg/cfn/manager"
"github.com/weaveworks/eksctl/pkg/utils/waiters"

"github.com/aws/aws-sdk-go/aws"
@@ -16,17 +18,27 @@

func (m *Manager) Scale(ng *api.NodeGroup) error {
logger.Info("scaling nodegroup %q in cluster %s", ng.Name, m.cfg.Metadata.Name)
stackManager := m.ctl.NewStackManager(m.cfg)

hasStacks, err := m.hasStacks(ng.Name)
nodegroupStackInfos, err := m.stackManager.DescribeNodeGroupStacksAndResources()
if err != nil {
return err
}

if hasStacks {
err = stackManager.ScaleNodeGroup(ng)
var stackInfo manager.StackInfo
var ok, isUnmanagedNodegroup bool
stackInfo, ok = nodegroupStackInfos[ng.Name]
if ok {
nodegroupType, err := manager.GetNodeGroupType(stackInfo.Stack.Tags)
if err != nil {
return err
}
isUnmanagedNodegroup = nodegroupType == api.NodeGroupTypeUnmanaged
}

if isUnmanagedNodegroup {
err = m.scaleUnmanagedNodeGroup(ng, stackInfo)
} else {
err = m.scale(ng)
err = m.scaleManagedNodeGroup(ng)
}

if err != nil {
@@ -36,7 +48,45 @@ func (m *Manager) Scale(ng *api.NodeGroup) error {
return nil
}

func (m *Manager) scale(ng *api.NodeGroup) error {
func (m *Manager) scaleUnmanagedNodeGroup(ng *api.NodeGroup, stackInfo manager.StackInfo) error {
asgName := ""
for _, resource := range stackInfo.Resources {
if *resource.LogicalResourceId == "NodeGroup" {
asgName = *resource.PhysicalResourceId
break
}
}

if asgName == "" {
return fmt.Errorf("failed to find NodeGroup auto scaling group")
}

input := &autoscaling.UpdateAutoScalingGroupInput{
AutoScalingGroupName: &asgName,
}

if ng.MaxSize != nil {
input.MaxSize = aws.Int64(int64(*ng.MaxSize))
}

if ng.MinSize != nil {
input.MinSize = aws.Int64(int64(*ng.MinSize))
}

if ng.DesiredCapacity != nil {
input.DesiredCapacity = aws.Int64(int64(*ng.DesiredCapacity))
}
out, err := m.ctl.Provider.ASG().UpdateAutoScalingGroup(input)
if err != nil {
logger.Debug("ASG update output: %s", out.String())
return err
}
logger.Info("nodegroup successfully scaled")

return nil
}

func (m *Manager) scaleManagedNodeGroup(ng *api.NodeGroup) error {
scalingConfig := &eks.NodegroupScalingConfig{}

if ng.MaxSize != nil {
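
Note: manager.GetNodeGroupType is not shown in this diff. As a hedged sketch, it can be read as a lookup of the api.NodeGroupTypeTag tag on the nodegroup's CloudFormation stack, roughly like the function below (name, signature, and fallback behaviour here are assumptions, not the real implementation):

package sketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/cloudformation"

	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
)

// nodeGroupTypeFromTags approximates manager.GetNodeGroupType: it reads the
// eksctl nodegroup-type tag from the nodegroup stack so Scale can choose the
// ASG path for unmanaged nodegroups and the EKS UpdateNodegroupConfig path
// for managed ones. The real helper may treat untagged (legacy) stacks
// differently.
func nodeGroupTypeFromTags(tags []*cloudformation.Tag) (api.NodeGroupType, error) {
	for _, tag := range tags {
		if tag.Key != nil && *tag.Key == api.NodeGroupTypeTag && tag.Value != nil {
			return api.NodeGroupType(*tag.Value), nil
		}
	}
	return "", fmt.Errorf("no %q tag found on nodegroup stack", api.NodeGroupTypeTag)
}
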
146 changes: 120 additions & 26 deletions pkg/actions/nodegroup/scale_test.go
@@ -8,42 +8,68 @@ import (

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/cloudformation"
awseks "github.com/aws/aws-sdk-go/service/eks"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/weaveworks/eksctl/pkg/actions/nodegroup"
api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
"github.com/weaveworks/eksctl/pkg/cfn/manager"
"github.com/weaveworks/eksctl/pkg/cfn/manager/fakes"
"github.com/weaveworks/eksctl/pkg/eks"
"github.com/weaveworks/eksctl/pkg/testutils/mockprovider"
)

var _ = Describe("Scale", func() {
When("the nodegroup was not created by eksctl", func() {
var (
clusterName, ngName string
p *mockprovider.MockProvider
cfg *api.ClusterConfig
ng *api.NodeGroup
manager *nodegroup.Manager
)
var (
clusterName, ngName string
p *mockprovider.MockProvider
cfg *api.ClusterConfig
ng *api.NodeGroup
m *nodegroup.Manager
fakeStackManager *fakes.FakeStackManager
)
BeforeEach(func() {
clusterName = "my-cluster"
ngName = "my-ng"
p = mockprovider.NewMockProvider()
cfg = api.NewClusterConfig()
cfg.Metadata.Name = clusterName

ng = &api.NodeGroup{
NodeGroupBase: &api.NodeGroupBase{
Name: ngName,
ScalingConfig: &api.ScalingConfig{
MinSize: aws.Int(1),
DesiredCapacity: aws.Int(3),
},
},
}
m = nodegroup.New(cfg, &eks.ClusterProvider{Provider: p}, nil)
fakeStackManager = new(fakes.FakeStackManager)
m.SetStackManager(fakeStackManager)
p.MockCloudFormation().On("ListStacksPages", mock.Anything, mock.Anything).Return(nil, nil)
})

Describe("Managed NodeGroup", func() {
BeforeEach(func() {
clusterName = "my-cluster"
ngName = "my-ng"
p = mockprovider.NewMockProvider()
cfg = api.NewClusterConfig()
cfg.Metadata.Name = clusterName

ng = &api.NodeGroup{
NodeGroupBase: &api.NodeGroupBase{
Name: ngName,
ScalingConfig: &api.ScalingConfig{
MinSize: aws.Int(1),
DesiredCapacity: aws.Int(3),
nodegroups := make(map[string]manager.StackInfo)
nodegroups["my-ng"] = manager.StackInfo{
Stack: &manager.Stack{
Tags: []*cloudformation.Tag{
{
Key: aws.String(api.NodeGroupNameTag),
Value: aws.String("my-ng"),
},
{
Key: aws.String(api.NodeGroupTypeTag),
Value: aws.String(string(api.NodeGroupTypeManaged)),
},
},
},
}
manager = nodegroup.New(cfg, &eks.ClusterProvider{Provider: p}, nil)
p.MockCloudFormation().On("ListStacksPages", mock.Anything, mock.Anything).Return(nil, nil)
fakeStackManager.DescribeNodeGroupStacksAndResourcesReturns(nodegroups, nil)
})

It("scales the nodegroup using the values provided", func() {
@@ -62,18 +88,18 @@ var _ = Describe("Scale", func() {
}).Return(&request.Request{}, nil)

waitCallCount := 0
manager.SetWaiter(func(name, msg string, acceptors []request.WaiterAcceptor, newRequest func() *request.Request, waitTimeout time.Duration, troubleshoot func(string) error) error {
m.SetWaiter(func(name, msg string, acceptors []request.WaiterAcceptor, newRequest func() *request.Request, waitTimeout time.Duration, troubleshoot func(string) error) error {
waitCallCount++
return nil
})

err := manager.Scale(ng)
err := m.Scale(ng)

Expect(err).NotTo(HaveOccurred())
Expect(waitCallCount).To(Equal(1))
})

When("upgrade fails", func() {
When("update fails", func() {
It("returns an error", func() {
p.MockEKS().On("UpdateNodegroupConfig", &awseks.UpdateNodegroupConfigInput{
ScalingConfig: &awseks.NodegroupScalingConfig{
@@ -84,11 +110,79 @@ var _ = Describe("Scale", func() {
NodegroupName: &ngName,
}).Return(nil, fmt.Errorf("foo"))

err := manager.Scale(ng)
err := m.Scale(ng)

Expect(err).To(MatchError(fmt.Sprintf("failed to scale nodegroup for cluster %q, error: foo", clusterName)))
})
})
})

Describe("Unmanaged Nodegroup", func() {
When("the ASG exists", func() {
BeforeEach(func() {
nodegroups := make(map[string]manager.StackInfo)
nodegroups["my-ng"] = manager.StackInfo{
Stack: &manager.Stack{
Tags: []*cloudformation.Tag{
{
Key: aws.String(api.NodeGroupNameTag),
Value: aws.String("my-ng"),
},
{
Key: aws.String(api.NodeGroupTypeTag),
Value: aws.String(string(api.NodeGroupTypeUnmanaged)),
},
},
},
Resources: []*cloudformation.StackResource{
{
PhysicalResourceId: aws.String("asg-name"),
LogicalResourceId: aws.String("NodeGroup"),
},
},
}
fakeStackManager.DescribeNodeGroupStacksAndResourcesReturns(nodegroups, nil)

p.MockASG().On("UpdateAutoScalingGroup", &autoscaling.UpdateAutoScalingGroupInput{
AutoScalingGroupName: aws.String("asg-name"),
MinSize: aws.Int64(1),
DesiredCapacity: aws.Int64(3),
}).Return(nil, nil)

})

It("scales the nodegroup", func() {
err := m.Scale(ng)
Expect(err).NotTo(HaveOccurred())
})
})

When("the asg resource doesn't exist", func() {
BeforeEach(func() {
nodegroups := make(map[string]manager.StackInfo)
nodegroups["my-ng"] = manager.StackInfo{
Stack: &manager.Stack{
Tags: []*cloudformation.Tag{
{
Key: aws.String(api.NodeGroupNameTag),
Value: aws.String("my-ng"),
},
{
Key: aws.String(api.NodeGroupTypeTag),
Value: aws.String(string(api.NodeGroupTypeUnmanaged)),
},
},
},
Resources: []*cloudformation.StackResource{},
}
fakeStackManager.DescribeNodeGroupStacksAndResourcesReturns(nodegroups, nil)

})

It("returns an error", func() {
err := m.Scale(ng)
Expect(err).To(MatchError(ContainSubstring("failed to find NodeGroup auto scaling group")))
})
})
})
})
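
Note: the new cases above cover managed and unmanaged scaling plus the missing-ASG error, but not DescribeNodeGroupStacksAndResources itself failing. Under the same fixtures, a hypothetical extra case (a sketch, not part of this commit) could look like:

When("describing the nodegroup stacks fails", func() {
	BeforeEach(func() {
		fakeStackManager.DescribeNodeGroupStacksAndResourcesReturns(nil, fmt.Errorf("boom"))
	})

	It("returns the error", func() {
		err := m.Scale(ng)
		Expect(err).To(MatchError(ContainSubstring("boom")))
	})
})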
