[EKSCTL create cluster command] Authorise self-managed nodes via `aws-auth configmap` when EKS access entries are disabled (#7698)

* Disable access entry creation for self-managed nodes on clusters with CONFIG_MAP-only authentication mode

* Fix the logic for updating the aws-auth configmap
TiberiuGC committed Apr 4, 2024
1 parent f9475f8 commit 2addd3a
Showing 7 changed files with 67 additions and 34 deletions.
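The change targets clusters whose `authenticationMode` is `CONFIG_MAP`, where EKS access entries are unavailable and self-managed nodes must instead be authorised through the `aws-auth` ConfigMap. Below is a minimal, hypothetical sketch of that scenario built with eksctl's Go API, mirroring the integration test further down; the cluster and nodegroup names are taken from that test, while the region is an assumption.

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types"

	api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
)

func main() {
	// A CONFIG_MAP-only cluster: EKS access entries are disabled, so the
	// self-managed nodegroup below has to be authorised via aws-auth.
	cfg := api.NewClusterConfig()
	cfg.Metadata.Name = "accessentries-api-disabled" // name from the integration test
	cfg.Metadata.Region = "us-west-2"                // assumed region
	cfg.AccessConfig = &api.AccessConfig{
		AuthenticationMode: ekstypes.AuthenticationModeConfigMap,
	}
	cfg.NodeGroups = append(cfg.NodeGroups, &api.NodeGroup{
		NodeGroupBase: &api.NodeGroupBase{
			Name: "aws-auth-ng",
			ScalingConfig: &api.ScalingConfig{
				DesiredCapacity: aws.Int(1),
			},
		},
	})
	fmt.Printf("cluster %s, authenticationMode %s, %d self-managed nodegroup(s)\n",
		cfg.Metadata.Name, cfg.AccessConfig.AuthenticationMode, len(cfg.NodeGroups))
}
```

With a config like this, the nodegroup's instance role is expected to be mapped in `aws-auth` rather than registered through an access entry.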
26 changes: 21 additions & 5 deletions integration/tests/accessentries/accessentries_test.go
@@ -50,8 +50,8 @@ var (
namespaceRoleARN string
err error

apiEnabledCluster = "accessentries-api-enabled-2"
apiDisabledCluster = "accessentries-api-disabled-2"
apiEnabledCluster = "accessentries-api-enabled"
apiDisabledCluster = "accessentries-api-disabled"
)

func init() {
@@ -123,24 +123,39 @@ var _ = Describe("(Integration) [AccessEntries Test]", func() {
cfg = makeClusterConfig(apiDisabledCluster)
})

It("should create a cluster with authenticationMode set to CONFIG_MAP", func() {
It("should create a cluster with authenticationMode set to CONFIG_MAP and allow self-managed nodes to join via aws-auth", func() {
cfg.AccessConfig.AuthenticationMode = ekstypes.AuthenticationModeConfigMap

cfg.NodeGroups = append(cfg.NodeGroups, &api.NodeGroup{
NodeGroupBase: &api.NodeGroupBase{
Name: "aws-auth-ng",
ScalingConfig: &api.ScalingConfig{
DesiredCapacity: aws.Int(1),
},
},
})
data, err := json.Marshal(cfg)
Expect(err).NotTo(HaveOccurred())

Expect(params.EksctlCreateCmd.
WithArgs(
"cluster",
"--config-file", "-",
"--without-nodegroup",
"--verbose", "4",
).
WithoutArg("--region", params.Region).
WithStdin(bytes.NewReader(data))).To(RunSuccessfully())

Expect(ctl.RefreshClusterStatus(context.Background(), cfg)).NotTo(HaveOccurred())
Expect(ctl.IsAccessEntryEnabled()).To(BeFalse())

Expect(params.EksctlGetCmd.WithArgs(
"nodegroup",
"--cluster", apiDisabledCluster,
"--name", "aws-auth-ng",
"-o", "yaml",
)).To(runner.RunSuccessfullyWithOutputStringLines(
ContainElement(ContainSubstring("Status: CREATE_COMPLETE")),
))
})

It("should fail early when trying to create access entries", func() {
@@ -400,6 +415,7 @@ var _ = SynchronizedAfterSuite(func() {}, func() {
WithArgs(
"cluster",
"--name", apiDisabledCluster,
"--disable-nodegroup-eviction",
"--wait",
)).To(RunSuccessfully())

11 changes: 9 additions & 2 deletions pkg/actions/nodegroup/create.go
@@ -285,11 +285,17 @@ func (m *Manager) postNodeCreationTasks(ctx context.Context, clientSet kubernete
timeoutCtx, cancel := context.WithTimeout(ctx, m.ctl.AWSProvider.WaitTimeout())
defer cancel()

if (!m.accessEntry.IsEnabled() && !api.IsDisabled(options.UpdateAuthConfigMap)) || api.IsEnabled(options.UpdateAuthConfigMap) {
// authorize self-managed nodes to join the cluster via aws-auth configmap
// if EKS access entries are disabled OR
if (!m.accessEntry.IsEnabled() && !api.IsDisabled(options.UpdateAuthConfigMap)) ||
// if explicitly requested by the user
api.IsEnabled(options.UpdateAuthConfigMap) {
if err := eks.UpdateAuthConfigMap(m.cfg.NodeGroups, clientSet); err != nil {
return err
}
}

// only wait for self-managed nodes to join if either authorization method is being used
if !api.IsDisabled(options.UpdateAuthConfigMap) {
for _, ng := range m.cfg.NodeGroups {
if err := eks.WaitForNodes(timeoutCtx, clientSet, ng); err != nil {
@@ -298,6 +304,7 @@ func (m *Manager) postNodeCreationTasks(ctx context.Context, clientSet kubernete
}
}
logger.Success("created %d nodegroup(s) in cluster %q", len(m.cfg.NodeGroups), m.cfg.Metadata.Name)

for _, ng := range m.cfg.ManagedNodeGroups {
if err := eks.WaitForNodes(timeoutCtx, clientSet, ng); err != nil {
if m.cfg.PrivateCluster.Enabled {
@@ -308,8 +315,8 @@ func (m *Manager) postNodeCreationTasks(ctx context.Context, clientSet kubernete
}
}
}

logger.Success("created %d managed nodegroup(s) in cluster %q", len(m.cfg.ManagedNodeGroups), m.cfg.Metadata.Name)

return nil
}

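The condition in `postNodeCreationTasks` combines two inputs: whether the cluster supports access entries, and the tri-state `--update-auth-configmap` option (`api.IsEnabled`/`api.IsDisabled` over a `*bool`). The following is a standalone sketch of the same decision, with a made-up helper name standing in for the real fields; it is an illustration, not the library's API.

```go
package main

import "fmt"

// shouldUpdateAuthConfigMap restates the condition added to postNodeCreationTasks:
// self-managed nodes are added to the aws-auth ConfigMap when access entries are
// disabled (unless the user explicitly opted out), or whenever the user explicitly
// opted in.
func shouldUpdateAuthConfigMap(accessEntriesEnabled bool, updateAuthConfigMap *bool) bool {
	explicitlyEnabled := updateAuthConfigMap != nil && *updateAuthConfigMap
	explicitlyDisabled := updateAuthConfigMap != nil && !*updateAuthConfigMap
	return (!accessEntriesEnabled && !explicitlyDisabled) || explicitlyEnabled
}

func main() {
	yes, no := true, false
	fmt.Println(shouldUpdateAuthConfigMap(false, nil)) // true: CONFIG_MAP-only cluster, default behaviour
	fmt.Println(shouldUpdateAuthConfigMap(true, nil))  // false: access entries handle authorisation
	fmt.Println(shouldUpdateAuthConfigMap(true, &yes)) // true: explicitly requested by the user
	fmt.Println(shouldUpdateAuthConfigMap(false, &no)) // false: explicitly disabled
}
```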
11 changes: 7 additions & 4 deletions pkg/cfn/manager/create_tasks.go
@@ -8,6 +8,8 @@ import (
"github.com/pkg/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types"

"github.com/weaveworks/eksctl/pkg/actions/accessentry"
api "github.com/weaveworks/eksctl/pkg/apis/eksctl.io/v1alpha5"
iamoidc "github.com/weaveworks/eksctl/pkg/iam/oidc"
@@ -24,7 +26,7 @@ const (
// NewTasksToCreateCluster defines all tasks required to create a cluster along
// with some nodegroups; see CreateAllNodeGroups for how onlyNodeGroupSubset works.
func (c *StackCollection) NewTasksToCreateCluster(ctx context.Context, nodeGroups []*api.NodeGroup,
managedNodeGroups []*api.ManagedNodeGroup, accessEntries []api.AccessEntry, accessEntryCreator accessentry.CreatorInterface, postClusterCreationTasks ...tasks.Task) *tasks.TaskTree {
managedNodeGroups []*api.ManagedNodeGroup, accessConfig *api.AccessConfig, accessEntryCreator accessentry.CreatorInterface, postClusterCreationTasks ...tasks.Task) *tasks.TaskTree {
taskTree := tasks.TaskTree{Parallel: false}

taskTree.Append(&createClusterTask{
@@ -34,8 +36,8 @@ func (c *StackCollection) NewTasksToCreateCluster(ctx context.Context, nodeGroup
ctx: ctx,
})

if len(accessEntries) > 0 {
taskTree.Append(accessEntryCreator.CreateTasks(ctx, accessEntries))
if len(accessConfig.AccessEntries) > 0 {
taskTree.Append(accessEntryCreator.CreateTasks(ctx, accessConfig.AccessEntries))
}

appendNodeGroupTasksTo := func(taskTree *tasks.TaskTree) {
@@ -44,7 +46,8 @@
Parallel: true,
IsSubTask: true,
}
if unmanagedNodeGroupTasks := c.NewUnmanagedNodeGroupTask(ctx, nodeGroups, false, false, false, vpcImporter); unmanagedNodeGroupTasks.Len() > 0 {
disableAccessEntryCreation := accessConfig.AuthenticationMode == ekstypes.AuthenticationModeConfigMap
if unmanagedNodeGroupTasks := c.NewUnmanagedNodeGroupTask(ctx, nodeGroups, false, false, disableAccessEntryCreation, vpcImporter); unmanagedNodeGroupTasks.Len() > 0 {
unmanagedNodeGroupTasks.IsSubTask = true
nodeGroupTasks.Append(unmanagedNodeGroupTasks)
}
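`NewTasksToCreateCluster` now receives the whole `AccessConfig` instead of only the access entries, so it can tell the unmanaged-nodegroup tasks to skip access-entry creation on CONFIG_MAP-only clusters. A minimal sketch of that gate follows; the helper name is illustrative rather than part of the codebase.

```go
package main

import (
	"fmt"

	ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types"
)

// disableAccessEntryCreation mirrors the check added to NewTasksToCreateCluster:
// when the cluster authenticates exclusively through the aws-auth ConfigMap,
// no access entries should be created for self-managed nodegroups.
func disableAccessEntryCreation(mode ekstypes.AuthenticationMode) bool {
	return mode == ekstypes.AuthenticationModeConfigMap
}

func main() {
	fmt.Println(disableAccessEntryCreation(ekstypes.AuthenticationModeConfigMap))       // true
	fmt.Println(disableAccessEntryCreation(ekstypes.AuthenticationModeApiAndConfigMap)) // false
	fmt.Println(disableAccessEntryCreation(ekstypes.AuthenticationModeApi))             // false
}
```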
21 changes: 8 additions & 13 deletions pkg/cfn/manager/fakes/fake_stack_manager.go

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion pkg/cfn/manager/interface.go
@@ -89,7 +89,7 @@ type StackManager interface {
NewTasksToDeleteClusterWithNodeGroups(ctx context.Context, clusterStack *Stack, nodeGroupStacks []NodeGroupStack, clusterOperable bool, newOIDCManager NewOIDCManager, newTasksToDeleteAddonIAM NewTasksToDeleteAddonIAM, newTasksToDeletePodIdentityRole NewTasksToDeletePodIdentityRole, cluster *ekstypes.Cluster, clientSetGetter kubernetes.ClientSetGetter, wait, force bool, cleanup func(chan error, string) error) (*tasks.TaskTree, error)
NewTasksToCreateIAMServiceAccounts(serviceAccounts []*api.ClusterIAMServiceAccount, oidc *iamoidc.OpenIDConnectManager, clientSetGetter kubernetes.ClientSetGetter) *tasks.TaskTree
NewTaskToDeleteUnownedNodeGroup(ctx context.Context, clusterName, nodegroup string, nodeGroupDeleter NodeGroupDeleter, waitCondition *DeleteWaitCondition) tasks.Task
NewTasksToCreateCluster(ctx context.Context, nodeGroups []*api.NodeGroup, managedNodeGroups []*api.ManagedNodeGroup, accessEntries []api.AccessEntry, accessEntryCreator accessentry.CreatorInterface, postClusterCreationTasks ...tasks.Task) *tasks.TaskTree
NewTasksToCreateCluster(ctx context.Context, nodeGroups []*api.NodeGroup, managedNodeGroups []*api.ManagedNodeGroup, accessConfig *api.AccessConfig, accessEntryCreator accessentry.CreatorInterface, postClusterCreationTasks ...tasks.Task) *tasks.TaskTree
NewTasksToDeleteIAMServiceAccounts(ctx context.Context, serviceAccounts []string, clientSetGetter kubernetes.ClientSetGetter, wait bool) (*tasks.TaskTree, error)
NewTasksToDeleteNodeGroups(stacks []NodeGroupStack, shouldDelete func(_ string) bool, wait bool, cleanup func(chan error, string) error) (*tasks.TaskTree, error)
NewTasksToDeleteOIDCProviderWithIAMServiceAccounts(ctx context.Context, newOIDCManager NewOIDCManager, cluster *ekstypes.Cluster, clientSetGetter kubernetes.ClientSetGetter, force bool) (*tasks.TaskTree, error)
15 changes: 8 additions & 7 deletions pkg/cfn/manager/tasks_test.go
@@ -75,6 +75,7 @@ var _ = Describe("StackCollection Tasks", func() {

It("should have nice description", func() {
fakeVPCImporter := new(vpcfakes.FakeImporter)
accessConfig := &api.AccessConfig{}
// TODO use DescribeTable

// The supportsManagedNodes argument has no effect on the Describe call, so the values are alternated
@@ -99,7 +100,7 @@
Expect(tasks.Describe()).To(Equal(`no tasks`))
}
{
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), nil, nil, nil)
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), nil, accessConfig, nil)
Expect(tasks.Describe()).To(Equal(`
2 sequential tasks: { create cluster control plane "test-cluster",
2 parallel sub-tasks: {
@@ -110,18 +111,18 @@
`))
}
{
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar"), nil, nil, nil)
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar"), nil, accessConfig, nil)
Expect(tasks.Describe()).To(Equal(`
2 sequential tasks: { create cluster control plane "test-cluster", create nodegroup "bar"
}
`))
}
{
tasks := stackManager.NewTasksToCreateCluster(context.Background(), nil, nil, nil, nil)
tasks := stackManager.NewTasksToCreateCluster(context.Background(), nil, nil, accessConfig, nil)
Expect(tasks.Describe()).To(Equal(`1 task: { create cluster control plane "test-cluster" }`))
}
{
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), makeManagedNodeGroups("m1", "m2"), nil, nil)
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), makeManagedNodeGroups("m1", "m2"), accessConfig, nil)
Expect(tasks.Describe()).To(Equal(`
2 sequential tasks: { create cluster control plane "test-cluster",
2 parallel sub-tasks: {
@@ -138,7 +139,7 @@
`))
}
{
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), makeManagedNodeGroupsWithPropagatedTags("m1", "m2"), nil, nil)
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar", "foo"), makeManagedNodeGroupsWithPropagatedTags("m1", "m2"), accessConfig, nil)
Expect(tasks.Describe()).To(Equal(`
2 sequential tasks: { create cluster control plane "test-cluster",
2 parallel sub-tasks: {
@@ -161,7 +162,7 @@
`))
}
{
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("foo"), makeManagedNodeGroups("m1"), nil, nil)
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("foo"), makeManagedNodeGroups("m1"), accessConfig, nil)
Expect(tasks.Describe()).To(Equal(`
2 sequential tasks: { create cluster control plane "test-cluster",
2 parallel sub-tasks: {
@@ -172,7 +173,7 @@
`))
}
{
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar"), nil, nil, nil, &task{id: 1})
tasks := stackManager.NewTasksToCreateCluster(context.Background(), makeNodeGroups("bar"), nil, accessConfig, nil, &task{id: 1})
Expect(tasks.Describe()).To(Equal(`
2 sequential tasks: { create cluster control plane "test-cluster",
2 sequential sub-tasks: {
15 changes: 13 additions & 2 deletions pkg/ctl/create/cluster.go
@@ -8,6 +8,7 @@ import (
"sync"

"github.com/aws/aws-sdk-go-v2/aws"
ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types"

"github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector"
"github.com/kris-nova/logger"
@@ -360,7 +361,7 @@ func doCreateCluster(cmd *cmdutils.Cmd, ngFilter *filter.NodeGroupFilter, params
postClusterCreationTasks.Append(preNodegroupAddons)
}

taskTree := stackManager.NewTasksToCreateCluster(ctx, cfg.NodeGroups, cfg.ManagedNodeGroups, cfg.AccessConfig.AccessEntries, makeAccessEntryCreator(cfg.Metadata.Name, stackManager), postClusterCreationTasks)
taskTree := stackManager.NewTasksToCreateCluster(ctx, cfg.NodeGroups, cfg.ManagedNodeGroups, cfg.AccessConfig, makeAccessEntryCreator(cfg.Metadata.Name, stackManager), postClusterCreationTasks)

logger.Info(taskTree.Describe())
if errs := taskTree.DoAllSync(); len(errs) > 0 {
@@ -426,18 +427,28 @@ func doCreateCluster(cmd *cmdutils.Cmd, ngFilter *filter.NodeGroupFilter, params
} else {
ngCtx, cancel := context.WithTimeout(ctx, cmd.ProviderConfig.WaitTimeout)
defer cancel()

// authorize self-managed nodes to join the cluster via aws-auth configmap
// only if EKS access entries are disabled
if cfg.AccessConfig.AuthenticationMode == ekstypes.AuthenticationModeConfigMap {
if err := eks.UpdateAuthConfigMap(cfg.NodeGroups, clientSet); err != nil {
return err
}
}

for _, ng := range cfg.NodeGroups {
// wait for nodes to join
if err := eks.WaitForNodes(ngCtx, clientSet, ng); err != nil {
return err
}
}
logger.Success("created %d nodegroup(s) in cluster %q", len(cfg.NodeGroups), cfg.Metadata.Name)

for _, ng := range cfg.ManagedNodeGroups {
if err := eks.WaitForNodes(ngCtx, clientSet, ng); err != nil {
return err
}
}
logger.Success("created %d managed nodegroup(s) in cluster %q", len(cfg.ManagedNodeGroups), cfg.Metadata.Name)
}
}
if postNodegroupAddons != nil && postNodegroupAddons.Len() > 0 {
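For reference, authorising a self-managed node via `aws-auth` means mapping its instance role into the ConfigMap's `mapRoles` section. The sketch below prints the typical shape of such an entry (kept in Go for a single example language); the account ID and role name are placeholders, and the exact entry written by `eks.UpdateAuthConfigMap` may differ slightly.

```go
package main

import "fmt"

// nodeRoleMapping is the conventional aws-auth mapRoles entry that lets EC2
// instances assuming this role join the cluster as nodes. The role ARN is a
// placeholder, not a value produced by this commit.
const nodeRoleMapping = `- rolearn: arn:aws:iam::111122223333:role/aws-auth-ng-NodeInstanceRole
  username: system:node:{{EC2PrivateDNSName}}
  groups:
    - system:bootstrappers
    - system:nodes
`

func main() {
	fmt.Print(nodeRoleMapping)
}
```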
