feat: Upgrade eks module #358

Closed · wants to merge 2 commits
22 changes: 11 additions & 11 deletions modules/cluster/local.tf
@@ -18,20 +18,20 @@ locals {
   boot_iam_role = var.create_asm_role ? module.iam_assumable_role_secrets-secrets-manager.this_iam_role_arn : var.boot_iam_role

   node_group_defaults = {
-    ami_type         = var.node_group_ami
-    disk_size        = var.node_group_disk_size
-    desired_capacity = var.desired_node_count
-    max_capacity     = var.max_node_count
-    min_capacity     = var.min_node_count
-    instance_types   = [var.node_machine_type]
+    ami_type       = var.node_group_ami
+    disk_size      = var.node_group_disk_size
+    desired_size   = var.desired_node_count
+    max_size       = var.max_node_count
+    min_size       = var.min_node_count
+    instance_types = [var.node_machine_type]

     launch_template_id      = null
     launch_template_version = null

     # The provider default is 'ON_DEMAND'. We don't set it explicitly, to avoid changes to existing clusters provisioned with this module.
     capacity_type = var.enable_spot_instances ? "SPOT" : null

-    k8s_labels = {
+    labels = {
       "jenkins-x.io/name"       = var.cluster_name
       "jenkins-x.io/part-of"    = "jx-platform"
       "jenkins-x.io/managed-by" = "terraform"
@@ -45,11 +45,11 @@ locals {
   node_groups_extended = { for k, v in var.node_groups : k => merge(
     local.node_group_defaults,
     v,
-    contains(keys(v), "k8s_labels") ? {
+    contains(keys(v), "labels") ? {
       # Deep merge isn't a thing in Terraform yet, so we commit these atrocities.
-      k8s_labels = merge(
-        local.node_group_defaults["k8s_labels"],
-        v["k8s_labels"],
+      labels = merge(
+        local.node_group_defaults["labels"],
+        v["labels"],
       )
     } : {},
   ) }
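A note on the renames above: v18 of the upstream eks module takes managed node group labels as `labels` rather than `k8s_labels`, and the scaling inputs become `desired_size`/`min_size`/`max_size`. Since Terraform's `merge()` is shallow, the locals re-merge the nested `labels` map by hand. A hypothetical caller-side input (the `build` group name and `node-role` label are invented for illustration) would combine with the defaults like this:

# Hypothetical var.node_groups entry; merge() is shallow, so the locals
# above re-merge the nested labels map on top of the jenkins-x.io/* defaults.
node_groups = {
  build = {
    instance_types = ["m5.xlarge"] # overrides the node_group_defaults entry
    labels = {
      "node-role" = "build" # ends up merged with the default labels
    }
  }
}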
173 changes: 93 additions & 80 deletions modules/cluster/main.tf
@@ -28,7 +28,7 @@ provider "kubernetes" {
 // ----------------------------------------------------------------------------
 module "vpc" {
   source  = "terraform-aws-modules/vpc/aws"
-  version = "~> 2.70"
+  # version = "~> 2.70"
   create_vpc = var.create_vpc
   name       = var.vpc_name
   cidr       = var.vpc_cidr_block
@@ -59,87 +59,100 @@ module "vpc" {
 // See https://github.com/terraform-aws-modules/terraform-aws-eks
 // ----------------------------------------------------------------------------
 module "eks" {
-  source          = "terraform-aws-modules/eks/aws"
-  version         = ">= 14.0, < 18.0"
-  create_eks      = var.create_eks
-  cluster_name    = var.cluster_name
-  cluster_version = var.cluster_version
-  subnets         = var.create_vpc ? (var.cluster_in_private_subnet ? module.vpc.private_subnets : module.vpc.public_subnets) : var.subnets
-  vpc_id          = var.create_vpc ? module.vpc.vpc_id : var.vpc_id
-  enable_irsa     = true
+  source  = "terraform-aws-modules/eks/aws"
+  version = ">= 18.0"
+
+
+  prefix_separator                    = ""
+  iam_role_name                       = var.cluster_name
+  cluster_security_group_name        = var.cluster_name
+  cluster_security_group_description = "EKS cluster security group."
+
+  create                   = var.create_eks
+  cluster_name             = var.cluster_name
+  cluster_version          = var.cluster_version
+  control_plane_subnet_ids = var.create_vpc ? (var.cluster_in_private_subnet ? module.vpc.private_subnets : module.vpc.public_subnets) : var.subnets
+  subnet_ids               = var.create_vpc ? (var.cluster_in_private_subnet ? module.vpc.private_subnets : module.vpc.public_subnets) : var.subnets
+  vpc_id                   = var.create_vpc ? module.vpc.vpc_id : var.vpc_id
+  enable_irsa              = true
   tags = var.eks_cluster_tags

-  worker_groups_launch_template = var.enable_worker_group && var.enable_worker_groups_launch_template ? [
-    for subnet in(var.create_vpc ? module.vpc.public_subnets : var.subnets) :
-    {
-      subnets                 = [subnet]
-      asg_desired_capacity    = var.lt_desired_nodes_per_subnet
-      asg_min_size            = var.lt_min_nodes_per_subnet
-      asg_max_size            = var.lt_max_nodes_per_subnet
-      spot_price              = (var.enable_spot_instances ? var.spot_price : null)
-      instance_type           = var.node_machine_type
-      root_volume_type        = var.volume_type
-      root_volume_size        = var.volume_size
-      root_encrypted          = var.encrypt_volume_self
-      override_instance_types = var.allowed_spot_instance_types
-      autoscaling_enabled     = "true"
-      public_ip               = true
-      tags = [
-        {
-          key                 = "k8s.io/cluster-autoscaler/enabled"
-          propagate_at_launch = "false"
-          value               = "true"
-        },
-        {
-          key                 = "k8s.io/cluster-autoscaler/${var.cluster_name}"
-          propagate_at_launch = "false"
-          value               = "true"
-        }
-      ]
-    }
-  ] : []
-
-  worker_groups = var.enable_worker_group && !var.enable_worker_groups_launch_template ? [
-    {
-      name                 = "worker-group-${var.cluster_name}"
-      instance_type        = var.node_machine_type
-      asg_desired_capacity = var.desired_node_count
-      asg_min_size         = var.min_node_count
-      asg_max_size         = var.max_node_count
-      spot_price           = (var.enable_spot_instances ? var.spot_price : null)
-      key_name             = (var.enable_key_name ? var.key_name : null)
-      root_volume_type     = var.volume_type
-      root_volume_size     = var.volume_size
-      root_iops            = var.iops
-      tags = [
-        {
-          key                 = "k8s.io/cluster-autoscaler/enabled"
-          propagate_at_launch = "false"
-          value               = "true"
-        },
-        {
-          key                 = "k8s.io/cluster-autoscaler/${var.cluster_name}"
-          propagate_at_launch = "false"
-          value               = "true"
-        }
-      ]
-    }
-  ] : []
-
-  node_groups = !var.enable_worker_group ? local.node_groups_extended : {}
-
-  workers_additional_policies = [
-    "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEC2ContainerRegistryPowerUser"
-  ]
-
-  map_users    = var.map_users
-  map_roles    = var.map_roles
-  map_accounts = var.map_accounts
-  cluster_endpoint_private_access       = var.cluster_endpoint_private_access
-  cluster_endpoint_public_access        = var.cluster_endpoint_public_access
-  cluster_endpoint_private_access_cidrs = var.cluster_endpoint_private_access_cidrs
-  cluster_endpoint_public_access_cidrs  = var.cluster_endpoint_public_access_cidrs
-  cluster_encryption_config             = var.cluster_encryption_config
+  # TODO: Handle self managed node groups
+
+  # worker_groups_launch_template = var.enable_worker_group && var.enable_worker_groups_launch_template ? [
+  #   for subnet in(var.create_vpc ? module.vpc.public_subnets : var.subnets) :
+  #   {
+  #     subnets                 = [subnet]
+  #     asg_desired_capacity    = var.lt_desired_nodes_per_subnet
+  #     asg_min_size            = var.lt_min_nodes_per_subnet
+  #     asg_max_size            = var.lt_max_nodes_per_subnet
+  #     spot_price              = (var.enable_spot_instances ? var.spot_price : null)
+  #     instance_type           = var.node_machine_type
+  #     root_volume_type        = var.volume_type
+  #     root_volume_size        = var.volume_size
+  #     root_encrypted          = var.encrypt_volume_self
+  #     override_instance_types = var.allowed_spot_instance_types
+  #     autoscaling_enabled     = "true"
+  #     public_ip               = true
+  #     tags = [
+  #       {
+  #         key                 = "k8s.io/cluster-autoscaler/enabled"
+  #         propagate_at_launch = "false"
+  #         value               = "true"
+  #       },
+  #       {
+  #         key                 = "k8s.io/cluster-autoscaler/${var.cluster_name}"
+  #         propagate_at_launch = "false"
+  #         value               = "true"
+  #       }
+  #     ]
+  #   }
+  # ] : []
+
+  # worker_groups = var.enable_worker_group && !var.enable_worker_groups_launch_template ? [
+  #   {
+  #     name                 = "worker-group-${var.cluster_name}"
+  #     instance_type        = var.node_machine_type
+  #     asg_desired_capacity = var.desired_node_count
+  #     asg_min_size         = var.min_node_count
+  #     asg_max_size         = var.max_node_count
+  #     spot_price           = (var.enable_spot_instances ? var.spot_price : null)
+  #     key_name             = (var.enable_key_name ? var.key_name : null)
+  #     root_volume_type     = var.volume_type
+  #     root_volume_size     = var.volume_size
+  #     root_iops            = var.iops
+  #     tags = [
+  #       {
+  #         key                 = "k8s.io/cluster-autoscaler/enabled"
+  #         propagate_at_launch = "false"
+  #         value               = "true"
+  #       },
+  #       {
+  #         key                 = "k8s.io/cluster-autoscaler/${var.cluster_name}"
+  #         propagate_at_launch = "false"
+  #         value               = "true"
+  #       }
+  #     ]
+  #   }
+  # ] : []
+
+  eks_managed_node_groups = !var.enable_worker_group ? local.node_groups_extended : {}
+
+  # Nodes get AmazonEC2ContainerRegistryReadOnly by default. Should be enough.
+  # workers_additional_policies = [
+  #   "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEC2ContainerRegistryPowerUser"
+  # ]
+
+  # TODO: Handle aws_auth some other way
+  # map_users    = var.map_users
+  # map_roles    = var.map_roles
+  # map_accounts = var.map_accounts
+
+  cluster_endpoint_private_access = var.cluster_endpoint_private_access
+  cluster_endpoint_public_access  = var.cluster_endpoint_public_access
+  # TODO: Is there a replacement?
+  # cluster_endpoint_private_access_cidrs = var.cluster_endpoint_private_access_cidrs
+  cluster_endpoint_public_access_cidrs = var.cluster_endpoint_public_access_cidrs
+  cluster_encryption_config            = var.cluster_encryption_config
 }

 // ----------------------------------------------------------------------------
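On the `# TODO: Handle aws_auth some other way` comment: v18 initially dropped the `map_users`/`map_roles`/`map_accounts` inputs, but later v18 releases reintroduce opt-in aws-auth ConfigMap management. A possible follow-up, assuming a module release that ships these inputs (v18.20 onward, if memory serves):

# Sketch only: wires the old map_* variables into the module's
# opt-in aws-auth management (requires a configured kubernetes provider).
manage_aws_auth_configmap = true
aws_auth_users            = var.map_users
aws_auth_roles            = var.map_roles
aws_auth_accounts         = var.map_accounts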
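And on `# TODO: Is there a replacement?`: v18 has no direct equivalent of `cluster_endpoint_private_access_cidrs`. One common substitute is an extra ingress rule on the cluster security group; a sketch under that assumption:

# Sketch only: restrict private API server access (443) to the given CIDRs
# via the cluster security group instead of the removed input.
cluster_security_group_additional_rules = {
  private_api_access = {
    description = "Private API server access from allowed CIDRs"
    type        = "ingress"
    protocol    = "tcp"
    from_port   = 443
    to_port     = 443
    cidr_blocks = var.cluster_endpoint_private_access_cidrs
  }
}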
2 changes: 1 addition & 1 deletion versions.tf
@@ -2,7 +2,7 @@ terraform {
   required_version = ">= 0.12.17, < 2.0.0"

   required_providers {
-    aws        = "> 4.0, < 5.0"
+    aws        = "> 4.0"
     kubernetes = "~> 2.0"
     local      = "~> 2.0"
     null       = "~> 3.0"