From 1b6a59e067446abbb305cd77923f27e9ddd99129 Mon Sep 17 00:00:00 2001
From: Bryant Biggs
Date: Wed, 8 May 2024 16:38:23 -0400
Subject: [PATCH] feat: Add Karpenter on EKS managed node group with EKS Pod Identity and cluster access entry (#1942)

---
 docs/patterns/karpenter-mng.md        |   7 ++
 docs/patterns/karpenter.md            |   2 +-
 patterns/karpenter-mng/README.md      |  87 ++++++++++++++++
 patterns/karpenter-mng/eks.tf         | 139 ++++++++++++++++++++++++++
 patterns/karpenter-mng/example.yaml   |  21 ++++
 patterns/karpenter-mng/karpenter.yaml |  44 ++++++++
 patterns/karpenter-mng/main.tf        |  68 +++++++++++++
 patterns/karpenter-mng/vpc.tf         |  26 +++++
 patterns/karpenter/README.md          |   2 +-
 9 files changed, 394 insertions(+), 2 deletions(-)
 create mode 100644 docs/patterns/karpenter-mng.md
 create mode 100644 patterns/karpenter-mng/README.md
 create mode 100644 patterns/karpenter-mng/eks.tf
 create mode 100644 patterns/karpenter-mng/example.yaml
 create mode 100644 patterns/karpenter-mng/karpenter.yaml
 create mode 100644 patterns/karpenter-mng/main.tf
 create mode 100644 patterns/karpenter-mng/vpc.tf

diff --git a/docs/patterns/karpenter-mng.md b/docs/patterns/karpenter-mng.md
new file mode 100644
index 0000000000..75d251a271
--- /dev/null
+++ b/docs/patterns/karpenter-mng.md
@@ -0,0 +1,7 @@
+---
+title: Karpenter on EKS MNG
+---
+
+{%
+  include-markdown "../../patterns/karpenter-mng/README.md"
+%}
diff --git a/docs/patterns/karpenter.md b/docs/patterns/karpenter.md
index fe784dece3..2d2311703d 100644
--- a/docs/patterns/karpenter.md
+++ b/docs/patterns/karpenter.md
@@ -1,5 +1,5 @@
 ---
-title: Karpenter
+title: Karpenter on EKS Fargate
 ---
 
 {%
diff --git a/patterns/karpenter-mng/README.md b/patterns/karpenter-mng/README.md
new file mode 100644
index 0000000000..80f0494b3d
--- /dev/null
+++ b/patterns/karpenter-mng/README.md
@@ -0,0 +1,87 @@
+# Karpenter on EKS MNG
+
+This pattern demonstrates how to provision Karpenter on an EKS managed node group. Deploying onto standard EC2 instances created by an EKS managed node group allows daemonsets to run on the nodes that host the Karpenter controller, providing better unification of tooling across your data plane. This solution is composed of the following components:
+
+1. An EKS managed node group that applies both a taint and a label for the Karpenter controller. We want the Karpenter controller to target these nodes via a `nodeSelector` so that the controller pods do not run on nodes that Karpenter itself creates and manages. In addition, we apply a taint to keep other pods off these nodes, as they are primarily intended for the controller pods. We apply a toleration to the CoreDNS addon to allow those pods to run on the controller nodes as well. This is needed so that when a cluster is created, the CoreDNS pods have a place to run, allowing the Karpenter controller to be provisioned and start managing the additional compute requirements for the cluster. Without letting CoreDNS run on these nodes, the controller pods would fail to deploy and the data plane would be in a "deadlock", waiting for resources to deploy but unable to do so.
+2. The `eks-pod-identity-agent` addon has been provisioned to allow the Karpenter controller to utilize EKS Pod Identity for AWS permissions via an IAM role.
+3. The VPC subnets and node security group have been tagged with `"karpenter.sh/discovery" = local.name` for discoverability by the controller. The controller will discover these resources and use them to provision EC2 resources for the cluster.
+4. An IAM role for the Karpenter controller has been created with a trust policy for the EKS Pod Identity service principal. This allows the EKS Pod Identity service to provide AWS credentials to the Karpenter controller pods in order to call AWS APIs (see the optional check after this list).
+5. An IAM role for the nodes that Karpenter will create has been created, along with a cluster access entry which allows the nodes to acquire permissions to join the cluster. Karpenter will create and manage the instance profile that utilizes this IAM role.
+6. An SQS queue has been created and subscribed to certain EC2 CloudWatch events. Karpenter uses this queue to respond to EC2 lifecycle events and gracefully migrate pods off an instance before it is terminated.
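+
+Once the pattern is deployed, you can verify the Pod Identity association described above with the AWS CLI. This is an optional sanity check; the cluster name and region below assume the defaults used in this pattern:
+
+```sh
+# Expect an association for the "karpenter" service account in "kube-system"
+aws eks list-pod-identity-associations --cluster-name ex-karpenter-mng --region us-west-2
+```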
+
+## Code
+
+The areas of significance related to this pattern are highlighted in the code provided below.
+
+### Cluster
+
+```terraform hl_lines="20-28 31 47-60 65-68 87-89 95-98 106-130"
+{% include "../../patterns/karpenter-mng/eks.tf" %}
+```
+
+### VPC
+
+```terraform hl_lines="21-22"
+{% include "../../patterns/karpenter-mng/vpc.tf" %}
+```
+
+### EC2NodeClass and NodePool
+
+```yaml hl_lines="8-16"
+{% include "../../patterns/karpenter-mng/karpenter.yaml" %}
+```
+
+## Deploy
+
+See [here](https://aws-ia.github.io/terraform-aws-eks-blueprints/getting-started/#prerequisites) for the prerequisites and steps to deploy this pattern.
+
+## Validate
+
+1. Test by listing the nodes in the cluster. You should see two nodes from the EKS managed node group:
+
+    ```sh
+    kubectl get nodes
+
+    NAME                                      STATUS   ROLES    AGE     VERSION
+    ip-10-0-38-5.us-west-2.compute.internal   Ready    <none>   2m40s   v1.29.3-eks-ae9a62a
+    ip-10-0-9-38.us-west-2.compute.internal   Ready    <none>   2m35s   v1.29.3-eks-ae9a62a
+    ```
+
+2. Provision the Karpenter `EC2NodeClass` and `NodePool` resources which provide Karpenter the necessary configuration to provision EC2 resources:
+
+    ```sh
+    kubectl apply -f karpenter.yaml
+    ```
+
+3. Once the Karpenter resources are in place, Karpenter will provision the necessary EC2 resources to satisfy any pending pods in the scheduler's queue. You can demonstrate this with the example deployment provided. First, deploy the example deployment, which has the initial number of replicas set to 0:
+
+    ```sh
+    kubectl apply -f example.yaml
+    ```
+
+4. When you scale the example deployment, you should see Karpenter respond by quickly provisioning EC2 resources to satisfy those pending pod requests:
+
+    ```sh
+    kubectl scale deployment inflate --replicas=3
+    ```
+
+5. Listing the nodes should now show some EC2 compute that Karpenter has created for the example deployment:
+
+    ```sh
+    kubectl get nodes
+
+    NAME                                        STATUS   ROLES    AGE     VERSION
+    ip-10-0-38-109.us-west-2.compute.internal   Ready    <none>   11s     v1.29.3-eks-ae9a62a # <== EC2 created by Karpenter
+    ip-10-0-38-5.us-west-2.compute.internal     Ready    <none>   3m54s   v1.29.3-eks-ae9a62a
+    ip-10-0-9-38.us-west-2.compute.internal     Ready    <none>   3m49s   v1.29.3-eks-ae9a62a
+    ```
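+
+Optionally, you can also inspect Karpenter's view of the capacity it launched. This check assumes the `NodeClaim` CRD installed with the Karpenter chart used in this pattern; names and ages will differ in your cluster:
+
+```sh
+# NodeClaims represent the instances Karpenter has launched; expect at least
+# one claim backing the node created for the inflate pods
+kubectl get nodeclaims
+```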
+
+## Destroy
+
+Delete the example deployment to de-provision the Karpenter-created resources first:
+
+```sh
+kubectl delete -f example.yaml
+```
+
+{%
+  include-markdown "../../docs/_partials/destroy.md"
+%}
diff --git a/patterns/karpenter-mng/eks.tf b/patterns/karpenter-mng/eks.tf
new file mode 100644
index 0000000000..a1214cb5c8
--- /dev/null
+++ b/patterns/karpenter-mng/eks.tf
@@ -0,0 +1,139 @@
+################################################################################
+# Cluster
+################################################################################
+
+module "eks" {
+  source  = "terraform-aws-modules/eks/aws"
+  version = "~> 20.9"
+
+  cluster_name    = local.name
+  cluster_version = "1.29"
+
+  # Give the Terraform identity admin access to the cluster
+  # which will allow it to deploy resources into the cluster
+  enable_cluster_creator_admin_permissions = true
+  cluster_endpoint_public_access           = true
+
+  cluster_addons = {
+    coredns = {
+      configuration_values = jsonencode({
+        tolerations = [
+          # Allow CoreDNS to run on the same nodes as the Karpenter controller
+          # for use during cluster creation when Karpenter nodes do not yet exist
+          {
+            key    = "karpenter.sh/controller"
+            value  = "true"
+            effect = "NoSchedule"
+          }
+        ]
+      })
+    }
+    eks-pod-identity-agent = {}
+    kube-proxy             = {}
+    vpc-cni                = {}
+  }
+
+  vpc_id     = module.vpc.vpc_id
+  subnet_ids = module.vpc.private_subnets
+
+  eks_managed_node_groups = {
+    karpenter = {
+      instance_types = ["m5.large"]
+
+      min_size     = 2
+      max_size     = 3
+      desired_size = 2
+
+      labels = {
+        # Used to ensure Karpenter runs on nodes that it does not manage
+        "karpenter.sh/controller" = "true"
+      }
+
+      taints = {
+        # The pods that do not tolerate this taint should run on nodes
+        # created by Karpenter
+        karpenter = {
+          key    = "karpenter.sh/controller"
+          value  = "true"
+          effect = "NO_SCHEDULE"
+        }
+      }
+    }
+  }
+
+  tags = merge(local.tags, {
+    # NOTE - if creating multiple security groups with this module, only tag the
+    # security group that Karpenter should utilize with the following tag
+    # (i.e. - at most, only one security group should have this tag in your account)
+    "karpenter.sh/discovery" = local.name
+  })
+}
+
+output "configure_kubectl" {
+  description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig"
+  value       = "aws eks --region ${local.region} update-kubeconfig --name ${module.eks.cluster_name}"
+}
+
+################################################################################
+# Controller & Node IAM roles, SQS Queue, EventBridge Rules
+################################################################################
+
+module "karpenter" {
+  source  = "terraform-aws-modules/eks/aws//modules/karpenter"
+  version = "~> 20.9"
+
+  cluster_name = module.eks.cluster_name
+
+  # Name needs to match role name passed to the EC2NodeClass
+  node_iam_role_use_name_prefix = false
+  node_iam_role_name            = local.name
+
+  tags = local.tags
+}
+
+resource "aws_eks_pod_identity_association" "karpenter" {
+  cluster_name    = module.eks.cluster_name
+  namespace       = "kube-system"
+  service_account = "karpenter"
+  role_arn        = module.karpenter.iam_role_arn
+}
+
+################################################################################
+# Helm charts
+################################################################################
+
+resource "helm_release" "karpenter" {
+  namespace           = "kube-system"
+  name                = "karpenter"
+  repository          = "oci://public.ecr.aws/karpenter"
+  repository_username = data.aws_ecrpublic_authorization_token.token.user_name
+  repository_password = data.aws_ecrpublic_authorization_token.token.password
+  chart               = "karpenter"
+  version             = "0.36.1"
+  wait                = false
+
+  values = [
+    <<-EOT
+    nodeSelector:
+      karpenter.sh/controller: 'true'
+    tolerations:
+      - key: CriticalAddonsOnly
+        operator: Exists
+      - key: karpenter.sh/controller
+        operator: Exists
+        effect: NoSchedule
+    settings:
+      clusterName: ${module.eks.cluster_name}
+      clusterEndpoint: ${module.eks.cluster_endpoint}
+      interruptionQueue: ${module.karpenter.queue_name}
+    EOT
+  ]
+
+  lifecycle {
+    ignore_changes = [
+      repository_password
+    ]
+  }
+}
diff --git a/patterns/karpenter-mng/example.yaml b/patterns/karpenter-mng/example.yaml
new file mode 100644
index 0000000000..0ac0500df7
--- /dev/null
+++ b/patterns/karpenter-mng/example.yaml
@@ -0,0 +1,21 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: inflate
+spec:
+  replicas: 0
+  selector:
+    matchLabels:
+      app: inflate
+  template:
+    metadata:
+      labels:
+        app: inflate
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: inflate
+          image: public.ecr.aws/eks-distro/kubernetes/pause:3.7
+          resources:
+            requests:
+              cpu: 1
diff --git a/patterns/karpenter-mng/karpenter.yaml b/patterns/karpenter-mng/karpenter.yaml
new file mode 100644
index 0000000000..7267db986b
--- /dev/null
+++ b/patterns/karpenter-mng/karpenter.yaml
@@ -0,0 +1,44 @@
+---
+apiVersion: karpenter.k8s.aws/v1beta1
+kind: EC2NodeClass
+metadata:
+  name: default
+spec:
+  amiFamily: AL2
+  role: ex-karpenter-mng
+  subnetSelectorTerms:
+    - tags:
+        karpenter.sh/discovery: ex-karpenter-mng
+  securityGroupSelectorTerms:
+    - tags:
+        karpenter.sh/discovery: ex-karpenter-mng
+  tags:
+    karpenter.sh/discovery: ex-karpenter-mng
+---
+apiVersion: karpenter.sh/v1beta1
+kind: NodePool
+metadata:
+  name: default
+spec:
+  template:
+    spec:
+      nodeClassRef:
+        name: default
+      requirements:
+        - key: "karpenter.k8s.aws/instance-category"
+          operator: In
+          values: ["c", "m", "r"]
+        - key: "karpenter.k8s.aws/instance-cpu"
+          operator: In
values: ["4", "8", "16", "32"] + - key: "karpenter.k8s.aws/instance-hypervisor" + operator: In + values: ["nitro"] + - key: "karpenter.k8s.aws/instance-generation" + operator: Gt + values: ["2"] + limits: + cpu: 1000 + disruption: + consolidationPolicy: WhenEmpty + consolidateAfter: 30s diff --git a/patterns/karpenter-mng/main.tf b/patterns/karpenter-mng/main.tf new file mode 100644 index 0000000000..c7faab9153 --- /dev/null +++ b/patterns/karpenter-mng/main.tf @@ -0,0 +1,68 @@ +terraform { + required_version = ">= 1.3" + + required_providers { + aws = { + source = "hashicorp/aws" + version = ">= 5.34" + } + helm = { + source = "hashicorp/helm" + version = ">= 2.9" + } + } + + # ## Used for end-to-end testing on project; update to suit your needs + # backend "s3" { + # bucket = "terraform-ssp-github-actions-state" + # region = "us-west-2" + # key = "e2e/karpenter-mng/terraform.tfstate" + # } +} + +provider "aws" { + region = local.region +} + +# Required for public ECR where Karpenter artifacts are hosted +provider "aws" { + region = "us-east-1" + alias = "virginia" +} + +provider "helm" { + kubernetes { + host = module.eks.cluster_endpoint + cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } + } +} + +################################################################################ +# Common data/locals +################################################################################ + +data "aws_ecrpublic_authorization_token" "token" { + provider = aws.virginia +} + +data "aws_availability_zones" "available" {} + +locals { + name = "ex-${basename(path.cwd)}" + region = "us-west-2" + + vpc_cidr = "10.0.0.0/16" + azs = slice(data.aws_availability_zones.available.names, 0, 3) + + tags = { + Blueprint = local.name + GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" + } +} diff --git a/patterns/karpenter-mng/vpc.tf b/patterns/karpenter-mng/vpc.tf new file mode 100644 index 0000000000..2eb92e2dc8 --- /dev/null +++ b/patterns/karpenter-mng/vpc.tf @@ -0,0 +1,26 @@ +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 5.0" + + name = local.name + cidr = local.vpc_cidr + + azs = local.azs + private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)] + public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)] + + enable_nat_gateway = true + single_nat_gateway = true + + public_subnet_tags = { + "kubernetes.io/role/elb" = 1 + } + + private_subnet_tags = { + "kubernetes.io/role/internal-elb" = 1 + # Tags subnets for Karpenter auto-discovery + "karpenter.sh/discovery" = local.name + } + + tags = local.tags +} diff --git a/patterns/karpenter/README.md b/patterns/karpenter/README.md index 7b03643de0..95f26ff52c 100644 --- a/patterns/karpenter/README.md +++ b/patterns/karpenter/README.md @@ -1,4 +1,4 @@ -# Karpenter +# Karpenter on EKS Fargate This pattern demonstrates how to provision Karpenter on a serverless cluster (serverless data plane) using Fargate Profiles.