From f900d5b2a829fe5432d370e15499c644cb73ea6a Mon Sep 17 00:00:00 2001 From: "docs-sourcer[bot]" <99042413+docs-sourcer[bot]@users.noreply.github.com> Date: Thu, 18 Sep 2025 00:52:46 +0000 Subject: [PATCH] Updated with the [latest changes](https://github.com/gruntwork-io/terraform-aws-eks/releases/tag/v2.1.0) from the `terraform-aws-eks@v2.1.0` source branch. --- .../eks-alb-ingress-controller-iam-policy.md | 20 +++--- .../eks-alb-ingress-controller.md | 22 +++--- .../eks-aws-auth-merger.md | 42 ++++++------ .../eks-cloudwatch-agent.md | 16 ++--- .../eks-cluster-control-plane.md | 26 +++---- .../eks-cluster-managed-workers.md | 43 ++++++++---- .../eks-cluster-workers-cross-access.md | 16 ++--- .../eks-cluster-workers.md | 32 ++++----- .../eks-container-logs/eks-container-logs.md | 18 ++--- .../eks-ebs-csi-driver/eks-ebs-csi-driver.md | 18 ++--- .../eks-fargate-container-logs.md | 18 ++--- ...-assume-role-policy-for-service-account.md | 18 ++--- .../eks-k8s-argocd/eks-k8s-argocd.md | 20 +++--- .../eks-k8s-cluster-autoscaler-iam-policy.md | 20 +++--- .../eks-k8s-cluster-autoscaler.md | 20 +++--- .../eks-k8s-external-dns-iam-policy.md | 20 +++--- .../eks-k8s-external-dns.md | 20 +++--- .../eks-k8s-karpenter/eks-k8s-karpenter.md | 68 ++++++++++--------- .../eks-k8s-role-mapping.md | 20 +++--- .../eks-scripts/eks-scripts.md | 18 ++--- .../eks-vpc-tags/eks-vpc-tags.md | 16 ++--- 21 files changed, 265 insertions(+), 246 deletions(-) diff --git a/docs/reference/modules/terraform-aws-eks/eks-alb-ingress-controller-iam-policy/eks-alb-ingress-controller-iam-policy.md b/docs/reference/modules/terraform-aws-eks/eks-alb-ingress-controller-iam-policy/eks-alb-ingress-controller-iam-policy.md index 003eb2202f..2cd974f869 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-alb-ingress-controller-iam-policy/eks-alb-ingress-controller-iam-policy.md +++ b/docs/reference/modules/terraform-aws-eks/eks-alb-ingress-controller-iam-policy/eks-alb-ingress-controller-iam-policy.md @@ -9,11 +9,11 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # ALB Ingress Controller IAM Policy Module -View Source +View Source Release Notes @@ -23,14 +23,14 @@ defines the minimal set of permissions necessary for the [AWS ALB Ingress Controller](https://github.com/kubernetes-sigs/aws-alb-ingress-controller). This policy can then be attached to EC2 instances or IAM roles so that the controller deployed has enough permissions to manage an ALB. -See [the eks-alb-ingress-controller module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-alb-ingress-controller) for a module that deploys the Ingress +See [the eks-alb-ingress-controller module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-alb-ingress-controller) for a module that deploys the Ingress Controller on to your EKS cluster. ## Attaching IAM policy to workers To allow the ALB Ingress Controller to manage ALBs, it needs IAM permissions to use the AWS API to manage ALBs. Currently, the way to grant Pods IAM privileges is to use the worker IAM profiles provisioned by [the -eks-cluster-workers module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-workers/README.md#how-do-you-add-additional-iam-policies). 
+eks-cluster-workers module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-workers/README.md#how-do-you-add-additional-iam-policies). The Terraform templates in this module create an IAM policy that has the required permissions. You then need to use an [aws_iam_policy_attachment](https://www.terraform.io/docs/providers/aws/r/iam_policy_attachment.html) to attach that @@ -64,7 +64,7 @@ resource "aws_iam_role_policy_attachment" "attach_alb_ingress_controller_iam_pol module "eks_alb_ingress_controller_iam_policy" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-alb-ingress-controller-iam-policy?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-alb-ingress-controller-iam-policy?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -110,7 +110,7 @@ module "eks_alb_ingress_controller_iam_policy" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-alb-ingress-controller-iam-policy?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-alb-ingress-controller-iam-policy?ref=v2.1.0" } inputs = { @@ -241,11 +241,11 @@ The name of the IAM policy created with the permissions for the ALB ingress cont diff --git a/docs/reference/modules/terraform-aws-eks/eks-alb-ingress-controller/eks-alb-ingress-controller.md b/docs/reference/modules/terraform-aws-eks/eks-alb-ingress-controller/eks-alb-ingress-controller.md index 9ef5158af5..3b85552a30 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-alb-ingress-controller/eks-alb-ingress-controller.md +++ b/docs/reference/modules/terraform-aws-eks/eks-alb-ingress-controller/eks-alb-ingress-controller.md @@ -9,11 +9,11 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # ALB Ingress Controller Module -View Source +View Source Release Notes @@ -110,7 +110,7 @@ correctly. You can use the `alb.ingress.kubernetes.io/subnets` annotation on `Ingress` resources to specify which subnets the controller should configure the ALB for. -You can also omit the `alb.ingress.kubernetes.io/subnets` annotation, and the controller will [automatically discover subnets](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/controller/config/#subnet-auto-discovery) based on their tags. This method should work "out of the box", so long as you are using the [`eks-vpc-tags`](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-vpc-tags) module to tag your VPC subnets. +You can also omit the `alb.ingress.kubernetes.io/subnets` annotation, and the controller will [automatically discover subnets](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/controller/config/#subnet-auto-discovery) based on their tags. This method should work "out of the box", so long as you are using the [`eks-vpc-tags`](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-vpc-tags) module to tag your VPC subnets. ### Security Groups @@ -125,7 +125,7 @@ nodes. 
### IAM permissions The container deployed in this module requires IAM permissions to manage ALB resources. See [the -eks-alb-ingress-controller-iam-policy module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-alb-ingress-controller-iam-policy) for more information. +eks-alb-ingress-controller-iam-policy module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-alb-ingress-controller-iam-policy) for more information. ## Using the Ingress Controller @@ -200,7 +200,7 @@ nature of the controller in provisioning the ALBs. The AWS ALB Ingress Controller has first class support for [external-dns](https://github.com/kubernetes-incubator/external-dns), a third party tool that configures external DNS providers with domains to route to `Services` and `Ingresses` in Kubernetes. See our [eks-k8s-external-dns -module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-k8s-external-dns) for more information on how to setup the tool. +module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-k8s-external-dns) for more information on how to setup the tool. ## How do I deploy the Pods to Fargate? @@ -234,7 +234,7 @@ instances under the hood, and thus the ALB can not be configured to route by ins module "eks_alb_ingress_controller" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-alb-ingress-controller?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-alb-ingress-controller?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -386,7 +386,7 @@ module "eks_alb_ingress_controller" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-alb-ingress-controller?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-alb-ingress-controller?ref=v2.1.0" } inputs = { @@ -537,11 +537,11 @@ inputs = { diff --git a/docs/reference/modules/terraform-aws-eks/eks-aws-auth-merger/eks-aws-auth-merger.md b/docs/reference/modules/terraform-aws-eks/eks-aws-auth-merger/eks-aws-auth-merger.md index fff5f2fc8b..02edeaf48a 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-aws-auth-merger/eks-aws-auth-merger.md +++ b/docs/reference/modules/terraform-aws-eks/eks-aws-auth-merger/eks-aws-auth-merger.md @@ -9,13 +9,13 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # EKS AWS Auth Merger -View Source +View Source -Release Notes +Release Notes This module contains a go CLI, docker container, and terraform module for deploying a Kubernetes controller for managing mappings between AWS IAM roles and users to RBAC groups in Kubernetes. The official way to manage the mapping is to add values in a single, central `ConfigMap`. 
This module allows you to break up the central `ConfigMap` across multiple, separate `ConfigMaps` each configuring a subset of the mappings you ultimately want to use, allowing you to update entries in the `ConfigMap` in isolated modules (e.g., when you add a new IAM role in a separate module from the EKS cluster). The `aws-auth-merger` watches for `aws-auth` compatible `ConfigMaps` that can be merged to manage the `aws-auth` authentication `ConfigMap` for EKS. @@ -35,21 +35,21 @@ This repo is a part of [the Gruntwork Infrastructure as Code Library](https://gr ### Core concepts -* *[What is Kubernetes RBAC?](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-k8s-role-mapping/README.md#what-is-kubernetes-role-based-access-control-rbac)*: overview of Kubernetes RBAC, the underlying system managing authentication and authorization in Kubernetes. +* *[What is Kubernetes RBAC?](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-k8s-role-mapping/README.md#what-is-kubernetes-role-based-access-control-rbac)*: overview of Kubernetes RBAC, the underlying system managing authentication and authorization in Kubernetes. -* *[What is AWS IAM role?](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-k8s-role-mapping/README.md#what-is-aws-iam-role)*: overview of AWS IAM Roles, the underlying system managing authentication and authorization in AWS. +* *[What is AWS IAM role?](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-k8s-role-mapping/README.md#what-is-aws-iam-role)*: overview of AWS IAM Roles, the underlying system managing authentication and authorization in AWS. * *[Managing users or IAM roles for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html)*: The official AWS docs on how the `aws-auth` Kubernetes `ConfigMap` works. -* *[What is the aws-auth-merger?](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-aws-auth-merger/core-concepts.md#what-is-the-aws-auth-merger)*: overview of the `aws-auth-merger` and how it works to manage the `aws-auth` Kubernetes `ConfigMap`. +* *[What is the aws-auth-merger?](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-aws-auth-merger/core-concepts.md#what-is-the-aws-auth-merger)*: overview of the `aws-auth-merger` and how it works to manage the `aws-auth` Kubernetes `ConfigMap`. ### Repo organization -* [modules](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules. +* [modules](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules): the main implementation code for this repo, broken down into multiple standalone, orthogonal submodules. -* [examples](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/examples): This folder contains working examples of how to use the submodules. +* [examples](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/examples): This folder contains working examples of how to use the submodules. -* [test](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/test): Automated tests for the modules and examples. +* [test](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/test): Automated tests for the modules and examples. 
## Deploy @@ -57,7 +57,7 @@ This repo is a part of [the Gruntwork Infrastructure as Code Library](https://gr If you just want to try this repo out for experimenting and learning, check out the following resources: -* [examples folder](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/examples): The `examples` folder contains sample code optimized for learning, experimenting, and testing (but not production usage). +* [examples folder](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/examples): The `examples` folder contains sample code optimized for learning, experimenting, and testing (but not production usage). ### Production deployment @@ -69,15 +69,15 @@ If you want to deploy this repo in production, check out the following resources ## Manage -* [How to deploy and use the aws-auth-merger](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-aws-auth-merger/core-concepts.md#how-do-i-use-the-aws-auth-merger) +* [How to deploy and use the aws-auth-merger](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-aws-auth-merger/core-concepts.md#how-do-i-use-the-aws-auth-merger) -* [How to handle conflicts with automatic updates to the aws-auth ConfigMap by EKS](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-aws-auth-merger/core-concepts.md#how-do-i-handle-conflicts-with-automatic-updates-by-eks) +* [How to handle conflicts with automatic updates to the aws-auth ConfigMap by EKS](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-aws-auth-merger/core-concepts.md#how-do-i-handle-conflicts-with-automatic-updates-by-eks) -* [How to restrict users to specific actions on the EKS cluster](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-k8s-role-mapping/README.md#restricting-specific-actions) +* [How to restrict users to specific actions on the EKS cluster](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-k8s-role-mapping/README.md#restricting-specific-actions) -* [How to restrict users to specific namespaces on the EKS cluster](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-k8s-role-mapping/README.md#restricting-by-namespace) +* [How to restrict users to specific namespaces on the EKS cluster](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-k8s-role-mapping/README.md#restricting-by-namespace) -* [How to authenticate kubectl to EKS](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/core-concepts.md#how-to-authenticate-kubectl) +* [How to authenticate kubectl to EKS](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/core-concepts.md#how-to-authenticate-kubectl) ## Sample Usage @@ -92,7 +92,7 @@ If you want to deploy this repo in production, check out the following resources module "eks_aws_auth_merger" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-aws-auth-merger?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-aws-auth-merger?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -214,7 +214,7 @@ module "eks_aws_auth_merger" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-aws-auth-merger?ref=v1.4.0" + source = 
"git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-aws-auth-merger?ref=v2.1.0" } inputs = { @@ -658,11 +658,11 @@ The name of the namespace that is used. If create_namespace is true, this output diff --git a/docs/reference/modules/terraform-aws-eks/eks-cloudwatch-agent/eks-cloudwatch-agent.md b/docs/reference/modules/terraform-aws-eks/eks-cloudwatch-agent/eks-cloudwatch-agent.md index 59a066ee2d..d984e30b0e 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-cloudwatch-agent/eks-cloudwatch-agent.md +++ b/docs/reference/modules/terraform-aws-eks/eks-cloudwatch-agent/eks-cloudwatch-agent.md @@ -9,11 +9,11 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # EKS CloudWatch Agent Module -View Source +View Source Release Notes @@ -67,7 +67,7 @@ docs](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContainerIn module "eks_cloudwatch_agent" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cloudwatch-agent?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cloudwatch-agent?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -157,7 +157,7 @@ module "eks_cloudwatch_agent" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cloudwatch-agent?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cloudwatch-agent?ref=v2.1.0" } inputs = { @@ -246,11 +246,11 @@ inputs = { diff --git a/docs/reference/modules/terraform-aws-eks/eks-cluster-control-plane/eks-cluster-control-plane.md b/docs/reference/modules/terraform-aws-eks/eks-cluster-control-plane/eks-cluster-control-plane.md index 75433ff9b1..6b48002f4b 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-cluster-control-plane/eks-cluster-control-plane.md +++ b/docs/reference/modules/terraform-aws-eks/eks-cluster-control-plane/eks-cluster-control-plane.md @@ -9,11 +9,11 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # EKS Cluster Control Plane Module -View Source +View Source Release Notes @@ -22,7 +22,7 @@ Cluster](https://docs.aws.amazon.com/eks/latest/userguide/clusters.html). This module is responsible for the EKS Control Plane in [the EKS cluster topology](#what-is-an-eks-cluster). You must launch worker nodes in order to be able to schedule pods on your cluster. See the [eks-cluster-workers -module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-workers) for managing EKS worker nodes. +module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-workers) for managing EKS worker nodes. ## What is the EKS Control Plane? 
@@ -46,7 +46,7 @@ Specifically, the control plane consists of: This includes resources like the [`LoadBalancers`](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/). -You can read more about the different components of EKS in [the project README](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/core-concepts.md#what-is-an-eks-cluster). +You can read more about the different components of EKS in [the project README](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/core-concepts.md#what-is-an-eks-cluster). ## What security group rules are created? @@ -134,7 +134,7 @@ role that is being assumed. Specifically, you need to: that role). You can use the -[eks-iam-role-assume-role-policy-for-service-account module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-iam-role-assume-role-policy-for-service-account) to +[eks-iam-role-assume-role-policy-for-service-account module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-iam-role-assume-role-policy-for-service-account) to construct the policy using a more convenient interface. Refer to the module documentation for more info. Once you have an IAM Role that can be assumed by the Kubernetes Service Account, you can configure your Pods to exchange @@ -242,7 +242,7 @@ Some additional notes on using Fargate: [the `aws_eks_fargate_profile` resource](https://www.terraform.io/docs/providers/aws/r/eks_fargate_profile.html) to provision Fargate Profiles with Terraform). The Pod Execution Role created by the module may be reused for other Fargate Profiles. -* Fargate does not support DaemonSets. This means that you can't rely on the [eks-container-logs](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-container-logs) +* Fargate does not support DaemonSets. This means that you can't rely on the [eks-container-logs](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-container-logs) module to forward logs to CloudWatch. Instead, you need to manually configure a sidecar `fluentd` container that forwards the log entries to CloudWatch Logs. Refer to [this AWS blog post](https://aws.amazon.com/blogs/containers/how-to-capture-application-logs-when-using-amazon-eks-on-aws-fargate/) @@ -284,7 +284,7 @@ If you omit the `addon_version`, correct versions are automatically applied. Note that you must update the nodes to use the corresponding `kubelet` version as well. This means that when you update minor versions, you will also need to update the AMIs used by the worker nodes to match the version and rotate the workers. For more information on rotating worker nodes, refer to [How do I roll out an update to the -instances?](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-workers/README.md#how-do-i-roll-out-an-update-to-the-instances) in the `eks-cluster-workers` +instances?](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-workers/README.md#how-do-i-roll-out-an-update-to-the-instances) in the `eks-cluster-workers` module README. 
### Detailed upgrade steps @@ -417,7 +417,7 @@ approaches: module "eks_cluster_control_plane" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-control-plane?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-control-plane?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -849,7 +849,7 @@ module "eks_cluster_control_plane" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-control-plane?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-control-plane?ref=v2.1.0" } inputs = { @@ -2403,11 +2403,11 @@ The path to the kubergrunt binary, if in use. diff --git a/docs/reference/modules/terraform-aws-eks/eks-cluster-managed-workers/eks-cluster-managed-workers.md b/docs/reference/modules/terraform-aws-eks/eks-cluster-managed-workers/eks-cluster-managed-workers.md index 0cfa65d76f..e9fea75cb4 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-cluster-managed-workers/eks-cluster-managed-workers.md +++ b/docs/reference/modules/terraform-aws-eks/eks-cluster-managed-workers/eks-cluster-managed-workers.md @@ -9,23 +9,23 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # EKS Cluster Managed Workers Module -View Source +View Source -Release Notes +Release Notes -**This module provisions [EKS Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html), as opposed to self managed ASGs. See the [eks-cluster-workers](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-workers) module for a module to provision self managed worker groups.** +**This module provisions [EKS Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html), as opposed to self managed ASGs. See the [eks-cluster-workers](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-workers) module for a module to provision self managed worker groups.** This Terraform module launches worker nodes using [EKS Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) that you can use to run Kubernetes Pods and Deployments. This module is responsible for the EKS Worker Nodes in [the EKS cluster -topology](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-control-plane/README.md#what-is-an-eks-cluster). You must launch a control plane in order -for the worker nodes to function. See the [eks-cluster-control-plane module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-control-plane) for +topology](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-control-plane/README.md#what-is-an-eks-cluster). You must launch a control plane in order +for the worker nodes to function. See the [eks-cluster-control-plane module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-control-plane) for managing an EKS control plane. 
## Differences with self managed workers @@ -61,7 +61,7 @@ Here is a list of additional tradeoffs to consider between the two flavors: This module will not automatically scale in response to resource usage by default, the `autoscaling_group_configurations.*.max_size` option is only used to give room for new instances during rolling updates. -To enable auto-scaling in response to resource utilization, deploy the [Kubernetes Cluster Autoscaler module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-k8s-cluster-autoscaler). +To enable auto-scaling in response to resource utilization, deploy the [Kubernetes Cluster Autoscaler module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-k8s-cluster-autoscaler). Note that the cluster autoscaler supports ASGs that manage nodes in a single availability zone or ASGs that manage nodes in multiple availability zones. However, there is a caveat: @@ -159,7 +159,7 @@ The following are the steps you can take to perform a blue-green release for thi module "eks_cluster_managed_workers" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-managed-workers?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-managed-workers?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -312,6 +312,10 @@ module "eks_cluster_managed_workers" { # limitations with for_each. node_group_names = null + # Configuration block for node auto repair in EKS node groups. If null, auto + # repair will not be configured. + node_repair_config = null + # ARN of permissions boundary to apply to the worker IAM role - the IAM role # created for the EKS worker nodes. worker_iam_role_permissions_boundary = null @@ -331,7 +335,7 @@ module "eks_cluster_managed_workers" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-managed-workers?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-managed-workers?ref=v2.1.0" } inputs = { @@ -487,6 +491,10 @@ inputs = { # limitations with for_each. node_group_names = null + # Configuration block for node auto repair in EKS node groups. If null, auto + # repair will not be configured. + node_repair_config = null + # ARN of permissions boundary to apply to the worker IAM role - the IAM role # created for the EKS worker nodes. worker_iam_role_permissions_boundary = null @@ -934,6 +942,15 @@ The names of the node groups. When null, this value is automatically calculated + + + +Configuration block for node auto repair in EKS node groups. If null, auto repair will not be configured. 
+ + + + + @@ -992,11 +1009,11 @@ Map of Node Group names to ARNs of the created EKS Node Groups diff --git a/docs/reference/modules/terraform-aws-eks/eks-cluster-workers-cross-access/eks-cluster-workers-cross-access.md b/docs/reference/modules/terraform-aws-eks/eks-cluster-workers-cross-access/eks-cluster-workers-cross-access.md index 60a6cd8355..fddb64dda0 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-cluster-workers-cross-access/eks-cluster-workers-cross-access.md +++ b/docs/reference/modules/terraform-aws-eks/eks-cluster-workers-cross-access/eks-cluster-workers-cross-access.md @@ -9,11 +9,11 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # EKS Cluster Workers Cross Access Module -View Source +View Source Release Notes @@ -63,7 +63,7 @@ module. module "eks_cluster_workers_cross_access" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-workers-cross-access?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-workers-cross-access?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -101,7 +101,7 @@ module "eks_cluster_workers_cross_access" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-workers-cross-access?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-workers-cross-access?ref=v2.1.0" } inputs = { @@ -138,11 +138,11 @@ inputs = { diff --git a/docs/reference/modules/terraform-aws-eks/eks-cluster-workers/eks-cluster-workers.md b/docs/reference/modules/terraform-aws-eks/eks-cluster-workers/eks-cluster-workers.md index b84a8ae6a9..04b0df72c1 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-cluster-workers/eks-cluster-workers.md +++ b/docs/reference/modules/terraform-aws-eks/eks-cluster-workers/eks-cluster-workers.md @@ -9,39 +9,39 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # EKS Cluster Workers Module -View Source +View Source Release Notes -**This module provisions self managed ASGs, in contrast to [EKS Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html). See the [eks-cluster-managed-workers](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-managed-workers) module for a module to deploy Managed Node Groups.** +**This module provisions self managed ASGs, in contrast to [EKS Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html). 
See the [eks-cluster-managed-workers](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-managed-workers) module for a module to deploy Managed Node Groups.** This Terraform Module launches worker nodes for an [Elastic Container Service for Kubernetes Cluster](https://docs.aws.amazon.com/eks/latest/userguide/clusters.html) that you can use to run Kubernetes Pods and Deployments. This module is responsible for the EKS Worker Nodes in [the EKS cluster -topology](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-control-plane/README.md#what-is-an-eks-cluster). You must launch a control plane in order -for the worker nodes to function. See the [eks-cluster-control-plane module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-control-plane) for +topology](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-control-plane/README.md#what-is-an-eks-cluster). You must launch a control plane in order +for the worker nodes to function. See the [eks-cluster-control-plane module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-control-plane) for managing an EKS control plane. ## Differences with managed node groups See the \[Differences with self managed workers] section in the documentation for [eks-cluster-managed-workers -module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-managed-workers) for a detailed overview of differences with EKS Managed Node Groups. +module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-managed-workers) for a detailed overview of differences with EKS Managed Node Groups. ## What should be included in the user-data script? In order for the EKS worker nodes to function, it must register itself to the Kubernetes API run by the EKS control plane. This is handled by the bootstrap script provided in the EKS optimized AMI. The user-data script should call the bootstrap script at some point during its execution. You can get this information from the [eks-cluster-control-plane -module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-control-plane). +module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-control-plane). For an example of a user data script, see the [eks-cluster example's user-data.sh -script](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/examples/eks-cluster-with-iam-role-mappings/user-data/user-data.sh). +script](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/examples/eks-cluster-with-iam-role-mappings/user-data/user-data.sh). You can read more about the bootstrap script in [the official documentation for EKS](https://docs.aws.amazon.com/eks/latest/userguide/launch-workers.html). @@ -144,7 +144,7 @@ EOF ``` **Note**: The IAM policies you add will apply to ALL Pods running on these EC2 Instances. See the [How do I associate -IAM roles to the Pods?](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-control-plane/README.md#how-do-i-associate-iam-roles-to-the-pods) section of the +IAM roles to the Pods?](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-control-plane/README.md#how-do-i-associate-iam-roles-to-the-pods) section of the `eks-cluster-control-plane` module README for more fine-grained allocation of IAM credentials to Pods. ## How do I SSH into the nodes? 
@@ -228,7 +228,7 @@ The following are the steps you can take to perform a blue-green release for thi This module will not automatically scale in response to resource usage by default, the `autoscaling_group_configurations.*.max_size` option is only used to give room for new instances during rolling updates. To enable auto-scaling in response to resource utilization, you must set the `include_autoscaler_discovery_tags` input -variable to `true` and also deploy the [Kubernetes Cluster Autoscaler module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-k8s-cluster-autoscaler). +variable to `true` and also deploy the [Kubernetes Cluster Autoscaler module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-k8s-cluster-autoscaler). Note that the cluster autoscaler supports ASGs that manage nodes in a single availability zone or ASGs that manage nodes in multiple availability zones. However, there is a caveat: @@ -253,7 +253,7 @@ Refer to the [Kubernetes Autoscaler](https://github.com/kubernetes/autoscaler) d module "eks_cluster_workers" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-workers?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-workers?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -560,7 +560,7 @@ module "eks_cluster_workers" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-workers?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-cluster-workers?ref=v2.1.0" } inputs = { @@ -1671,11 +1671,11 @@ AWS ID of the security group created for the EKS worker nodes. diff --git a/docs/reference/modules/terraform-aws-eks/eks-container-logs/eks-container-logs.md b/docs/reference/modules/terraform-aws-eks/eks-container-logs/eks-container-logs.md index 433a1dfcd2..b25b491aaa 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-container-logs/eks-container-logs.md +++ b/docs/reference/modules/terraform-aws-eks/eks-container-logs/eks-container-logs.md @@ -9,11 +9,11 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # EKS Container Logs Module -View Source +View Source Release Notes @@ -25,7 +25,7 @@ Kinesis Firehose. This module uses the community helm chart, with a set of best practices inputs. **This module is for setting up log aggregation for EKS Pods on EC2 workers (self-managed or managed node groups). For -Fargate pods, take a look at the [eks-fargate-container-logs](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-fargate-container-logs) module.** +Fargate pods, take a look at the [eks-fargate-container-logs](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-fargate-container-logs) module.** ## How does this work? 
@@ -151,7 +151,7 @@ fields @timestamp, @message module "eks_container_logs" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-container-logs?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-container-logs?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -375,7 +375,7 @@ module "eks_container_logs" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-container-logs?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-container-logs?ref=v2.1.0" } inputs = { @@ -598,11 +598,11 @@ inputs = { diff --git a/docs/reference/modules/terraform-aws-eks/eks-ebs-csi-driver/eks-ebs-csi-driver.md b/docs/reference/modules/terraform-aws-eks/eks-ebs-csi-driver/eks-ebs-csi-driver.md index a3fe083a6c..5a84885eb9 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-ebs-csi-driver/eks-ebs-csi-driver.md +++ b/docs/reference/modules/terraform-aws-eks/eks-ebs-csi-driver/eks-ebs-csi-driver.md @@ -9,17 +9,17 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # EKS EBS CSI Driver Module -View Source +View Source Release Notes This Terraform module installs the [Amazon EBS CSI Driver](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html) to an EKS cluster as an EKS Managed AddOn. The EBS CSI Driver manages the lifecycle of EBS Volumes when used as Kubernetes Volumes. The EBS CSI Driver is enabled by default in EKS clusters >= `1.23`, but not installed. The EBS CSI Driver was installed by default on earlier versions of EKS. This module will create all of the required resources to run the EBS CSI Driver and can be configured as needed without the bounds of the EBS CSI Driver as a Managed AddOn. See the [official documentation](https://docs.aws.amazon.com/eks/latest/userguide/managing-ebs-csi.html) for more details. -This module is exposed directly on the [eks-cluster-control](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-control-plane/) module as with the other available EKS AddOns, but this module can also be used independently by toggling the `enable_ebs_csi_driver` to `false` (`false` by default on the `eks-control-plane` module) on the `eks-control-plane` module and instead declaring this module elsewhere within the codebase. +This module is exposed directly on the [eks-cluster-control](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-control-plane/) module as with the other available EKS AddOns, but this module can also be used independently by toggling the `enable_ebs_csi_driver` to `false` (`false` by default on the `eks-control-plane` module) on the `eks-control-plane` module and instead declaring this module elsewhere within the codebase. > NOTE: currently enabling/deploying this module in a new cluster will take ~15 mins to deploy due to a limitation on the AWS side. 
The health status of the AddOn itself isn't reported in a timely manner which causes the deployment to take extra time even though the AddOn is deployed and healthy. Please be aware of this as it will increase the initial deployment time of a new EKS cluster. @@ -36,7 +36,7 @@ This module is exposed directly on the [eks-cluster-control](https://github.com/ module "eks_ebs_csi_driver" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-ebs-csi-driver?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-ebs-csi-driver?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -110,7 +110,7 @@ module "eks_ebs_csi_driver" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-ebs-csi-driver?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-ebs-csi-driver?ref=v2.1.0" } inputs = { @@ -383,11 +383,11 @@ The latest available version of the EBS CSI AddOn. diff --git a/docs/reference/modules/terraform-aws-eks/eks-fargate-container-logs/eks-fargate-container-logs.md b/docs/reference/modules/terraform-aws-eks/eks-fargate-container-logs/eks-fargate-container-logs.md index b582c0d23e..a5ded664df 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-fargate-container-logs/eks-fargate-container-logs.md +++ b/docs/reference/modules/terraform-aws-eks/eks-fargate-container-logs/eks-fargate-container-logs.md @@ -9,11 +9,11 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # EKS Fargate Container Logs Module -View Source +View Source Release Notes @@ -25,7 +25,7 @@ Bit](https://fluentbit.io/) instance that runs on Fargate worker nodes. This all aggregation on Fargate Pods in EKS without setting up a side car container. **This module is for setting up log aggregation for EKS Fargate Pods. For other pods, take a look at the -[eks-container-logs](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-container-logs) module.** +[eks-container-logs](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-container-logs) module.** ## How does this work? 
@@ -106,7 +106,7 @@ fields @timestamp, @message module "eks_fargate_container_logs" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-fargate-container-logs?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-fargate-container-logs?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -211,7 +211,7 @@ module "eks_fargate_container_logs" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-fargate-container-logs?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-fargate-container-logs?ref=v2.1.0" } inputs = { @@ -666,11 +666,11 @@ The ID of the Kubernetes ConfigMap containing the logging configuration. This ca diff --git a/docs/reference/modules/terraform-aws-eks/eks-iam-role-assume-role-policy-for-service-account/eks-iam-role-assume-role-policy-for-service-account.md b/docs/reference/modules/terraform-aws-eks/eks-iam-role-assume-role-policy-for-service-account/eks-iam-role-assume-role-policy-for-service-account.md index d7aaf84f8b..aeabb993f9 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-iam-role-assume-role-policy-for-service-account/eks-iam-role-assume-role-policy-for-service-account.md +++ b/docs/reference/modules/terraform-aws-eks/eks-iam-role-assume-role-policy-for-service-account/eks-iam-role-assume-role-policy-for-service-account.md @@ -9,11 +9,11 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # EKS IAM Role Assume Role Policy for Kubernetes Service Accounts -View Source +View Source Release Notes @@ -22,7 +22,7 @@ Kubernetes Service Accounts. This requires a compatible EKS cluster that support Accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) feature. See the [corresponding section of the eks-cluster-control-plane module -README](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-control-plane/README.md#how-do-i-associate-iam-roles-to-the-pods) for information on how to set +README](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-control-plane/README.md#how-do-i-associate-iam-roles-to-the-pods) for information on how to set up IRSA and how it works. ## Sample Usage @@ -38,7 +38,7 @@ up IRSA and how it works. 
module "eks_iam_role_assume_role_policy_for_service_account" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-iam-role-assume-role-policy-for-service-account?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-iam-role-assume-role-policy-for-service-account?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -89,7 +89,7 @@ module "eks_iam_role_assume_role_policy_for_service_account" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-iam-role-assume-role-policy-for-service-account?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-iam-role-assume-role-policy-for-service-account?ref=v2.1.0" } inputs = { @@ -216,11 +216,11 @@ JSON value for IAM Role Assume Role Policy that allows Kubernetes Service Accoun diff --git a/docs/reference/modules/terraform-aws-eks/eks-k8s-argocd/eks-k8s-argocd.md b/docs/reference/modules/terraform-aws-eks/eks-k8s-argocd/eks-k8s-argocd.md index 39a6aa49e4..fc0fe212c8 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-k8s-argocd/eks-k8s-argocd.md +++ b/docs/reference/modules/terraform-aws-eks/eks-k8s-argocd/eks-k8s-argocd.md @@ -9,11 +9,11 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # Gruntwork GitOps "GruntOps" -View Source +View Source Release Notes @@ -21,9 +21,9 @@ GitOps is an operational framework that is built around DevOps best practices fo ## Getting Started -To use this module, you will need to have a running EKS cluster prior to deploying this module. See the [Argo CD Example](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/examples/eks-cluster-with-argocd/) for an example of how to deploy this module. +To use this module, you will need to have a running EKS cluster prior to deploying this module. See the [Argo CD Example](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/examples/eks-cluster-with-argocd/) for an example of how to deploy this module. -This module deploys the Helm Chart version of [Argo CD](https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd). All available configurations can be found in the repository of the [Argo CD Helm Chart](https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd). The default configuration of this module will deploy the Argo CD Helm Chart with it's default values. To configure the Helm Chart to meet the needs of your deployment, configure the `argocd_chart_additional_values` variable in the [variables](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-k8s-argocd/variables.tf) file. +This module deploys the Helm Chart version of [Argo CD](https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd). All available configurations can be found in the repository of the [Argo CD Helm Chart](https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd). The default configuration of this module will deploy the Argo CD Helm Chart with it's default values. 
To configure the Helm Chart to meet the needs of your deployment, configure the `argocd_chart_additional_values` variable in the [variables](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-k8s-argocd/variables.tf) file. ## Connecting to the Argo CD UI @@ -64,7 +64,7 @@ Detailed documentation coming soon... module "eks_k_8_s_argocd" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-argocd?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-argocd?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # OPTIONAL VARIABLES @@ -161,7 +161,7 @@ module "eks_k_8_s_argocd" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-argocd?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-argocd?ref=v2.1.0" } inputs = { @@ -468,11 +468,11 @@ The status of the Argo CD Helm chart. diff --git a/docs/reference/modules/terraform-aws-eks/eks-k8s-cluster-autoscaler-iam-policy/eks-k8s-cluster-autoscaler-iam-policy.md b/docs/reference/modules/terraform-aws-eks/eks-k8s-cluster-autoscaler-iam-policy/eks-k8s-cluster-autoscaler-iam-policy.md index 44fa3da974..06417092af 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-k8s-cluster-autoscaler-iam-policy/eks-k8s-cluster-autoscaler-iam-policy.md +++ b/docs/reference/modules/terraform-aws-eks/eks-k8s-cluster-autoscaler-iam-policy/eks-k8s-cluster-autoscaler-iam-policy.md @@ -9,11 +9,11 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # K8S Cluster Autoscaler IAM Policy Module -View Source +View Source Release Notes @@ -24,14 +24,14 @@ Autoscaler](https://github.com/kubernetes/autoscaler/blob/b6d53e8/cluster-autosc attached to the EC2 instance profile of the worker nodes in a Kubernetes cluster which will allow the autoscaler to manage scaling up and down EC2 instances in targeted Auto Scaling Groups in response to resource utilization. -See [the eks-k8s-cluster-autoscaler module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-k8s-cluster-autoscaler) for a module that deploys the Cluster +See [the eks-k8s-cluster-autoscaler module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-k8s-cluster-autoscaler) for a module that deploys the Cluster Autoscaler to your EKS cluster. ## Attaching IAM policy to workers To allow the Cluster Autoscaler to manage Auto Scaling Groups, it needs IAM permissions to monitor and adjust them. Currently, the way to grant Pods IAM privileges is to use the worker IAM profiles provisioned by [the -eks-cluster-workers module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-workers/README.md#how-do-you-add-additional-iam-policies). +eks-cluster-workers module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-workers/README.md#how-do-you-add-additional-iam-policies). The Terraform templates in this module create an IAM policy that has the required permissions. 
You then need to use an [aws_iam_policy_attachment](https://www.terraform.io/docs/providers/aws/r/iam_policy_attachment.html) to attach that @@ -66,7 +66,7 @@ resource "aws_iam_role_policy_attachment" "attach_k8s_cluster_autoscaler_iam_pol module "eks_k_8_s_cluster_autoscaler_iam_policy" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-cluster-autoscaler-iam-policy?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-cluster-autoscaler-iam-policy?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -119,7 +119,7 @@ module "eks_k_8_s_cluster_autoscaler_iam_policy" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-cluster-autoscaler-iam-policy?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-cluster-autoscaler-iam-policy?ref=v2.1.0" } inputs = { @@ -266,11 +266,11 @@ The name of the IAM policy created with the permissions for the Kubernetes clust diff --git a/docs/reference/modules/terraform-aws-eks/eks-k8s-cluster-autoscaler/eks-k8s-cluster-autoscaler.md b/docs/reference/modules/terraform-aws-eks/eks-k8s-cluster-autoscaler/eks-k8s-cluster-autoscaler.md index a80d8ffb81..6f09c36c06 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-k8s-cluster-autoscaler/eks-k8s-cluster-autoscaler.md +++ b/docs/reference/modules/terraform-aws-eks/eks-k8s-cluster-autoscaler/eks-k8s-cluster-autoscaler.md @@ -9,11 +9,11 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # K8S Cluster Autoscaler Module -View Source +View Source Release Notes @@ -21,9 +21,9 @@ This Terraform Module installs a [Cluster Autoscaler](https://github.com/kuberne to automatically scale up and down the nodes in a cluster in response to resource utilization. This module is responsible for manipulating each Auto Scaling Group (ASG) that was created by the [EKS cluster -workers](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-workers) module. By default, the ASG is configured to allow zero-downtime +workers](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-workers) module. By default, the ASG is configured to allow zero-downtime deployments but is not configured to scale automatically. You must launch an [EKS control -plane](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-control-plane) with worker nodes for this module to function. +plane](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-control-plane) with worker nodes for this module to function. ## IAM Policy Considerations @@ -62,7 +62,7 @@ variables. 
module "eks_k_8_s_cluster_autoscaler" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-cluster-autoscaler?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-cluster-autoscaler?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -244,7 +244,7 @@ module "eks_k_8_s_cluster_autoscaler" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-cluster-autoscaler?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-cluster-autoscaler?ref=v2.1.0" } inputs = { @@ -425,11 +425,11 @@ inputs = { diff --git a/docs/reference/modules/terraform-aws-eks/eks-k8s-external-dns-iam-policy/eks-k8s-external-dns-iam-policy.md b/docs/reference/modules/terraform-aws-eks/eks-k8s-external-dns-iam-policy/eks-k8s-external-dns-iam-policy.md index 6b6d0c4db3..172e86417a 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-k8s-external-dns-iam-policy/eks-k8s-external-dns-iam-policy.md +++ b/docs/reference/modules/terraform-aws-eks/eks-k8s-external-dns-iam-policy/eks-k8s-external-dns-iam-policy.md @@ -9,11 +9,11 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # K8S External DNS IAM Policy Module -View Source +View Source Release Notes @@ -23,14 +23,14 @@ defines the minimal set of permissions necessary for the [external-dns application](https://github.com/kubernetes-incubator/external-dns). This policy can then be attached to EC2 instances or IAM roles so that the app deployed has enough permissions to manage Route 53 Hosted Zones. -See [the eks-k8s-external-dns module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-k8s-external-dns) for a module that deploys the external-dns +See [the eks-k8s-external-dns module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-k8s-external-dns) for a module that deploys the external-dns application on to your EKS cluster. ## Attaching IAM policy to workers To allow the external-dns app to manage Route 53 Hosted Zones, it needs IAM permissions to use the AWS API to manage the zones. Currently, the way to grant Pods IAM privileges is to use the worker IAM profiles provisioned by [the -eks-cluster-workers module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-workers/README.md#how-do-you-add-additional-iam-policies). +eks-cluster-workers module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-workers/README.md#how-do-you-add-additional-iam-policies). The Terraform templates in this module create an IAM policy that has the required permissions. 
You then need to use an [aws_iam_policy_attachment](https://www.terraform.io/docs/providers/aws/r/iam_policy_attachment.html) to attach that @@ -64,7 +64,7 @@ resource "aws_iam_role_policy_attachment" "attach_k8s_external_dns_iam_policy" { module "eks_k_8_s_external_dns_iam_policy" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-external-dns-iam-policy?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-external-dns-iam-policy?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -110,7 +110,7 @@ module "eks_k_8_s_external_dns_iam_policy" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-external-dns-iam-policy?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-external-dns-iam-policy?ref=v2.1.0" } inputs = { @@ -241,11 +241,11 @@ The name of the IAM policy created with the permissions for the external-dns Kub diff --git a/docs/reference/modules/terraform-aws-eks/eks-k8s-external-dns/eks-k8s-external-dns.md b/docs/reference/modules/terraform-aws-eks/eks-k8s-external-dns/eks-k8s-external-dns.md index dd12bda8c9..388181046e 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-k8s-external-dns/eks-k8s-external-dns.md +++ b/docs/reference/modules/terraform-aws-eks/eks-k8s-external-dns/eks-k8s-external-dns.md @@ -9,11 +9,11 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # K8S External DNS Module -View Source +View Source Release Notes @@ -35,7 +35,7 @@ work, you need to map the domain name to the `Ingress` endpoint, so that request been created and provisioned. However, this can be cumbersome due to the asynchronous nature of Kubernetes operations. For example, if you are using an `Ingress` controller that maps to actual physical loadbalancers in the cloud (e.g the -[ALB Ingress Controller deployed using the eks-alb-ingress-controller module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-alb-ingress-controller)), the +[ALB Ingress Controller deployed using the eks-alb-ingress-controller module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-alb-ingress-controller)), the endpoint may take several minutes before it is available. You will have to wait for that time, continuously polling the `Ingress` resource until the underlying resource is provisioned and the endpoint is available before you can configure the DNS setting. @@ -61,7 +61,7 @@ This module uses [`helm` v3](https://helm.sh/docs/) to deploy the controller to ### IAM permissions The container deployed in this module requires IAM permissions to manage Route 53 Hosted Zones. See [the -eks-k8s-external-dns-iam-policy module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-k8s-external-dns-iam-policy) for more information. +eks-k8s-external-dns-iam-policy module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-k8s-external-dns-iam-policy) for more information. 
## How do I restrict which Hosted Zones the app should manage? @@ -116,7 +116,7 @@ zones_cache_duration = "3h" module "eks_k_8_s_external_dns" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-external-dns?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-external-dns?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -312,7 +312,7 @@ module "eks_k_8_s_external_dns" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-external-dns?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-external-dns?ref=v2.1.0" } inputs = { @@ -507,11 +507,11 @@ inputs = { diff --git a/docs/reference/modules/terraform-aws-eks/eks-k8s-karpenter/eks-k8s-karpenter.md b/docs/reference/modules/terraform-aws-eks/eks-k8s-karpenter/eks-k8s-karpenter.md index 1452a0b228..7ede8499ea 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-k8s-karpenter/eks-k8s-karpenter.md +++ b/docs/reference/modules/terraform-aws-eks/eks-k8s-karpenter/eks-k8s-karpenter.md @@ -9,24 +9,24 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # EKS K8s Karpenter Module -View Source +View Source -Release Notes +Release Notes This Module can be used to deploy [Karpenter](https://karpenter.sh/) as an alternative to the [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/b6d53e8/cluster-autoscaler) for autoscaling capabilities of an EKS cluster. -This module will create all of the necessary resources for a functional installation of Karpenter as well as the installation of Karpenter. This module does not create Karpenter [Provisioners](https://karpenter.sh/v0.27.0/concepts/provisioners/) or [Node Templates](https://karpenter.sh/v0.27.0/concepts/node-templates/), only the installation of the Karpenter Controller. See the [Karpenter Example](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/examples/eks-cluster-with-karpenter/) for an example of how to deploy the additional `CRDs` (Provisioners, Node Templates, etc) to the EKS cluster. +This module will create all of the necessary resources for a functional installation of Karpenter as well as the installation of Karpenter. This module does not create Karpenter [Provisioners](https://karpenter.sh/v0.27.0/concepts/provisioners/) or [Node Templates](https://karpenter.sh/v0.27.0/concepts/node-templates/), only the installation of the Karpenter Controller. See the [Karpenter Example](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/examples/eks-cluster-with-karpenter/) for an example of how to deploy the additional `CRDs` (Provisioners, Node Templates, etc) to the EKS cluster. -> Note: For EKS cluster autoscaling capabilities, either `Karpenter` OR the `cluster-autoscaler` should be used; not both. 
To migrate to using `karpenter` instead of the `cluster-autoscaler` see [Migrating to Karpenter from the Cluster Autoscaler](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-k8s-karpenter/migrating-to-karpenter-from-cas.md) +> Note: For EKS cluster autoscaling capabilities, either `Karpenter` OR the `cluster-autoscaler` should be used; not both. To migrate to using `karpenter` instead of the `cluster-autoscaler` see [Migrating to Karpenter from the Cluster Autoscaler](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-k8s-karpenter/migrating-to-karpenter-from-cas.md) -To leverage the full power and potential of Karpenter, one must understand the [Karpenter Core Concepts](https://karpenter.sh/v0.27.0/concepts/). Deploying this module without additional configuration (ie deploying Karpenter CRDs) will not enable EKS cluster autoscaling. As use-cases are presented, we will do our best effort to continue to add meaningful examples to the [examples](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/examples/) folder to help ease the complexities of configuring Karpenter. At minimum, one should configure and deploy a default `Provisioner` and `Node Template` for just in time node provisioning via Karpenter. +To leverage the full power and potential of Karpenter, one must understand the [Karpenter Core Concepts](https://karpenter.sh/v0.27.0/concepts/). Deploying this module without additional configuration (ie deploying Karpenter CRDs) will not enable EKS cluster autoscaling. As use-cases are presented, we will do our best effort to continue to add meaningful examples to the [examples](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/examples/) folder to help ease the complexities of configuring Karpenter. At minimum, one should configure and deploy a default `Provisioner` and `Node Template` for just in time node provisioning via Karpenter. 
### Resources Created -This module will create the following core resources, some of which are optional which are noted in the [input variables](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-k8s-karpenter/variables.tf): +This module will create the following core resources, some of which are optional which are noted in the [input variables](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-k8s-karpenter/variables.tf): | AWS Resource | Description | | --- | --- | @@ -66,7 +66,7 @@ This is particularly useful to users that rely on Spot Instances that can be ter For more information read the [Karpenter Intrerruption section](https://karpenter.sh/preview/concepts/deprovisioning/#interruption) -* From [variables.tf](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-k8s-karpenter/variables.tf) enable `create_karpenter_deprovisioning_queue`t +* From [variables.tf](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-k8s-karpenter/variables.tf) enable `create_karpenter_deprovisioning_queue`t ## Sample Usage @@ -81,7 +81,7 @@ For more information read the [Karpenter Intrerruption section](https://karpente module "eks_k_8_s_karpenter" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-karpenter?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-karpenter?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -152,12 +152,12 @@ module "eks_k_8_s_karpenter" { karpenter_chart_repository_username = null # Whether or not to install CRDs with the Karpenter Helm Chart. This should be - # set to true if using the karpenter-crd Helm Chart - # (karpenter_chart_additional_values = true). - karpenter_chart_skip_crds = false + # set to true if using the karpenter-crd Helm Chart (karpenter_crd_helm_create + # = true). + karpenter_chart_skip_crds = true # The version of the Karpenter Helm chart. - karpenter_chart_version = "v0.37.5" + karpenter_chart_version = "1.6.2" # Provide an existing IAM Role ARN to be used with the Karpenter Controller # Service Account. This is required if `create_karpenter_controller_irsa` is @@ -193,10 +193,11 @@ module "eks_k_8_s_karpenter" { # The version of the Karpenter CRD Helm chart. This should typically be the # same version as karpenter_chart_version. - karpenter_crd_chart_version = "v0.32.7" + karpenter_crd_chart_version = "1.6.2" - # Whether or not to create the Karpneter CRDs via the karpenter-crd Helm - # chart. It is suggested to manage the Karpenter CRDs via this Helm chart. + # Whether or not to create the Karpenter CRDs via the karpenter-crd Helm + # chart. It is strongly recommended to manage the Karpenter CRDs via this Helm + # chart. karpenter_crd_helm_create = true # A map of custom tags to apply to the Karpenter Deprovisioning Queue IAM @@ -268,7 +269,7 @@ module "eks_k_8_s_karpenter" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-karpenter?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-karpenter?ref=v2.1.0" } inputs = { @@ -342,12 +343,12 @@ inputs = { karpenter_chart_repository_username = null # Whether or not to install CRDs with the Karpenter Helm Chart. 
This should be - # set to true if using the karpenter-crd Helm Chart - # (karpenter_chart_additional_values = true). - karpenter_chart_skip_crds = false + # set to true if using the karpenter-crd Helm Chart (karpenter_crd_helm_create + # = true). + karpenter_chart_skip_crds = true # The version of the Karpenter Helm chart. - karpenter_chart_version = "v0.37.5" + karpenter_chart_version = "1.6.2" # Provide an existing IAM Role ARN to be used with the Karpenter Controller # Service Account. This is required if `create_karpenter_controller_irsa` is @@ -383,10 +384,11 @@ inputs = { # The version of the Karpenter CRD Helm chart. This should typically be the # same version as karpenter_chart_version. - karpenter_crd_chart_version = "v0.32.7" + karpenter_crd_chart_version = "1.6.2" - # Whether or not to create the Karpneter CRDs via the karpenter-crd Helm - # chart. It is suggested to manage the Karpenter CRDs via this Helm chart. + # Whether or not to create the Karpenter CRDs via the karpenter-crd Helm + # chart. It is strongly recommended to manage the Karpenter CRDs via this Helm + # chart. karpenter_crd_helm_create = true # A map of custom tags to apply to the Karpenter Deprovisioning Queue IAM @@ -621,10 +623,10 @@ Optionally provide a Username for HTTP basic authentication against the Karpente -Whether or not to install CRDs with the Karpenter Helm Chart. This should be set to true if using the karpenter-crd Helm Chart (karpenter_chart_additional_values = true). +Whether or not to install CRDs with the Karpenter Helm Chart. This should be set to true if using the karpenter-crd Helm Chart (karpenter_crd_helm_create = true). - + @@ -633,7 +635,7 @@ Whether or not to install CRDs with the Karpenter Helm Chart. This should be set The version of the Karpenter Helm chart. - + @@ -723,13 +725,13 @@ Optionally provide a Username for HTTP basic authentication against the Karpente The version of the Karpenter CRD Helm chart. This should typically be the same version as karpenter_chart_version. - + -Whether or not to create the Karpneter CRDs via the karpenter-crd Helm chart. It is suggested to manage the Karpenter CRDs via this Helm chart. +Whether or not to create the Karpenter CRDs via the karpenter-crd Helm chart. It is strongly recommended to manage the Karpenter CRDs via this Helm chart. @@ -919,11 +921,11 @@ The name of the Karpenter Node IAM Role. diff --git a/docs/reference/modules/terraform-aws-eks/eks-k8s-role-mapping/eks-k8s-role-mapping.md b/docs/reference/modules/terraform-aws-eks/eks-k8s-role-mapping/eks-k8s-role-mapping.md index d32f1157e6..5c2c01d172 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-k8s-role-mapping/eks-k8s-role-mapping.md +++ b/docs/reference/modules/terraform-aws-eks/eks-k8s-role-mapping/eks-k8s-role-mapping.md @@ -9,17 +9,17 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # EKS K8S Role Mapping Module -View Source +View Source Release Notes **NOTE: This module manages a single ConfigMap to use with Kubernetes AWS IAM authentication. 
If you wish to break up the ConfigMap across multiple smaller ConfigMaps to manage entries in isolated modules (e.g., when you add a new IAM -role in a separate module from the EKS cluster), refer to the [eks-aws-auth-merger](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-aws-auth-merger).** +role in a separate module from the EKS cluster), refer to the [eks-aws-auth-merger](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-aws-auth-merger).** This Module can be used to manage the mapping of AWS IAM roles and users to Kubernetes RBAC groups for finer grained access control of your EKS Cluster. @@ -59,7 +59,7 @@ as much or as little permissions as necessary when accessing resources in the AW This Module provides code for you to manage the mapping between AWS IAM roles and Kubernetes RBAC roles so that you can maintain a consistent set of mappings between the two systems. This works hand in hand with the [EKS authentication -system](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/core-concepts.md#how-do-i-authenticate-kubectl-to-the-eks-cluster), providing the information to Kubernetes to resolve the user to the right RBAC group based on the provided IAM role credentials. +system](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/core-concepts.md#how-do-i-authenticate-kubectl-to-the-eks-cluster), providing the information to Kubernetes to resolve the user to the right RBAC group based on the provided IAM role credentials. ## Examples @@ -335,7 +335,7 @@ ConfigMap and as such does not have the cyclic dependency problem of Helm. module "eks_k_8_s_role_mapping" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-role-mapping?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-role-mapping?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -393,7 +393,7 @@ module "eks_k_8_s_role_mapping" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-role-mapping?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-role-mapping?ref=v2.1.0" } inputs = { @@ -568,11 +568,11 @@ The name of the ConfigMap created to store the mapping. 
This exists so that down diff --git a/docs/reference/modules/terraform-aws-eks/eks-scripts/eks-scripts.md b/docs/reference/modules/terraform-aws-eks/eks-scripts/eks-scripts.md index 3bff0fae13..a878aba9d5 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-scripts/eks-scripts.md +++ b/docs/reference/modules/terraform-aws-eks/eks-scripts/eks-scripts.md @@ -9,13 +9,13 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # EKS Scripts Module -View Source +View Source -Release Notes +Release Notes This folder contains helper scripts for running an EKS Cluster, including: @@ -31,7 +31,7 @@ gruntwork-install --module-name "eks-scripts" --repo "https://github.com/gruntwo ``` For an example, see the [Packer](https://www.packer.io/) template under -[examples/eks-cluster-with-supporting-services/packer/build.json](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/examples/eks-cluster-with-supporting-services/packer/build.json). +[examples/eks-cluster-with-supporting-services/packer/build.json](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/examples/eks-cluster-with-supporting-services/packer/build.json). ## Using the map-ec2-tags-to-node-labels helper @@ -96,7 +96,7 @@ and you specified `ec2.gruntwork.io/` as your tag prefix (`map-ec2-tags-to-node- In order for the script to be able to successfully retrieve the tags for EC2 instance, the instances need to be associated with an IAM profile that grants it access to retrieve the EC2 tags on the instance. If you launch the workers -using the [eks-cluster-workers module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v1.4.0/modules/eks-cluster-workers), this is automatically attached to the worker IAM role. +using the [eks-cluster-workers module](https://github.com/gruntwork-io/terraform-aws-eks/tree/v2.1.0/modules/eks-cluster-workers), this is automatically attached to the worker IAM role. ### map_ec2\_tags_to_node_labels.py symlink @@ -107,11 +107,11 @@ tests. diff --git a/docs/reference/modules/terraform-aws-eks/eks-vpc-tags/eks-vpc-tags.md b/docs/reference/modules/terraform-aws-eks/eks-vpc-tags/eks-vpc-tags.md index d8acf0beeb..55bdf3a15d 100644 --- a/docs/reference/modules/terraform-aws-eks/eks-vpc-tags/eks-vpc-tags.md +++ b/docs/reference/modules/terraform-aws-eks/eks-vpc-tags/eks-vpc-tags.md @@ -9,11 +9,11 @@ import VersionBadge from '../../../../../src/components/VersionBadge.tsx'; import { HclListItem, HclListItemDescription, HclListItemTypeDetails, HclListItemDefaultValue, HclGeneralListItem } from '../../../../../src/components/HclListItem.tsx'; import { ModuleUsage } from "../../../../../src/components/ModuleUsage"; - + # EKS VPC Tags Module -View Source +View Source Release Notes @@ -39,7 +39,7 @@ with EKS. 
module "eks_vpc_tags" { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-vpc-tags?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-vpc-tags?ref=v2.1.0" # ---------------------------------------------------------------------------------------------------- # REQUIRED VARIABLES @@ -63,7 +63,7 @@ module "eks_vpc_tags" { # ------------------------------------------------------------------------------------------------------ terraform { - source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-vpc-tags?ref=v1.4.0" + source = "git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-vpc-tags?ref=v2.1.0" } inputs = { @@ -142,11 +142,11 @@ Tags for public subnets in the VPC to use for integration with EKS.