From 1b44530ddc1b667e962ecb76bab3fde270839ffe Mon Sep 17 00:00:00 2001 From: Apoorva Kulkarni Date: Tue, 14 Mar 2023 13:19:08 -0700 Subject: [PATCH 01/31] refactor: Update examples to use new addons module repo --- examples/agones-game-controller/main.tf | 86 ++++++--- examples/amp-amg-opensearch/main.tf | 4 +- examples/appmesh-mtls/main.tf | 175 +++++++++++++++++- examples/argocd/main.tf | 16 +- .../modules/eks_cluster/main.tf | 4 +- examples/external-secrets/main.tf | 4 +- examples/fargate-serverless/main.tf | 110 ++++++++++- examples/karpenter/main.tf | 4 +- examples/stateful/main.tf | 4 +- examples/tls-with-aws-pca-issuer/main.tf | 29 ++- examples/wireguard-with-cilium/main.tf | 46 +++-- 11 files changed, 410 insertions(+), 72 deletions(-) diff --git a/examples/agones-game-controller/main.tf b/examples/agones-game-controller/main.tf index 045a8f7199..168903432b 100644 --- a/examples/agones-game-controller/main.tf +++ b/examples/agones-game-controller/main.tf @@ -5,23 +5,35 @@ provider "aws" { provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token - } -} -data "aws_eks_cluster_auth" "this" { - name = module.eks.cluster_name + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } + } } data "aws_availability_zones" 
"available" {} +data "aws_security_group" "eks_worker_group" { + id = module.eks.cluster_security_group_id +} + locals { name = basename(path.cwd) region = "us-west-2" @@ -31,6 +43,8 @@ locals { vpc_cidr = "10.0.0.0/16" azs = slice(data.aws_availability_zones.available.names, 0, 3) + gameserver_minport = 7000 + gameserver_maxport = 8000 tags = { Blueprint = local.name GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" @@ -78,7 +92,9 @@ module "eks" { ################################################################################ module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" + # Users should pin the version to the latest available release + # tflint-ignore: terraform_module_pinned_source + source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" eks_cluster_id = module.eks.cluster_name eks_cluster_endpoint = module.eks.cluster_endpoint @@ -89,28 +105,35 @@ module "eks_blueprints_kubernetes_addons" { enable_metrics_server = true enable_cluster_autoscaler = true - # NOTE: Agones requires a Node group in Public Subnets and enable Public IP - enable_agones = true - # Do not be fooled, this is required by the Agones addon - eks_worker_security_group_id = module.eks.cluster_security_group_id - agones_helm_config = { - name = "agones" - chart = "agones" - repository = "https://agones.dev/chart/stable" - version = "1.21.0" - namespace = "agones-system" - - values = [templatefile("${path.module}/helm_values/agones-values.yaml", { - expose_udp = true - gameserver_namespaces = "{${join(",", ["default", "xbox-gameservers", "xbox-gameservers"])}}" - gameserver_minport = 7000 - gameserver_maxport = 8000 - })] - } - tags = local.tags } +################################################################################ +# Agones Helm Chart +################################################################################ + +# NOTE: Agones requires a Node group in Public Subnets and enable Public IP +resource "helm_release" 
"agones" { + name = "agones" + chart = "agones" + version = "1.21.0" + repository = "https://agones.dev/chart/stable" + description = "Agones helm chart" + namespace = "agones-system" + create_namespace = true + + values = [templatefile("${path.module}/helm_values/agones-values.yaml", { + expose_udp = true + gameserver_namespaces = "{${join(",", ["default", "xbox-gameservers", "xbox-gameservers"])}}" + gameserver_minport = 7000 + gameserver_maxport = 8000 + })] + + depends_on = [ + module.eks_blueprints_kubernetes_addons + ] +} + ################################################################################ # Supporting Resources ################################################################################ @@ -148,3 +171,14 @@ module "vpc" { tags = local.tags } + +resource "aws_security_group_rule" "agones_sg_ingress_rule" { + description = "Allow UDP ingress from internet" + type = "ingress" + from_port = local.gameserver_minport + to_port = local.gameserver_maxport + protocol = "udp" + cidr_blocks = ["0.0.0.0/0"] #tfsec:ignore:aws-vpc-no-public-ingress-sgr + ipv6_cidr_blocks = ["::/0"] #tfsec:ignore:aws-vpc-no-public-ingress-sgr + security_group_id = data.aws_security_group.eks_worker_group.id +} diff --git a/examples/amp-amg-opensearch/main.tf b/examples/amp-amg-opensearch/main.tf index fc7a151d7f..12def1f3f9 100644 --- a/examples/amp-amg-opensearch/main.tf +++ b/examples/amp-amg-opensearch/main.tf @@ -83,7 +83,9 @@ module "eks" { ################################################################################ module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" + # Users should pin the version to the latest available release + # tflint-ignore: terraform_module_pinned_source + source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" eks_cluster_id = module.eks.cluster_name eks_cluster_endpoint = module.eks.cluster_endpoint diff --git a/examples/appmesh-mtls/main.tf b/examples/appmesh-mtls/main.tf index 
739fcc24d1..f35e3a8b54 100644 --- a/examples/appmesh-mtls/main.tf +++ b/examples/appmesh-mtls/main.tf @@ -30,6 +30,128 @@ data "aws_eks_cluster_auth" "this" { data "aws_availability_zones" "available" {} +data "aws_partition" "current" {} + +data "aws_caller_identity" "current" {} + + +data "aws_iam_policy_document" "this" { + statement { + sid = "AllowAppMesh" + effect = "Allow" + resources = ["arn:${data.aws_partition.current.partition}:appmesh:${local.region}:${data.aws_caller_identity.current.account_id}:mesh/*"] + + actions = [ + "appmesh:ListVirtualRouters", + "appmesh:ListVirtualServices", + "appmesh:ListRoutes", + "appmesh:ListGatewayRoutes", + "appmesh:ListMeshes", + "appmesh:ListVirtualNodes", + "appmesh:ListVirtualGateways", + "appmesh:DescribeMesh", + "appmesh:DescribeVirtualRouter", + "appmesh:DescribeRoute", + "appmesh:DescribeVirtualNode", + "appmesh:DescribeVirtualGateway", + "appmesh:DescribeGatewayRoute", + "appmesh:DescribeVirtualService", + "appmesh:CreateMesh", + "appmesh:CreateVirtualRouter", + "appmesh:CreateVirtualGateway", + "appmesh:CreateVirtualService", + "appmesh:CreateGatewayRoute", + "appmesh:CreateRoute", + "appmesh:CreateVirtualNode", + "appmesh:UpdateMesh", + "appmesh:UpdateRoute", + "appmesh:UpdateVirtualGateway", + "appmesh:UpdateVirtualRouter", + "appmesh:UpdateGatewayRoute", + "appmesh:UpdateVirtualService", + "appmesh:UpdateVirtualNode", + "appmesh:DeleteMesh", + "appmesh:DeleteRoute", + "appmesh:DeleteVirtualRouter", + "appmesh:DeleteGatewayRoute", + "appmesh:DeleteVirtualService", + "appmesh:DeleteVirtualNode", + "appmesh:DeleteVirtualGateway" + ] + } + + statement { + sid = "CreateServiceLinkedRole" + effect = "Allow" + resources = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/appmesh.${data.aws_partition.current.dns_suffix}/AWSServiceRoleForAppMesh"] + actions = ["iam:CreateServiceLinkedRole"] + + condition { + test = "StringLike" + variable = 
"iam:AWSServiceName" + values = ["appmesh.${data.aws_partition.current.dns_suffix}"] + } + } + + statement { + sid = "AllowACMAccess" + effect = "Allow" + resources = ["arn:${data.aws_partition.current.partition}:acm:${local.region}:${data.aws_caller_identity.current.account_id}:certificate/*"] + actions = [ + "acm:ListCertificates", + "acm:DescribeCertificate", + ] + } + + statement { + sid = "AllowACMPCAAccess" + effect = "Allow" + resources = ["arn:${data.aws_partition.current.partition}:acm-pca:${local.region}:${data.aws_caller_identity.current.account_id}:certificate-authority/*"] + actions = [ + "acm-pca:DescribeCertificateAuthority", + "acm-pca:ListCertificateAuthorities" + ] + } + + statement { + sid = "AllowServiceDiscovery" + effect = "Allow" + resources = [ + "arn:${data.aws_partition.current.partition}:servicediscovery:${local.region}:${data.aws_caller_identity.current.account_id}:namespace/*", + "arn:${data.aws_partition.current.partition}:servicediscovery:${local.region}:${data.aws_caller_identity.current.account_id}:service/*" + ] + actions = [ + "servicediscovery:CreateService", + "servicediscovery:DeleteService", + "servicediscovery:GetService", + "servicediscovery:GetInstance", + "servicediscovery:RegisterInstance", + "servicediscovery:DeregisterInstance", + "servicediscovery:ListInstances", + "servicediscovery:ListNamespaces", + "servicediscovery:ListServices", + "servicediscovery:GetInstancesHealthStatus", + "servicediscovery:UpdateInstanceCustomHealthStatus", + "servicediscovery:GetOperation" + ] + } + + statement { + sid = "AllowRoute53" + effect = "Allow" + resources = [ + "arn:${data.aws_partition.current.partition}:route53:::*"] + actions = [ + "route53:ChangeResourceRecordSets", + "route53:GetHealthCheck", + "route53:CreateHealthCheck", + "route53:UpdateHealthCheck", + "route53:DeleteHealthCheck" + ] + } +} + + locals { name = basename(path.cwd) region = "us-west-2" @@ -85,7 +207,9 @@ module "eks" { 
################################################################################ module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" + # Users should pin the version to the latest available release + # tflint-ignore: terraform_module_pinned_source + source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" eks_cluster_id = module.eks.cluster_name eks_cluster_endpoint = module.eks.cluster_endpoint @@ -93,13 +217,54 @@ module "eks_blueprints_kubernetes_addons" { eks_cluster_version = module.eks.cluster_version aws_privateca_acmca_arn = aws_acmpca_certificate_authority.this.arn - enable_appmesh_controller = true enable_cert_manager = true enable_aws_privateca_issuer = true tags = local.tags } +################################################################################ +# AppMesh Addons +################################################################################ + +module "appmesh_addon" { + # Users should pin the version to the latest available release + # tflint-ignore: terraform_module_pinned_source + source = "github.com/aws-ia/terraform-aws-eks-blueprints-addon" + + chart = "appmesh-controller" + chart_version = "1.7.0" + repository = "https://aws.github.io/eks-charts" + description = "AWS App Mesh Helm Chart" + namespace = "appmesh-system" + create_namespace = true + + set = [ + { + name = "serviceAccount.name" + value = "appmesh-controller" + } + ] + + set_irsa_name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" + + # IAM role for service account (IRSA) + create_role = true + role_name = "appmesh-controller" + role_policy_arns = { + appmesh = aws_iam_policy.this.arn + } + + oidc_providers = { + this = { + provider_arn = module.eks.oidc_provider_arn + service_account = "appmesh-controller" + } + } + + tags = local.tags +} + #--------------------------------------------------------------- # Certificate Resources #--------------------------------------------------------------- @@ -230,3 +395,9 @@ 
module "vpc" { tags = local.tags } + +resource "aws_iam_policy" "this" { + name = "${module.eks.cluster_name}-appmesh" + description = "IAM Policy for App Mesh" + policy = data.aws_iam_policy_document.this.json +} diff --git a/examples/argocd/main.tf b/examples/argocd/main.tf index 30d924a935..f78a8e7632 100644 --- a/examples/argocd/main.tf +++ b/examples/argocd/main.tf @@ -80,7 +80,9 @@ module "eks" { ################################################################################ module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" + # Users should pin the version to the latest available release + # tflint-ignore: terraform_module_pinned_source + source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" eks_cluster_id = module.eks.cluster_name eks_cluster_endpoint = module.eks.cluster_endpoint @@ -98,15 +100,6 @@ module "eks_blueprints_kubernetes_addons" { ] } - keda_helm_config = { - values = [ - { - name = "serviceAccount.create" - value = "false" - } - ] - } - argocd_manage_add_ons = true # Indicates that ArgoCD is responsible for managing/deploying add-ons argocd_applications = { addons = { @@ -129,12 +122,9 @@ module "eks_blueprints_kubernetes_addons" { enable_cert_manager = true enable_cluster_autoscaler = true enable_karpenter = true - enable_keda = true enable_metrics_server = true enable_prometheus = true - enable_traefik = true enable_vpa = true - enable_yunikorn = true enable_argo_rollouts = true tags = local.tags diff --git a/examples/blue-green-upgrade/modules/eks_cluster/main.tf b/examples/blue-green-upgrade/modules/eks_cluster/main.tf index cc183fa03d..d197aeeeb5 100644 --- a/examples/blue-green-upgrade/modules/eks_cluster/main.tf +++ b/examples/blue-green-upgrade/modules/eks_cluster/main.tf @@ -363,7 +363,9 @@ module "eks_blueprints" { #certificate_arn = aws_acm_certificate_validation.example.certificate_arn module "kubernetes_addons" { - source = 
"github.com/aws-ia/terraform-aws-eks-blueprints?ref=v4.18.1/modules/kubernetes-addons" + # Users should pin the version to the latest available release + # tflint-ignore: terraform_module_pinned_source + source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" eks_cluster_id = module.eks_blueprints.eks_cluster_id eks_cluster_domain = local.eks_cluster_domain diff --git a/examples/external-secrets/main.tf b/examples/external-secrets/main.tf index e0e8aa74a1..782e6d6e01 100644 --- a/examples/external-secrets/main.tf +++ b/examples/external-secrets/main.tf @@ -91,7 +91,9 @@ module "eks" { ################################################################################ module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" + # Users should pin the version to the latest available release + # tflint-ignore: terraform_module_pinned_source + source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" eks_cluster_id = module.eks.cluster_name eks_cluster_endpoint = module.eks.cluster_endpoint diff --git a/examples/fargate-serverless/main.tf b/examples/fargate-serverless/main.tf index 874d4be04a..b9a9d67de8 100644 --- a/examples/fargate-serverless/main.tf +++ b/examples/fargate-serverless/main.tf @@ -107,7 +107,9 @@ module "eks" { ################################################################################ module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" + # Users should pin the version to the latest available release + # tflint-ignore: terraform_module_pinned_source + source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" eks_cluster_id = module.eks.cluster_name eks_cluster_endpoint = module.eks.cluster_endpoint @@ -117,9 +119,6 @@ module "eks_blueprints_kubernetes_addons" { # Wait on the `kube-system` profile before provisioning addons data_plane_wait_arn = join(",", [for prof in module.eks.fargate_profiles : prof.fargate_profile_arn]) - # Sample application - 
enable_app_2048 = true - # Enable Fargate logging enable_fargate_fluentbit = true fargate_fluentbit_addon_config = { @@ -180,3 +179,106 @@ module "vpc" { tags = local.tags } + +################################################################################ +# Sample App +################################################################################ + +resource "kubernetes_namespace_v1" "this" { + metadata { + name = local.name + } +} + +resource "kubernetes_deployment_v1" "this" { + metadata { + name = local.name + namespace = kubernetes_namespace_v1.this.metadata[0].name + } + + spec { + replicas = 3 + + selector { + match_labels = { + "app.kubernetes.io/name" = local.name + } + } + + template { + metadata { + labels = { + "app.kubernetes.io/name" = local.name + } + } + + spec { + container { + image = "public.ecr.aws/l6m2t8p7/docker-2048:latest" + # image_pull_policy = "Always" + name = local.name + + port { + container_port = 80 + } + } + } + } + } +} + +resource "kubernetes_service_v1" "this" { + metadata { + name = local.name + namespace = kubernetes_namespace_v1.this.metadata[0].name + } + + spec { + selector = { + "app.kubernetes.io/name" = local.name + } + + + port { + port = 80 + target_port = 80 + protocol = "TCP" + } + + type = "NodePort" + } +} + +resource "kubernetes_ingress_v1" "this" { + metadata { + name = local.name + namespace = kubernetes_namespace_v1.this.metadata[0].name + + annotations = { + "alb.ingress.kubernetes.io/scheme" = "internet-facing" + "alb.ingress.kubernetes.io/target-type" = "ip" + } + } + + spec { + ingress_class_name = "alb" + + rule { + http { + path { + path = "/" + path_type = "Prefix" + + backend { + service { + name = local.name + port { + number = 80 + } + } + } + } + } + } + } +} diff --git a/examples/karpenter/main.tf b/examples/karpenter/main.tf index 2811a9868e..a533949e03 100644 --- a/examples/karpenter/main.tf +++ b/examples/karpenter/main.tf @@ -154,7 +154,9 @@ module "eks" { 
################################################################################ module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" + # Users should pin the version to the latest available release + # tflint-ignore: terraform_module_pinned_source + source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" eks_cluster_id = module.eks.cluster_name eks_cluster_endpoint = module.eks.cluster_endpoint diff --git a/examples/stateful/main.tf b/examples/stateful/main.tf index b4b1491fea..45b01eeaa7 100644 --- a/examples/stateful/main.tf +++ b/examples/stateful/main.tf @@ -190,7 +190,9 @@ module "eks" { ################################################################################ module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" + # Users should pin the version to the latest available release + # tflint-ignore: terraform_module_pinned_source + source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" eks_cluster_id = module.eks.cluster_name eks_cluster_endpoint = module.eks.cluster_endpoint diff --git a/examples/tls-with-aws-pca-issuer/main.tf b/examples/tls-with-aws-pca-issuer/main.tf index 1a14f75b12..0c18390265 100644 --- a/examples/tls-with-aws-pca-issuer/main.tf +++ b/examples/tls-with-aws-pca-issuer/main.tf @@ -85,7 +85,9 @@ module "eks" { ################################################################################ module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" + # Users should pin the version to the latest available release + # tflint-ignore: terraform_module_pinned_source + source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" eks_cluster_id = module.eks.cluster_name eks_cluster_endpoint = module.eks.cluster_endpoint @@ -93,14 +95,31 @@ module "eks_blueprints_kubernetes_addons" { eks_cluster_version = module.eks.cluster_version # Add-ons - enable_cert_manager = true - enable_cert_manager_csi_driver = true - 
enable_aws_privateca_issuer = true - aws_privateca_acmca_arn = aws_acmpca_certificate_authority.this.arn + enable_cert_manager = true + enable_aws_privateca_issuer = true + aws_privateca_acmca_arn = aws_acmpca_certificate_authority.this.arn tags = local.tags } +################################################################################ +# Cert Manager CSI Helm Chart +################################################################################ + +resource "helm_release" "cert_manager_csi" { + name = "cert-manager-csi-driver" + chart = "cert-manager-csi-driver" + version = "v0.4.2" + repository = "https://charts.jetstack.io" + description = "Cert Manager CSI Driver Add-on" + namespace = "cert-manager" + create_namespace = false + + depends_on = [ + module.eks_blueprints_kubernetes_addons + ] +} + #------------------------------- # Associates a certificate with an AWS Certificate Manager Private Certificate Authority (ACM PCA Certificate Authority). # An ACM PCA Certificate Authority is unable to issue certificates until it has a certificate associated with it. 
diff --git a/examples/wireguard-with-cilium/main.tf b/examples/wireguard-with-cilium/main.tf index 73f752d589..dc1585c991 100644 --- a/examples/wireguard-with-cilium/main.tf +++ b/examples/wireguard-with-cilium/main.tf @@ -86,27 +86,39 @@ module "eks" { } ################################################################################ -# Kubernetes Addons +# Cilium Helm Chart for e2e encryption with Wireguard ################################################################################ -module "eks_blueprints_kubernetes_addons" { - source = "../../modules/kubernetes-addons" - - eks_cluster_id = module.eks.cluster_name - eks_cluster_endpoint = module.eks.cluster_endpoint - eks_oidc_provider = module.eks.oidc_provider - eks_cluster_version = module.eks.cluster_version - - # Wait on the `kube-system` profile before provisioning addons - data_plane_wait_arn = join(",", [for group in module.eks.eks_managed_node_groups : group.node_group_arn]) - - # Add-ons - enable_cilium = true - cilium_enable_wireguard = true +resource "helm_release" "cilium" { + name = "cilium" + chart = "cilium" + version = "1.12.3" + repository = "https://helm.cilium.io/" + description = "Cilium Add-on" + namespace = "kube-system" + create_namespace = false + + values = [ + <<-EOT + cni: + chainingMode: aws-cni + enableIPv4Masquerade: false + tunnel: disabled + endpointRoutes: + enabled: true + l7Proxy: false + encryption: + enabled: true + type: wireguard + EOT + ] - tags = local.tags + depends_on = [ + module.eks + ] } + #--------------------------------------------------------------- # Sample App for Testing #--------------------------------------------------------------- @@ -147,7 +159,7 @@ resource "kubectl_manifest" "server" { }) depends_on = [ - module.eks_blueprints_kubernetes_addons + helm_release.cilium ] } From 41bc6dd59fe994cf42cebe2786d68ff2220618fe Mon Sep 17 00:00:00 2001 From: Apoorva Kulkarni Date: Tue, 14 Mar 2023 13:26:31 -0700 Subject: [PATCH 02/31] update providers to use 
aws cli --- examples/amp-amg-opensearch/main.tf | 16 +++++++++++-- examples/appmesh-mtls/main.tf | 26 ++++++++++++++++++---- examples/argocd/main.tf | 16 +++++++++++-- examples/external-secrets/main.tf | 26 ++++++++++++++++++---- examples/fargate-serverless/main.tf | 16 +++++++++++-- examples/ipv4-prefix-delegation/main.tf | 16 +++++++++++-- examples/ipv6-eks-cluster/main.tf | 16 +++++++++++-- examples/karpenter/main.tf | 26 ++++++++++++++++++---- examples/stateful/main.tf | 16 +++++++++++-- examples/tls-with-aws-pca-issuer/main.tf | 26 ++++++++++++++++++---- examples/vpc-cni-custom-networking/main.tf | 26 ++++++++++++++++++---- examples/wireguard-with-cilium/main.tf | 26 ++++++++++++++++++---- 12 files changed, 216 insertions(+), 36 deletions(-) diff --git a/examples/amp-amg-opensearch/main.tf b/examples/amp-amg-opensearch/main.tf index 12def1f3f9..b632f29bf2 100644 --- a/examples/amp-amg-opensearch/main.tf +++ b/examples/amp-amg-opensearch/main.tf @@ -5,14 +5,26 @@ provider "aws" { provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } } diff --git a/examples/appmesh-mtls/main.tf b/examples/appmesh-mtls/main.tf index f35e3a8b54..47f5c6c335 100644 --- 
a/examples/appmesh-mtls/main.tf +++ b/examples/appmesh-mtls/main.tf @@ -5,23 +5,41 @@ provider "aws" { provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } } provider "kubectl" { - apply_retry_count = 10 + apply_retry_count = 5 host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) load_config_file = false - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } data "aws_eks_cluster_auth" "this" { diff --git a/examples/argocd/main.tf b/examples/argocd/main.tf index f78a8e7632..53cd90bd85 100644 --- a/examples/argocd/main.tf +++ b/examples/argocd/main.tf @@ -5,14 +5,26 @@ provider "aws" { provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # 
This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } } diff --git a/examples/external-secrets/main.tf b/examples/external-secrets/main.tf index 782e6d6e01..79cf90fc4c 100644 --- a/examples/external-secrets/main.tf +++ b/examples/external-secrets/main.tf @@ -5,23 +5,41 @@ provider "aws" { provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } } provider "kubectl" { - apply_retry_count = 10 + apply_retry_count = 5 host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) load_config_file = false - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version 
= "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } data "aws_eks_cluster_auth" "this" { diff --git a/examples/fargate-serverless/main.tf b/examples/fargate-serverless/main.tf index b9a9d67de8..cfe4871e5e 100644 --- a/examples/fargate-serverless/main.tf +++ b/examples/fargate-serverless/main.tf @@ -5,14 +5,26 @@ provider "aws" { provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } } diff --git a/examples/ipv4-prefix-delegation/main.tf b/examples/ipv4-prefix-delegation/main.tf index 418f394d4c..ce97dfe09e 100644 --- a/examples/ipv4-prefix-delegation/main.tf +++ b/examples/ipv4-prefix-delegation/main.tf @@ -5,14 +5,26 @@ provider "aws" { provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = 
["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } } diff --git a/examples/ipv6-eks-cluster/main.tf b/examples/ipv6-eks-cluster/main.tf index 2c66c81ed9..5599cbedc2 100644 --- a/examples/ipv6-eks-cluster/main.tf +++ b/examples/ipv6-eks-cluster/main.tf @@ -5,14 +5,26 @@ provider "aws" { provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } } diff --git a/examples/karpenter/main.tf b/examples/karpenter/main.tf index a533949e03..478198a7a7 100644 --- a/examples/karpenter/main.tf +++ b/examples/karpenter/main.tf @@ -11,23 +11,41 @@ provider "aws" { provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = 
data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } } provider "kubectl" { - apply_retry_count = 10 + apply_retry_count = 5 host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) load_config_file = false - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } data "aws_eks_cluster_auth" "this" { diff --git a/examples/stateful/main.tf b/examples/stateful/main.tf index 45b01eeaa7..4c58d1f7c7 100644 --- a/examples/stateful/main.tf +++ b/examples/stateful/main.tf @@ -5,14 +5,26 @@ provider "aws" { provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = 
base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } } diff --git a/examples/tls-with-aws-pca-issuer/main.tf b/examples/tls-with-aws-pca-issuer/main.tf index 0c18390265..b6c7ae7ad6 100644 --- a/examples/tls-with-aws-pca-issuer/main.tf +++ b/examples/tls-with-aws-pca-issuer/main.tf @@ -5,23 +5,41 @@ provider "aws" { provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } } provider "kubectl" { - apply_retry_count = 10 + apply_retry_count = 5 host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) load_config_file = false - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } 
data "aws_eks_cluster_auth" "this" { diff --git a/examples/vpc-cni-custom-networking/main.tf b/examples/vpc-cni-custom-networking/main.tf index 43ac65b121..1220e87ad0 100644 --- a/examples/vpc-cni-custom-networking/main.tf +++ b/examples/vpc-cni-custom-networking/main.tf @@ -5,23 +5,41 @@ provider "aws" { provider "kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } } provider "kubectl" { - apply_retry_count = 10 + apply_retry_count = 5 host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) load_config_file = false - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } data "aws_eks_cluster_auth" "this" { diff --git a/examples/wireguard-with-cilium/main.tf b/examples/wireguard-with-cilium/main.tf index dc1585c991..d84d1e35fc 100644 --- a/examples/wireguard-with-cilium/main.tf +++ b/examples/wireguard-with-cilium/main.tf @@ -5,23 +5,41 @@ provider "aws" { provider 
"kubernetes" { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } provider "helm" { kubernetes { host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } } provider "kubectl" { - apply_retry_count = 10 + apply_retry_count = 5 host = module.eks.cluster_endpoint cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data) load_config_file = false - token = data.aws_eks_cluster_auth.this.token + + exec { + api_version = "client.authentication.k8s.io/v1beta1" + command = "aws" + # This requires the awscli to be installed locally where Terraform is executed + args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name] + } } data "aws_eks_cluster_auth" "this" { From f68e76fcb2379d2b4ec6ed8b01cbe993bdb6f18e Mon Sep 17 00:00:00 2001 From: Apoorva Kulkarni Date: Tue, 14 Mar 2023 15:14:37 -0700 Subject: [PATCH 03/31] new line --- examples/agones-game-controller/main.tf | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/agones-game-controller/main.tf b/examples/agones-game-controller/main.tf index 168903432b..5909706efc 100644 --- a/examples/agones-game-controller/main.tf +++ b/examples/agones-game-controller/main.tf @@ -45,6 +45,7 @@ locals { gameserver_minport = 7000 gameserver_maxport = 8000 + tags = { Blueprint = local.name 
GithubRepo = "github.com/aws-ia/terraform-aws-eks-blueprints" From afd61b3a20b72d40b5dc46e6246e261773721143 Mon Sep 17 00:00:00 2001 From: Apoorva Kulkarni Date: Fri, 17 Mar 2023 15:54:15 -0700 Subject: [PATCH 04/31] updates for latest changes --- examples/agones-game-controller/main.tf | 48 +++- examples/amp-amg-opensearch/main.tf | 79 ++++-- examples/appmesh-mtls/main.tf | 59 ++-- examples/argocd/main.tf | 75 ++++-- .../modules/eks_cluster/main.tf | 85 +++--- examples/external-secrets/main.tf | 251 +++++++++++------- examples/fargate-serverless/main.tf | 100 ++++--- examples/fully-private-cluster/main.tf | 4 +- examples/ipv4-prefix-delegation/main.tf | 8 +- examples/ipv6-eks-cluster/main.tf | 8 +- examples/karpenter/main.tf | 140 ++++++---- examples/stateful/main.tf | 79 ++++-- examples/tls-with-aws-pca-issuer/main.tf | 52 ++-- examples/vpc-cni-custom-networking/main.tf | 6 +- examples/wireguard-with-cilium/main.tf | 8 +- 15 files changed, 644 insertions(+), 358 deletions(-) diff --git a/examples/agones-game-controller/main.tf b/examples/agones-game-controller/main.tf index 5909706efc..8a2fa48d5a 100644 --- a/examples/agones-game-controller/main.tf +++ b/examples/agones-game-controller/main.tf @@ -38,7 +38,7 @@ locals { name = basename(path.cwd) region = "us-west-2" - cluster_version = "1.24" + cluster_version = "1.25" vpc_cidr = "10.0.0.0/16" azs = slice(data.aws_availability_zones.available.names, 0, 3) @@ -59,19 +59,12 @@ locals { #tfsec:ignore:aws-eks-enable-control-plane-logging module "eks" { source = "terraform-aws-modules/eks/aws" - version = "~> 19.9" + version = "~> 19.10" cluster_name = local.name cluster_version = local.cluster_version cluster_endpoint_public_access = true - # EKS Addons - cluster_addons = { - coredns = {} - kube-proxy = {} - vpc-cni = {} - } - vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets @@ -97,10 +90,20 @@ module "eks_blueprints_kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = 
"github.com/aws-ia/terraform-aws-eks-blueprints-addons" - eks_cluster_id = module.eks.cluster_name - eks_cluster_endpoint = module.eks.cluster_endpoint - eks_oidc_provider = module.eks.oidc_provider - eks_cluster_version = module.eks.cluster_version + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = module.eks.oidc_provider_arn + + #EKS Add-Ons + eks_addons = { + coredns = {} + vpc-cni = { + service_account_role_arn = module.vpc_cni_irsa.iam_role_arn + } + kube-proxy = {} + } # Add-ons enable_metrics_server = true @@ -183,3 +186,22 @@ resource "aws_security_group_rule" "agones_sg_ingress_rule" { ipv6_cidr_blocks = ["::/0"] #tfsec:ignore:aws-vpc-no-public-ingress-sgr security_group_id = data.aws_security_group.eks_worker_group.id } + +module "vpc_cni_irsa" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "~> 5.14" + + role_name_prefix = "${module.eks.cluster_name}-vpc-cni-" + + attach_vpc_cni_policy = true + vpc_cni_enable_ipv4 = true + + oidc_providers = { + main = { + provider_arn = module.eks.oidc_provider_arn + namespace_service_accounts = ["kube-system:aws-node"] + } + } + + tags = local.tags +} diff --git a/examples/amp-amg-opensearch/main.tf b/examples/amp-amg-opensearch/main.tf index b632f29bf2..b7bbdced37 100644 --- a/examples/amp-amg-opensearch/main.tf +++ b/examples/amp-amg-opensearch/main.tf @@ -27,23 +27,18 @@ provider "helm" { } } } - -data "aws_eks_cluster_auth" "this" { - name = module.eks.cluster_name -} - -data "aws_availability_zones" "available" {} - provider "grafana" { url = var.grafana_endpoint auth = var.grafana_api_key } +data "aws_availability_zones" "available" {} + locals { name = basename(path.cwd) region = "us-west-2" - cluster_version = "1.24" + cluster_version = "1.25" vpc_cidr = "10.0.0.0/16" azs = 
slice(data.aws_availability_zones.available.names, 0, 3) @@ -61,19 +56,12 @@ locals { #tfsec:ignore:aws-eks-enable-control-plane-logging module "eks" { source = "terraform-aws-modules/eks/aws" - version = "~> 19.9" + version = "~> 19.10" cluster_name = local.name cluster_version = local.cluster_version cluster_endpoint_public_access = true - # EKS Addons - cluster_addons = { - coredns = {} - kube-proxy = {} - vpc-cni = {} - } - vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets @@ -99,10 +87,23 @@ module "eks_blueprints_kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - eks_cluster_id = module.eks.cluster_name - eks_cluster_endpoint = module.eks.cluster_endpoint - eks_oidc_provider = module.eks.oidc_provider - eks_cluster_version = module.eks.cluster_version + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = module.eks.oidc_provider_arn + + # EKS Add-ons + eks_addons = { + aws-ebs-csi-driver = { + service_account_role_arn = module.ebs_csi_driver_irsa.iam_role_arn + } + coredns = {} + vpc-cni = { + service_account_role_arn = module.vpc_cni_irsa.iam_role_arn + } + kube-proxy = {} + } # Add-ons enable_metrics_server = true @@ -125,7 +126,6 @@ module "eks_blueprints_kubernetes_addons" { })] } - enable_amazon_eks_aws_ebs_csi_driver = true enable_prometheus = true enable_amazon_prometheus = true amazon_prometheus_workspace_endpoint = module.managed_prometheus.workspace_prometheus_endpoint @@ -302,3 +302,40 @@ module "vpc" { tags = local.tags } + +module "ebs_csi_driver_irsa" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "~> 5.14" + + role_name_prefix = "${module.eks.cluster_name}-ebs-csi-driver-" + + attach_ebs_csi_policy = true + + oidc_providers = { 
+ main = { + provider_arn = module.eks.oidc_provider_arn + namespace_service_accounts = ["kube-system:ebs-csi-controller-sa"] + } + } + + tags = local.tags +} + +module "vpc_cni_irsa" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "~> 5.14" + + role_name_prefix = "${module.eks.cluster_name}-vpc-cni-" + + attach_vpc_cni_policy = true + vpc_cni_enable_ipv4 = true + + oidc_providers = { + main = { + provider_arn = module.eks.oidc_provider_arn + namespace_service_accounts = ["kube-system:aws-node"] + } + } + + tags = local.tags +} diff --git a/examples/appmesh-mtls/main.tf b/examples/appmesh-mtls/main.tf index 47f5c6c335..6fc0d943e7 100644 --- a/examples/appmesh-mtls/main.tf +++ b/examples/appmesh-mtls/main.tf @@ -42,10 +42,6 @@ provider "kubectl" { } } -data "aws_eks_cluster_auth" "this" { - name = module.eks.cluster_name -} - data "aws_availability_zones" "available" {} data "aws_partition" "current" {} @@ -191,19 +187,12 @@ locals { #tfsec:ignore:aws-eks-enable-control-plane-logging module "eks" { source = "terraform-aws-modules/eks/aws" - version = "~> 19.9" + version = "~> 19.10" cluster_name = local.name cluster_version = "1.24" cluster_endpoint_public_access = true - # EKS Addons - cluster_addons = { - coredns = {} - kube-proxy = {} - vpc-cni = {} - } - vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets @@ -229,10 +218,21 @@ module "eks_blueprints_kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - eks_cluster_id = module.eks.cluster_name - eks_cluster_endpoint = module.eks.cluster_endpoint - eks_oidc_provider = module.eks.oidc_provider - eks_cluster_version = module.eks.cluster_version + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = 
module.eks.oidc_provider_arn + + eks_addons = { + coredns = {} + vpc-cni = { + service_account_role_arn = module.vpc_cni_irsa.iam_role_arn + } + kube-proxy = {} + } + + aws_privateca_acmca_arn = aws_acmpca_certificate_authority.this.arn enable_cert_manager = true @@ -248,7 +248,7 @@ module "eks_blueprints_kubernetes_addons" { module "appmesh_addon" { # Users should pin the version to the latest available release # tflint-ignore: terraform_module_pinned_source - source = "github.com/aws-ia/terraform-aws-eks-blueprints-addon" + source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons//modules/eks-blueprints-addon" chart = "appmesh-controller" chart_version = "1.7.0" @@ -267,8 +267,8 @@ module "appmesh_addon" { set_irsa_name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" # IAM role for service account (IRSA) - create_role = true - role_name = "appmesh-controller" + create_role = true + role_name_prefix = "${module.eks.cluster_name}-appmesh-controller-" role_policy_arns = { appmesh = aws_iam_policy.this.arn } @@ -415,7 +415,26 @@ module "vpc" { } resource "aws_iam_policy" "this" { - name = "${module.eks.cluster_name}-appmesh" + name_prefix = "${module.eks.cluster_name}-appmesh-" description = "IAM Policy for App Mesh" policy = data.aws_iam_policy_document.this.json } + +module "vpc_cni_irsa" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "~> 5.14" + + role_nam_prefix = "${module.eks.cluster_name}-vpc-cni-" + + attach_vpc_cni_policy = true + vpc_cni_enable_ipv4 = true + + oidc_providers = { + main = { + provider_arn = module.eks.oidc_provider_arn + namespace_service_accounts = ["kube-system:aws-node"] + } + } + + tags = local.tags +} diff --git a/examples/argocd/main.tf b/examples/argocd/main.tf index 53cd90bd85..7fd160a85a 100644 --- a/examples/argocd/main.tf +++ b/examples/argocd/main.tf @@ -30,17 +30,13 @@ provider "helm" { provider "bcrypt" {} -data "aws_eks_cluster_auth" "this" { - name = 
module.eks.cluster_name -} - data "aws_availability_zones" "available" {} locals { name = basename(path.cwd) region = "us-west-2" - cluster_version = "1.24" + cluster_version = "1.25" vpc_cidr = "10.0.0.0/16" azs = slice(data.aws_availability_zones.available.names, 0, 3) @@ -58,19 +54,12 @@ locals { #tfsec:ignore:aws-eks-enable-control-plane-logging module "eks" { source = "terraform-aws-modules/eks/aws" - version = "~> 19.9" + version = "~> 19.10" cluster_name = local.name cluster_version = local.cluster_version cluster_endpoint_public_access = true - # EKS Addons - cluster_addons = { - coredns = {} - kube-proxy = {} - vpc-cni = {} - } - vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets @@ -96,10 +85,22 @@ module "eks_blueprints_kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - eks_cluster_id = module.eks.cluster_name - eks_cluster_endpoint = module.eks.cluster_endpoint - eks_oidc_provider = module.eks.oidc_provider - eks_cluster_version = module.eks.cluster_version + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = module.eks.oidc_provider_arn + + eks_addons = { + aws-ebs-csi-driver = { + service_account_role_arn = module.ebs_csi_driver_irsa.iam_role_arn + } + coredns = {} + vpc-cni = { + service_account_role_arn = module.vpc_cni_irsa.iam_role_arn + } + kube-proxy = {} + } enable_argocd = true # This example shows how to set default ArgoCD Admin Password using SecretsManager with Helm Chart set_sensitive values. 
@@ -127,8 +128,7 @@ module "eks_blueprints_kubernetes_addons" { } # Add-ons - enable_amazon_eks_aws_ebs_csi_driver = true - enable_aws_for_fluentbit = true + enable_aws_for_fluentbit = true # Let fluentbit create the cw log group aws_for_fluentbit_create_cw_log_group = false enable_cert_manager = true @@ -206,3 +206,40 @@ module "vpc" { tags = local.tags } + +module "ebs_csi_driver_irsa" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "~> 5.14" + + role_name_prefix = "${module.eks.cluster_name}-ebs-csi-driver-" + + attach_ebs_csi_policy = true + + oidc_providers = { + main = { + provider_arn = module.eks.oidc_provider_arn + namespace_service_accounts = ["kube-system:ebs-csi-controller-sa"] + } + } + + tags = local.tags +} + +module "vpc_cni_irsa" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "~> 5.14" + + role_name_prefix = "${module.eks.cluster_name}-vpc-cni-" + + attach_vpc_cni_policy = true + vpc_cni_enable_ipv4 = true + + oidc_providers = { + main = { + provider_arn = module.eks.oidc_provider_arn + namespace_service_accounts = ["kube-system:aws-node"] + } + } + + tags = local.tags +} diff --git a/examples/blue-green-upgrade/modules/eks_cluster/main.tf b/examples/blue-green-upgrade/modules/eks_cluster/main.tf index d197aeeeb5..2d31b99a28 100644 --- a/examples/blue-green-upgrade/modules/eks_cluster/main.tf +++ b/examples/blue-green-upgrade/modules/eks_cluster/main.tf @@ -365,9 +365,13 @@ module "eks_blueprints" { module "kubernetes_addons" { # Users should pin the version to the latest available release # tflint-ignore: terraform_module_pinned_source - source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - eks_cluster_id = module.eks_blueprints.eks_cluster_id - eks_cluster_domain = local.eks_cluster_domain + source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" + + cluster_name = module.eks.cluster_name + cluster_endpoint = 
module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = module.eks.oidc_provider_arn #--------------------------------------------------------------- # ARGO CD ADD-ON @@ -399,36 +403,17 @@ module "kubernetes_addons" { } #--------------------------------------------------------------- - # EKS Managed AddOns - # https://aws-ia.github.io/terraform-aws-eks-blueprints/add-ons/ + # EKS AddOns #--------------------------------------------------------------- - - enable_amazon_eks_coredns = true - amazon_eks_coredns_config = { - most_recent = true - kubernetes_version = local.cluster_version - resolve_conflicts = "OVERWRITE" - } - - enable_amazon_eks_aws_ebs_csi_driver = true - amazon_eks_aws_ebs_csi_driver_config = { - most_recent = true - kubernetes_version = local.cluster_version - resolve_conflicts = "OVERWRITE" - } - - enable_amazon_eks_kube_proxy = true - amazon_eks_kube_proxy_config = { - most_recent = true - kubernetes_version = local.cluster_version - resolve_conflicts = "OVERWRITE" - } - - enable_amazon_eks_vpc_cni = true - amazon_eks_vpc_cni_config = { - most_recent = true - kubernetes_version = local.cluster_version - resolve_conflicts = "OVERWRITE" + eks_addons = { + aws-ebs-csi-driver = { + service_account_role_arn = module.ebs_csi_driver_irsa.iam_role_arn + } + coredns = {} + vpc-cni = { + service_account_role_arn = module.vpc_cni_irsa.iam_role_arn + } + kube-proxy = {} } #--------------------------------------------------------------- @@ -457,5 +442,41 @@ module "kubernetes_addons" { } enable_kubecost = true +} + +module "ebs_csi_driver_irsa" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "~> 5.14" + + role_name_prefix = "${module.eks.cluster_name}-ebs-csi-driver-" + attach_ebs_csi_policy = true + + oidc_providers = { + main = { + provider_arn = module.eks.oidc_provider_arn + namespace_service_accounts = 
["kube-system:ebs-csi-controller-sa"] + } + } + + tags = local.tags +} + +module "vpc_cni_irsa" { + source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" + version = "~> 5.14" + + role_name_prefix = "${module.eks.cluster_name}-vpc-cni-" + + attach_vpc_cni_policy = true + vpc_cni_enable_ipv4 = true + + oidc_providers = { + main = { + provider_arn = module.eks.oidc_provider_arn + namespace_service_accounts = ["kube-system:aws-node"] + } + } + + tags = local.tags } diff --git a/examples/external-secrets/main.tf b/examples/external-secrets/main.tf index 79cf90fc4c..494bddc540 100644 --- a/examples/external-secrets/main.tf +++ b/examples/external-secrets/main.tf @@ -42,11 +42,8 @@ provider "kubectl" { } } -data "aws_eks_cluster_auth" "this" { - name = module.eks.cluster_name -} - data "aws_availability_zones" "available" {} + data "aws_caller_identity" "current" {} locals { @@ -75,19 +72,12 @@ locals { #tfsec:ignore:aws-eks-enable-control-plane-logging module "eks" { source = "terraform-aws-modules/eks/aws" - version = "~> 19.9" + version = "~> 19.10" cluster_name = local.name - cluster_version = "1.24" + cluster_version = "1.25" cluster_endpoint_public_access = true - # EKS Addons - cluster_addons = { - coredns = {} - kube-proxy = {} - vpc-cni = {} - } - vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets @@ -113,11 +103,25 @@ module "eks_blueprints_kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - eks_cluster_id = module.eks.cluster_name - eks_cluster_endpoint = module.eks.cluster_endpoint - eks_oidc_provider = module.eks.oidc_provider - eks_cluster_version = module.eks.cluster_version + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = module.eks.oidc_provider_arn + + # 
EKS Add-ons + eks_addons = { + aws-ebs-csi-driver = { + service_account_role_arn = module.ebs_csi_driver_irsa.iam_role_arn + } + coredns = {} + vpc-cni = { + service_account_role_arn = module.vpc_cni_irsa.iam_role_arn + } + kube-proxy = {} + } + # Add-ons enable_external_secrets = true tags = local.tags @@ -163,53 +167,11 @@ module "vpc" { tags = local.tags } -#--------------------------------------------------------------- -# External Secrets Operator - Secret -#--------------------------------------------------------------- - resource "aws_kms_key" "secrets" { enable_key_rotation = true } -module "cluster_secretstore_role" { - source = "../../modules/irsa" - kubernetes_namespace = local.namespace - create_kubernetes_namespace = false - kubernetes_service_account = local.cluster_secretstore_sa - irsa_iam_policies = [aws_iam_policy.cluster_secretstore.arn] - eks_cluster_id = module.eks.cluster_name - eks_oidc_provider_arn = module.eks.oidc_provider_arn - depends_on = [module.eks_blueprints_kubernetes_addons] -} - -resource "aws_iam_policy" "cluster_secretstore" { - name_prefix = local.cluster_secretstore_sa - policy = < Date: Fri, 17 Mar 2023 16:00:05 -0700 Subject: [PATCH 05/31] add prefix for role_names --- examples/appmesh-mtls/main.tf | 2 +- examples/karpenter/main.tf | 4 ++-- examples/stateful/main.tf | 4 ++-- examples/tls-with-aws-pca-issuer/main.tf | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/examples/appmesh-mtls/main.tf b/examples/appmesh-mtls/main.tf index 6fc0d943e7..12c71b93af 100644 --- a/examples/appmesh-mtls/main.tf +++ b/examples/appmesh-mtls/main.tf @@ -424,7 +424,7 @@ module "vpc_cni_irsa" { source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" version = "~> 5.14" - role_nam_prefix = "${module.eks.cluster_name}-vpc-cni-" + role_name_prefix = "${module.eks.cluster_name}-vpc-cni-" attach_vpc_cni_policy = true vpc_cni_enable_ipv4 = true diff --git a/examples/karpenter/main.tf 
b/examples/karpenter/main.tf index 627425e033..30e54f79b1 100644 --- a/examples/karpenter/main.tf +++ b/examples/karpenter/main.tf @@ -349,7 +349,7 @@ module "ebs_csi_driver_irsa" { source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" version = "~> 5.14" - role_name = "ebs-csi-driver" + role_name_prefix = "${module.eks.cluster_name}-ebs-csi-driver-" attach_ebs_csi_policy = true @@ -367,7 +367,7 @@ module "vpc_cni_irsa" { source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" version = "~> 5.14" - role_name = "vpc-cni" + role_name_prefix = "${module.eks.cluster_name}-vpc-cni-" attach_vpc_cni_policy = true vpc_cni_enable_ipv4 = true diff --git a/examples/stateful/main.tf b/examples/stateful/main.tf index 473916402d..a1a35b25ad 100644 --- a/examples/stateful/main.tf +++ b/examples/stateful/main.tf @@ -420,7 +420,7 @@ module "ebs_csi_driver_irsa" { source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" version = "~> 5.14" - role_name = "ebs-csi-driver" + role_name_prefix = "${module.eks.cluster_name}-ebs-csi-driver-" attach_ebs_csi_policy = true @@ -438,7 +438,7 @@ module "vpc_cni_irsa" { source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" version = "~> 5.14" - role_name = "vpc-cni" + role_name_prefix = "${module.eks.cluster_name}-vpc-cni-" attach_vpc_cni_policy = true vpc_cni_enable_ipv4 = true diff --git a/examples/tls-with-aws-pca-issuer/main.tf b/examples/tls-with-aws-pca-issuer/main.tf index a88e1ead51..d0eb08f652 100644 --- a/examples/tls-with-aws-pca-issuer/main.tf +++ b/examples/tls-with-aws-pca-issuer/main.tf @@ -287,7 +287,7 @@ module "vpc_cni_irsa" { source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks" version = "~> 5.14" - role_name = "vpc-cni" + role_name_prefix = "${module.eks.cluster_name}-vpc-cni-" attach_vpc_cni_policy = true vpc_cni_enable_ipv4 = true From 54f5aa7c97c64e9d6649d9fbfc5e89d12de514b8 Mon 
Sep 17 00:00:00 2001 From: Apoorva Kulkarni Date: Fri, 17 Mar 2023 16:04:46 -0700 Subject: [PATCH 06/31] remove kubecost from example --- examples/blue-green-upgrade/modules/eks_cluster/main.tf | 2 -- 1 file changed, 2 deletions(-) diff --git a/examples/blue-green-upgrade/modules/eks_cluster/main.tf b/examples/blue-green-upgrade/modules/eks_cluster/main.tf index 2d31b99a28..22253403ba 100644 --- a/examples/blue-green-upgrade/modules/eks_cluster/main.tf +++ b/examples/blue-green-upgrade/modules/eks_cluster/main.tf @@ -440,8 +440,6 @@ module "kubernetes_addons" { policy = "sync" logLevel = "debug" } - - enable_kubecost = true } module "ebs_csi_driver_irsa" { From 51cbca6d50032f1cf0a8ac37da7db0320cad5669 Mon Sep 17 00:00:00 2001 From: Apoorva Kulkarni Date: Fri, 17 Mar 2023 16:07:33 -0700 Subject: [PATCH 07/31] fix error --- examples/appmesh-mtls/main.tf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/appmesh-mtls/main.tf b/examples/appmesh-mtls/main.tf index 12c71b93af..8d3f652ced 100644 --- a/examples/appmesh-mtls/main.tf +++ b/examples/appmesh-mtls/main.tf @@ -267,8 +267,8 @@ module "appmesh_addon" { set_irsa_name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" # IAM role for service account (IRSA) - create_role = true - role_name_prefix = "${module.eks.cluster_name}-appmesh-controller-" + create_role = true + role_name = "${module.eks.cluster_name}-appmesh-controller-" role_policy_arns = { appmesh = aws_iam_policy.this.arn } From ea410e00f1d2303abf7e233c0418d49cacd4c1e0 Mon Sep 17 00:00:00 2001 From: Apoorva Kulkarni Date: Fri, 17 Mar 2023 18:40:37 -0700 Subject: [PATCH 08/31] add prefix for role_names --- examples/blue-green-upgrade/eks-blue/main.tf | 2 +- .../modules/eks_cluster/main.tf | 112 +++++++++++------- 2 files changed, 67 insertions(+), 47 deletions(-) diff --git a/examples/blue-green-upgrade/eks-blue/main.tf b/examples/blue-green-upgrade/eks-blue/main.tf index da54d5daf4..069900ab08 100644 --- 
a/examples/blue-green-upgrade/eks-blue/main.tf +++ b/examples/blue-green-upgrade/eks-blue/main.tf @@ -32,7 +32,7 @@ module "eks_cluster" { source = "../modules/eks_cluster" suffix_stack_name = "blue" - cluster_version = "1.23" + cluster_version = "1.25" argocd_route53_weight = "100" route53_weight = "100" diff --git a/examples/blue-green-upgrade/modules/eks_cluster/main.tf b/examples/blue-green-upgrade/modules/eks_cluster/main.tf index 22253403ba..40787a14c8 100644 --- a/examples/blue-green-upgrade/modules/eks_cluster/main.tf +++ b/examples/blue-green-upgrade/modules/eks_cluster/main.tf @@ -198,29 +198,18 @@ data "aws_secretsmanager_secret_version" "admin_password_version" { secret_id = data.aws_secretsmanager_secret.argocd.id } -module "eks_blueprints" { - source = "github.com/aws-ia/terraform-aws-eks-blueprints?ref=v4.18.1" +module "eks" { + source = "terraform-aws-modules/eks/aws" + version = "~> 19.10" - cluster_name = local.name + cluster_name = local.name + cluster_version = "1.25" + cluster_endpoint_public_access = true - # EKS Cluster VPC and Subnet mandatory config - vpc_id = data.aws_vpc.vpc.id - private_subnet_ids = data.aws_subnets.private.ids + vpc_id = module.vpc.vpc_id + subnet_ids = module.vpc.private_subnets - # EKS CONTROL PLANE VARIABLES - cluster_version = local.cluster_version - - # List of map_roles - map_roles = [ - { - rolearn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${var.eks_admin_role_name}" # The ARN of the IAM role - username = "ops-role" # The user name within Kubernetes to map to the IAM role - groups = ["system:masters"] # A list of groups within Kubernetes to which the role is mapped; Checkout K8s Role and Rolebindings - } - ] - - # EKS MANAGED NODE GROUPS - managed_node_groups = { + eks_managed_node_groups = { mg_5 = { node_group_name = local.node_group_name instance_types = ["m5.xlarge"] @@ -229,20 +218,49 @@ module "eks_blueprints" { } } - platform_teams = { - admin = { - users = [ - 
data.aws_caller_identity.current.arn, - "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:user/${var.iam_platform_user}", - "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${var.eks_admin_role_name}" - ] + manage_aws_auth_configmap = true + aws_auth_roles = [ + { + rolearn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${var.eks_admin_role_name}" # The ARN of the IAM role + username = "ops-role" # The user name within Kubernetes to map to the IAM role + groups = ["system:masters"] # A list of groups within Kubernetes to which the role is mapped; Checkout K8s Role and Rolebindings } - } + ] + + tags = local.tags +} + +module "admin_team" { + source = "github.com/aws-ia/terraform-aws-eks-blueprints-teams" + + name = "admin" + + # Enables elevated, admin privileges for this team + enable_admin = true + + users = [ + data.aws_caller_identity.current.arn, + "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:user/${var.iam_platform_user}", + "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${var.eks_admin_role_name}" + ] + + cluster_arn = module.eks.cluster_arn + + tags = local.tags +} + - application_teams = { +module "application_teams" { + source = "github.com/aws-ia/terraform-aws-eks-blueprints-teams" + name = "application_teams" + users = [data.aws_caller_identity.current.arn] + cluster_arn = module.eks.cluster_arn + oidc_provider_arn = module.eks.oidc_provider_arn + + namespaces = { team-burnham = { - "labels" = { + labels = { "elbv2.k8s.aws/pod-readiness-gate-inject" = "enabled", "appName" = "burnham-team-app", "projectName" = "project-burnham", @@ -252,7 +270,8 @@ module "eks_blueprints" { "billingCode" = "example", "branch" = "example" } - "quota" = { + + resource_quotas = { "requests.cpu" = "10000m", "requests.memory" = "20Gi", "limits.cpu" = "20000m", @@ -261,13 +280,13 @@ module "eks_blueprints" { "secrets" = 
"10", "services" = "10" } + ## Manifests Example: we can specify a directory with kubernetes manifests that can be automatically applied in the team-riker namespace. manifests_dir = "../kubernetes/team-burnham/" - users = [data.aws_caller_identity.current.arn] } team-riker = { - "labels" = { + labels = { "elbv2.k8s.aws/pod-readiness-gate-inject" = "enabled", "appName" = "riker-team-app", "projectName" = "project-riker", @@ -277,7 +296,7 @@ module "eks_blueprints" { "billingCode" = "example", "branch" = "example" } - "quota" = { + resource_quotas = { "requests.cpu" = "10000m", "requests.memory" = "20Gi", "limits.cpu" = "20000m", @@ -286,21 +305,20 @@ module "eks_blueprints" { "secrets" = "10", "services" = "10" } + ## Manifests Example: we can specify a directory with kubernetes manifests that can be automatically applied in the team-riker namespace. manifests_dir = "../kubernetes/team-riker/" - users = [data.aws_caller_identity.current.arn] } - ecsdemo-frontend = { - "labels" = { + labels = { "elbv2.k8s.aws/pod-readiness-gate-inject" = "enabled", "appName" = "ecsdemo-frontend-app", "projectName" = "ecsdemo-frontend", "environment" = "dev", } #don't use quotas here cause ecsdemo app does not have request/limits - "quota" = { + resource_quotas = { "requests.cpu" = "100", "requests.memory" = "20Gi", "limits.cpu" = "200", @@ -311,17 +329,18 @@ module "eks_blueprints" { } ## Manifests Example: we can specify a directory with kubernetes manifests that can be automatically applied in the team-riker namespace. 
manifests_dir = "../kubernetes/ecsdemo-frontend/" - users = [data.aws_caller_identity.current.arn] } + ecsdemo-nodejs = { - "labels" = { + labels = { "elbv2.k8s.aws/pod-readiness-gate-inject" = "enabled", "appName" = "ecsdemo-nodejs-app", "projectName" = "ecsdemo-nodejs", "environment" = "dev", } + #don't use quotas here cause ecsdemo app does not have request/limits - "quota" = { + resource_quotas = { "requests.cpu" = "10000m", "requests.memory" = "20Gi", "limits.cpu" = "20000m", @@ -330,19 +349,20 @@ module "eks_blueprints" { "secrets" = "10", "services" = "10" } + ## Manifests Example: we can specify a directory with kubernetes manifests that can be automatically applied in the team-riker namespace. manifests_dir = "../kubernetes/ecsdemo-nodejs" - users = [data.aws_caller_identity.current.arn] } ecsdemo-crystal = { - "labels" = { + labels = { "elbv2.k8s.aws/pod-readiness-gate-inject" = "enabled", "appName" = "ecsdemo-crystal-app", "projectName" = "ecsdemo-crystal", "environment" = "dev", } + #don't use quotas here cause ecsdemo app does not have request/limits - "quota" = { + resource_quotas = { "requests.cpu" = "10000m", "requests.memory" = "20Gi", "limits.cpu" = "20000m", @@ -351,9 +371,9 @@ module "eks_blueprints" { "secrets" = "10", "services" = "10" } + ## Manifests Example: we can specify a directory with kubernetes manifests that can be automatically applied in the team-riker namespace. 
manifests_dir = "../kubernetes/ecsdemo-crystal" - users = [data.aws_caller_identity.current.arn] } } @@ -367,7 +387,7 @@ module "kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - cluster_name = module.eks.cluster_name + cluster_name = module.eks_blueprints.eks_cluster_id cluster_endpoint = module.eks.cluster_endpoint cluster_version = module.eks.cluster_version cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url From 11434a3e85de1142ae419f4a387389b839d1298c Mon Sep 17 00:00:00 2001 From: Apoorva Kulkarni Date: Fri, 17 Mar 2023 18:45:40 -0700 Subject: [PATCH 09/31] fix precommit errors --- .../blue-green-upgrade/modules/eks_cluster/main.tf | 13 +++++-------- .../modules/eks_cluster/variables.tf | 2 +- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/examples/blue-green-upgrade/modules/eks_cluster/main.tf b/examples/blue-green-upgrade/modules/eks_cluster/main.tf index 40787a14c8..b93ea3918e 100644 --- a/examples/blue-green-upgrade/modules/eks_cluster/main.tf +++ b/examples/blue-green-upgrade/modules/eks_cluster/main.tf @@ -170,13 +170,6 @@ data "aws_partition" "current" {} # Find the user currently in use by AWS data "aws_caller_identity" "current" {} -data "aws_vpc" "vpc" { - filter { - name = "tag:${var.vpc_tag_key}" - values = [local.tag_val_vpc] - } -} - data "aws_subnets" "private" { filter { name = "tag:${var.vpc_tag_key}" @@ -203,7 +196,7 @@ module "eks" { version = "~> 19.10" cluster_name = local.name - cluster_version = "1.25" + cluster_version = var.cluster_version cluster_endpoint_public_access = true vpc_id = module.vpc.vpc_id @@ -231,6 +224,8 @@ module "eks" { } module "admin_team" { + # Users should pin the version to the latest available release + # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-teams" name = "admin" @@ -251,6 +246,8 @@ module "admin_team" { module "application_teams" { + # 
Users should pin the version to the latest available release + # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-teams" name = "application_teams" diff --git a/examples/blue-green-upgrade/modules/eks_cluster/variables.tf b/examples/blue-green-upgrade/modules/eks_cluster/variables.tf index fde9dcd037..0836f8559e 100644 --- a/examples/blue-green-upgrade/modules/eks_cluster/variables.tf +++ b/examples/blue-green-upgrade/modules/eks_cluster/variables.tf @@ -13,7 +13,7 @@ variable "suffix_stack_name" { variable "cluster_version" { description = "The Version of Kubernetes to deploy" type = string - default = "1.23" + default = "1.25" } variable "hosted_zone_name" { From 7cba485c9208271fe8a3046ac0748539caa41126 Mon Sep 17 00:00:00 2001 From: Apoorva Kulkarni Date: Mon, 20 Mar 2023 10:39:59 -0700 Subject: [PATCH 10/31] updates to blue-green example --- examples/blue-green-upgrade/eks-blue/outputs.tf | 5 ----- examples/blue-green-upgrade/eks-blue/variables.tf | 2 +- .../modules/eks_cluster/main.tf | 15 +++++++++++---- .../modules/eks_cluster/outputs.tf | 11 +++-------- 4 files changed, 15 insertions(+), 18 deletions(-) diff --git a/examples/blue-green-upgrade/eks-blue/outputs.tf b/examples/blue-green-upgrade/eks-blue/outputs.tf index 62df761b32..c0de1186c4 100644 --- a/examples/blue-green-upgrade/eks-blue/outputs.tf +++ b/examples/blue-green-upgrade/eks-blue/outputs.tf @@ -2,8 +2,3 @@ output "eks_cluster_id" { description = "The name of the EKS cluster." 
value = module.eks_cluster.eks_cluster_id } - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks_cluster.configure_kubectl -} diff --git a/examples/blue-green-upgrade/eks-blue/variables.tf b/examples/blue-green-upgrade/eks-blue/variables.tf index a36064016c..50bf788115 100644 --- a/examples/blue-green-upgrade/eks-blue/variables.tf +++ b/examples/blue-green-upgrade/eks-blue/variables.tf @@ -13,7 +13,7 @@ variable "core_stack_name" { variable "hosted_zone_name" { type = string description = "Route53 domain for the cluster." - default = "" + default = "kuapoorv.people.aws.dev" } variable "eks_admin_role_name" { diff --git a/examples/blue-green-upgrade/modules/eks_cluster/main.tf b/examples/blue-green-upgrade/modules/eks_cluster/main.tf index b93ea3918e..4fb88bf85a 100644 --- a/examples/blue-green-upgrade/modules/eks_cluster/main.tf +++ b/examples/blue-green-upgrade/modules/eks_cluster/main.tf @@ -170,6 +170,13 @@ data "aws_partition" "current" {} # Find the user currently in use by AWS data "aws_caller_identity" "current" {} +data "aws_vpc" "vpc" { + filter { + name = "tag:${var.vpc_tag_key}" + values = [local.tag_val_vpc] + } +} + data "aws_subnets" "private" { filter { name = "tag:${var.vpc_tag_key}" @@ -196,11 +203,11 @@ module "eks" { version = "~> 19.10" cluster_name = local.name - cluster_version = var.cluster_version + cluster_version = local.cluster_version cluster_endpoint_public_access = true - vpc_id = module.vpc.vpc_id - subnet_ids = module.vpc.private_subnets + vpc_id = data.aws_vpc.vpc.id + subnet_ids = data.aws_subnets.private.ids eks_managed_node_groups = { mg_5 = { @@ -384,7 +391,7 @@ module "kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - cluster_name = module.eks_blueprints.eks_cluster_id + cluster_name = 
module.eks.cluster_name cluster_endpoint = module.eks.cluster_endpoint cluster_version = module.eks.cluster_version cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url diff --git a/examples/blue-green-upgrade/modules/eks_cluster/outputs.tf b/examples/blue-green-upgrade/modules/eks_cluster/outputs.tf index 04291c8456..24c4a5a6c7 100644 --- a/examples/blue-green-upgrade/modules/eks_cluster/outputs.tf +++ b/examples/blue-green-upgrade/modules/eks_cluster/outputs.tf @@ -1,19 +1,14 @@ output "eks_cluster_id" { description = "The name of the EKS cluster." - value = module.eks_blueprints.eks_cluster_id -} - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks_blueprints.configure_kubectl + value = module.eks.cluster_name } output "eks_cluster_endpoint" { description = "The endpoint of the EKS cluster." - value = module.eks_blueprints.eks_cluster_endpoint + value = module.eks.cluster_endpoint } output "eks_cluster_certificate_authority_data" { description = "eks_cluster_certificate_authority_data" - value = module.eks_blueprints.eks_cluster_certificate_authority_data + value = module.eks.cluster_certificate_authority_data } From 706a1789b30195e75ce62896a552310c78dd402a Mon Sep 17 00:00:00 2001 From: Apoorva Kulkarni Date: Mon, 20 Mar 2023 11:06:01 -0700 Subject: [PATCH 11/31] remove route53 hosted zone --- examples/blue-green-upgrade/eks-blue/variables.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/blue-green-upgrade/eks-blue/variables.tf b/examples/blue-green-upgrade/eks-blue/variables.tf index 50bf788115..a36064016c 100644 --- a/examples/blue-green-upgrade/eks-blue/variables.tf +++ b/examples/blue-green-upgrade/eks-blue/variables.tf @@ -13,7 +13,7 @@ variable "core_stack_name" { variable "hosted_zone_name" { type = string description = "Route53 domain for the cluster." 
- default = "kuapoorv.people.aws.dev" + default = "" } variable "eks_admin_role_name" { From 27225f3905c8e47838905745a932219455fec0a5 Mon Sep 17 00:00:00 2001 From: Apoorva Kulkarni Date: Mon, 20 Mar 2023 12:01:56 -0700 Subject: [PATCH 12/31] fix pre-commit errors --- examples/blue-green-upgrade/eks-green/outputs.tf | 5 ----- examples/blue-green-upgrade/modules/eks_cluster/main.tf | 1 + 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/examples/blue-green-upgrade/eks-green/outputs.tf b/examples/blue-green-upgrade/eks-green/outputs.tf index 62df761b32..c0de1186c4 100644 --- a/examples/blue-green-upgrade/eks-green/outputs.tf +++ b/examples/blue-green-upgrade/eks-green/outputs.tf @@ -2,8 +2,3 @@ output "eks_cluster_id" { description = "The name of the EKS cluster." value = module.eks_cluster.eks_cluster_id } - -output "configure_kubectl" { - description = "Configure kubectl: make sure you're logged in with the correct AWS profile and run the following command to update your kubeconfig" - value = module.eks_cluster.configure_kubectl -} diff --git a/examples/blue-green-upgrade/modules/eks_cluster/main.tf b/examples/blue-green-upgrade/modules/eks_cluster/main.tf index 4fb88bf85a..6ae24ffe02 100644 --- a/examples/blue-green-upgrade/modules/eks_cluster/main.tf +++ b/examples/blue-green-upgrade/modules/eks_cluster/main.tf @@ -198,6 +198,7 @@ data "aws_secretsmanager_secret_version" "admin_password_version" { secret_id = data.aws_secretsmanager_secret.argocd.id } +#tfsec:ignore:aws-eks-enable-control-plane-logging module "eks" { source = "terraform-aws-modules/eks/aws" version = "~> 19.10" From cb60dfc9ca0c68592b734198b338c0aee613957c Mon Sep 17 00:00:00 2001 From: Rodrigo Bersa Date: Wed, 22 Mar 2023 21:54:04 -0400 Subject: [PATCH 13/31] Fixing `oidc_provider` argunment on `examples` directory. 
--- examples/agones-game-controller/main.tf | 10 +++++----- examples/amp-amg-opensearch/main.tf | 10 +++++----- examples/appmesh-mtls/main.tf | 10 +++++----- examples/argocd/main.tf | 10 +++++----- examples/blue-green-upgrade/README.md | 8 +++++--- .../blue-green-upgrade/modules/eks_cluster/main.tf | 4 ++-- examples/external-secrets/main.tf | 10 +++++----- examples/fargate-serverless/main.tf | 10 +++++----- examples/karpenter/main.tf | 10 +++++----- examples/stateful/main.tf | 10 +++++----- examples/tls-with-aws-pca-issuer/main.tf | 10 +++++----- 11 files changed, 52 insertions(+), 50 deletions(-) diff --git a/examples/agones-game-controller/main.tf b/examples/agones-game-controller/main.tf index 8a2fa48d5a..aaf4eff368 100644 --- a/examples/agones-game-controller/main.tf +++ b/examples/agones-game-controller/main.tf @@ -90,11 +90,11 @@ module "eks_blueprints_kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - cluster_name = module.eks.cluster_name - cluster_endpoint = module.eks.cluster_endpoint - cluster_version = module.eks.cluster_version - cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url - oidc_provider_arn = module.eks.oidc_provider_arn + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + oidc_provider = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = module.eks.oidc_provider_arn #EKS Add-Ons eks_addons = { diff --git a/examples/amp-amg-opensearch/main.tf b/examples/amp-amg-opensearch/main.tf index b7bbdced37..8f7f330cac 100644 --- a/examples/amp-amg-opensearch/main.tf +++ b/examples/amp-amg-opensearch/main.tf @@ -87,11 +87,11 @@ module "eks_blueprints_kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - cluster_name = module.eks.cluster_name - cluster_endpoint = module.eks.cluster_endpoint 
- cluster_version = module.eks.cluster_version - cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url - oidc_provider_arn = module.eks.oidc_provider_arn + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + oidc_provider = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = module.eks.oidc_provider_arn # EKS Add-ons eks_addons = { diff --git a/examples/appmesh-mtls/main.tf b/examples/appmesh-mtls/main.tf index 8d3f652ced..2b3b034850 100644 --- a/examples/appmesh-mtls/main.tf +++ b/examples/appmesh-mtls/main.tf @@ -218,11 +218,11 @@ module "eks_blueprints_kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - cluster_name = module.eks.cluster_name - cluster_endpoint = module.eks.cluster_endpoint - cluster_version = module.eks.cluster_version - cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url - oidc_provider_arn = module.eks.oidc_provider_arn + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + oidc_provider = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = module.eks.oidc_provider_arn eks_addons = { coredns = {} diff --git a/examples/argocd/main.tf b/examples/argocd/main.tf index 7fd160a85a..0381220b06 100644 --- a/examples/argocd/main.tf +++ b/examples/argocd/main.tf @@ -85,11 +85,11 @@ module "eks_blueprints_kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - cluster_name = module.eks.cluster_name - cluster_endpoint = module.eks.cluster_endpoint - cluster_version = module.eks.cluster_version - cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url - oidc_provider_arn = module.eks.oidc_provider_arn + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + 
cluster_version = module.eks.cluster_version + oidc_provider = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = module.eks.oidc_provider_arn eks_addons = { aws-ebs-csi-driver = { diff --git a/examples/blue-green-upgrade/README.md b/examples/blue-green-upgrade/README.md index 3a46c62ae0..77761164fd 100644 --- a/examples/blue-green-upgrade/README.md +++ b/examples/blue-green-upgrade/README.md @@ -77,13 +77,15 @@ Our objective here is to show you how Application teams and Platform teams can c ```bash git clone https://github.com/aws-ia/terraform-aws-eks-blueprints.git -cd examples/upgrade/blue-green-route53 +cd examples/blue-green-upgrade/ ``` -2. Copy the `terraform.tfvars.example` to `terraform.tfvars` and change region, hosted_zone_name, eks_admin_role_name according to your needs. +2. Copy the `terraform.tfvars.example` to `terraform.tfvars` on each `core-infra`, `eks-blue` and `eks-green` folders, and change region, hosted_zone_name, eks_admin_role_name according to your needs. ```shell -cp terraform.tfvars.example terraform.tfvars +cp terraform.tfvars.example core-infra/terraform.tfvars +cp terraform.tfvars.example eks-blue/terraform.tfvars +cp terraform.tfvars.example eks-green/terraform.tfvars ``` - You will need to provide the `hosted_zone_name` for example `my-example.com`. Terraform will create a new hosted zone for the project with name: `${core_stack_name}.${hosted_zone_name}` so in our example `eks-blueprint.my-example.com`. 
diff --git a/examples/blue-green-upgrade/modules/eks_cluster/main.tf b/examples/blue-green-upgrade/modules/eks_cluster/main.tf index 6ae24ffe02..f367f84c19 100644 --- a/examples/blue-green-upgrade/modules/eks_cluster/main.tf +++ b/examples/blue-green-upgrade/modules/eks_cluster/main.tf @@ -395,7 +395,7 @@ module "kubernetes_addons" { cluster_name = module.eks.cluster_name cluster_endpoint = module.eks.cluster_endpoint cluster_version = module.eks.cluster_version - cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url + oidc_provider = module.eks.cluster_oidc_issuer_url oidc_provider_arn = module.eks.oidc_provider_arn #--------------------------------------------------------------- @@ -454,7 +454,7 @@ module "kubernetes_addons" { } enable_karpenter = true enable_aws_for_fluentbit = true - enable_aws_cloudwatch_metrics = true + #enable_aws_cloudwatch_metrics = true #to view the result : terraform state show 'module.kubernetes_addons.module.external_dns[0].module.helm_addon.helm_release.addon[0]' enable_external_dns = true diff --git a/examples/external-secrets/main.tf b/examples/external-secrets/main.tf index 494bddc540..0dab27612a 100644 --- a/examples/external-secrets/main.tf +++ b/examples/external-secrets/main.tf @@ -103,11 +103,11 @@ module "eks_blueprints_kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - cluster_name = module.eks.cluster_name - cluster_endpoint = module.eks.cluster_endpoint - cluster_version = module.eks.cluster_version - cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url - oidc_provider_arn = module.eks.oidc_provider_arn + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + oidc_provider = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = module.eks.oidc_provider_arn # EKS Add-ons eks_addons = { diff --git a/examples/fargate-serverless/main.tf 
b/examples/fargate-serverless/main.tf index 38d73478c8..7b03542341 100644 --- a/examples/fargate-serverless/main.tf +++ b/examples/fargate-serverless/main.tf @@ -89,11 +89,11 @@ module "eks_blueprints_kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - cluster_name = module.eks.cluster_name - cluster_endpoint = module.eks.cluster_endpoint - cluster_version = module.eks.cluster_version - cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url - oidc_provider_arn = module.eks.oidc_provider_arn + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + oidc_provider = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = module.eks.oidc_provider_arn # EKS Add-ons eks_addons = { diff --git a/examples/karpenter/main.tf b/examples/karpenter/main.tf index 30e54f79b1..28113110ac 100644 --- a/examples/karpenter/main.tf +++ b/examples/karpenter/main.tf @@ -131,11 +131,11 @@ module "eks_blueprints_kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - cluster_name = module.eks.cluster_name - cluster_endpoint = module.eks.cluster_endpoint - cluster_version = module.eks.cluster_version - cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url - oidc_provider_arn = module.eks.oidc_provider_arn + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + oidc_provider = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = module.eks.oidc_provider_arn eks_addons = { aws-ebs-csi-driver = { diff --git a/examples/stateful/main.tf b/examples/stateful/main.tf index a1a35b25ad..d72c74299c 100644 --- a/examples/stateful/main.tf +++ b/examples/stateful/main.tf @@ -196,11 +196,11 @@ module "eks_blueprints_kubernetes_addons" { # tflint-ignore: 
terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - cluster_name = module.eks.cluster_name - cluster_endpoint = module.eks.cluster_endpoint - cluster_version = module.eks.cluster_version - cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url - oidc_provider_arn = module.eks.oidc_provider_arn + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + oidc_provider = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = module.eks.oidc_provider_arn eks_addons = { aws-ebs-csi-driver = { diff --git a/examples/tls-with-aws-pca-issuer/main.tf b/examples/tls-with-aws-pca-issuer/main.tf index d0eb08f652..5df80fe87f 100644 --- a/examples/tls-with-aws-pca-issuer/main.tf +++ b/examples/tls-with-aws-pca-issuer/main.tf @@ -96,11 +96,11 @@ module "eks_blueprints_kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - cluster_name = module.eks.cluster_name - cluster_endpoint = module.eks.cluster_endpoint - cluster_version = module.eks.cluster_version - cluster_oidc_issuer_url = module.eks.cluster_oidc_issuer_url - oidc_provider_arn = module.eks.oidc_provider_arn + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + oidc_provider = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = module.eks.oidc_provider_arn # EKS Add-on eks_addons = { From cc733696370adee08eb0fd6604add4330fbc182b Mon Sep 17 00:00:00 2001 From: Apoorva Kulkarni Date: Thu, 23 Mar 2023 05:55:39 -0700 Subject: [PATCH 14/31] fix pre-commit errors --- .../blue-green-upgrade/modules/eks_cluster/main.tf | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/examples/blue-green-upgrade/modules/eks_cluster/main.tf b/examples/blue-green-upgrade/modules/eks_cluster/main.tf index 
f367f84c19..08f7d33b52 100644 --- a/examples/blue-green-upgrade/modules/eks_cluster/main.tf +++ b/examples/blue-green-upgrade/modules/eks_cluster/main.tf @@ -392,11 +392,11 @@ module "kubernetes_addons" { # tflint-ignore: terraform_module_pinned_source source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" - cluster_name = module.eks.cluster_name - cluster_endpoint = module.eks.cluster_endpoint - cluster_version = module.eks.cluster_version - oidc_provider = module.eks.cluster_oidc_issuer_url - oidc_provider_arn = module.eks.oidc_provider_arn + cluster_name = module.eks.cluster_name + cluster_endpoint = module.eks.cluster_endpoint + cluster_version = module.eks.cluster_version + oidc_provider = module.eks.cluster_oidc_issuer_url + oidc_provider_arn = module.eks.oidc_provider_arn #--------------------------------------------------------------- # ARGO CD ADD-ON @@ -452,8 +452,8 @@ module "kubernetes_addons" { aws_load_balancer_controller_helm_config = { service_account = "aws-lb-sa" } - enable_karpenter = true - enable_aws_for_fluentbit = true + enable_karpenter = true + enable_aws_for_fluentbit = true #enable_aws_cloudwatch_metrics = true #to view the result : terraform state show 'module.kubernetes_addons.module.external_dns[0].module.helm_addon.helm_release.addon[0]' From 07ec0d6e8e77614d6825dba6251ed8bb80bae2b3 Mon Sep 17 00:00:00 2001 From: Rodrigo Bersa Date: Fri, 24 Mar 2023 20:10:06 -0400 Subject: [PATCH 15/31] Adjusting `enable_efs_csi_driver` parameter on `examples/stateful`. 
--- examples/stateful/main.tf | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/stateful/main.tf b/examples/stateful/main.tf index d72c74299c..6c77dcfdf9 100644 --- a/examples/stateful/main.tf +++ b/examples/stateful/main.tf @@ -194,7 +194,8 @@ module "eks" { module "eks_blueprints_kubernetes_addons" { # Users should pin the version to the latest available release # tflint-ignore: terraform_module_pinned_source - source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons" + #source = "github.com/aws-ia/terraform-aws-eks-blueprints-addons?ref=2023-03-24-addons-refinement" + source = "../../../../terraform-aws-eks-blueprints-addons" cluster_name = module.eks.cluster_name cluster_endpoint = module.eks.cluster_endpoint @@ -218,7 +219,7 @@ module "eks_blueprints_kubernetes_addons" { enable_velero = true velero_backup_s3_bucket = module.velero_backup_s3_bucket.s3_bucket_id - enable_aws_efs_csi_driver = true + enable_efs_csi_driver = true tags = local.tags } From c9d5bfbe042a47f2d09cd6512aaf7c8b3e6fadcb Mon Sep 17 00:00:00 2001 From: Bryant Biggs Date: Wed, 3 May 2023 14:24:07 -0400 Subject: [PATCH 16/31] chore: Validate `karpenter` addon using the updated `karpenter` example (#1574) --- docs/add-ons/karpenter.md | 54 - examples/do-not-use/README.md | 5 + examples/do-not-use/docs/karpenter.md | 99 + examples/do-not-use/main.tf | 3038 +++++++++++++++++++++++++ examples/do-not-use/outputs.tf | 141 ++ examples/do-not-use/variables.tf | 525 +++++ examples/do-not-use/versions.tf | 18 + examples/karpenter/README.md | 64 +- examples/karpenter/main.tf | 86 +- examples/karpenter/versions.tf | 2 +- 10 files changed, 3849 insertions(+), 183 deletions(-) delete mode 100644 docs/add-ons/karpenter.md create mode 100644 examples/do-not-use/README.md create mode 100644 examples/do-not-use/docs/karpenter.md create mode 100644 examples/do-not-use/main.tf create mode 100644 examples/do-not-use/outputs.tf create mode 100644 examples/do-not-use/variables.tf 
create mode 100644 examples/do-not-use/versions.tf diff --git a/docs/add-ons/karpenter.md b/docs/add-ons/karpenter.md deleted file mode 100644 index 4ec6777e1d..0000000000 --- a/docs/add-ons/karpenter.md +++ /dev/null @@ -1,54 +0,0 @@ -# Karpenter - -Karpenter is an open-source node provisioning project built for Kubernetes. Karpenter automatically launches just the right compute resources to handle your cluster's applications. It is designed to let you take full advantage of the cloud with fast and simple compute provisioning for Kubernetes clusters. - -For complete project documentation, please visit the [Karpenter documentation](https://karpenter.sh/docs/getting-started/). - -## Usage - -Karpenter can be deployed by enabling the add-on via the following. Check out the full [example](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/modules/kubernetes-addons/karpenter/locals.tf) to deploy the EKS Cluster with Karpenter. - -```hcl -enable_karpenter = true -``` - -You can optionally customize the Helm chart that deploys `Karpenter` via the following configuration. 
- -```hcl - enable_karpenter = true - # Queue optional for native handling of instance termination events - karpenter_sqs_queue_arn = "arn:aws:sqs:us-west-2:444455556666:queue1" - # Optional to add name prefix for Karpenter's event bridge rules - karpenter_event_rule_name_prefix = "Karpenter" - # Optional karpenter_helm_config - karpenter_helm_config = { - name = "karpenter" - chart = "karpenter" - repository = "https://charts.karpenter.sh" - version = "0.19.3" - namespace = "karpenter" - values = [templatefile("${path.module}/values.yaml", { - eks_cluster_id = var.eks_cluster_id, - eks_cluster_endpoint = var.eks_cluster_endpoint, - service_account = var.service_account, - operating_system = "linux" - })] - } - - karpenter_irsa_policies = [] # Optional to add additional policies to IRSA -``` - -### GitOps Configuration -The following properties are made available for use when managing the add-on via GitOps. - -Refer to [locals.tf](https://github.com/aws-ia/terraform-aws-eks-blueprints/blob/main/modules/kubernetes-addons/karpenter/locals.tf) for latest config. GitOps with ArgoCD Add-on repo is located [here](https://github.com/aws-samples/eks-blueprints-add-ons/blob/main/chart/values.yaml) - -```hcl - argocd_gitops_config = { - enable = true - serviceAccountName = local.service_account - controllerClusterName = var.eks_cluster_id - controllerClusterEndpoint = local.eks_cluster_endpoint - awsDefaultInstanceProfile = var.node_iam_instance_profile - } -``` diff --git a/examples/do-not-use/README.md b/examples/do-not-use/README.md new file mode 100644 index 0000000000..18d28c3ec3 --- /dev/null +++ b/examples/do-not-use/README.md @@ -0,0 +1,5 @@ +# DO NOT USE + +This is a local copy of https://github.com/aws-ia/terraform-aws-eks-blueprints-addons to speed up testing and validation of both the examples provided here and the addons defined in the addons repository. 
+ +Once all addons and examples have been validated, we will sync the changes from here back up to https://github.com/aws-ia/terraform-aws-eks-blueprints-addons diff --git a/examples/do-not-use/docs/karpenter.md b/examples/do-not-use/docs/karpenter.md new file mode 100644 index 0000000000..61fb291bb1 --- /dev/null +++ b/examples/do-not-use/docs/karpenter.md @@ -0,0 +1,99 @@ +# Karpenter + +## Prerequisites + +If deploying a node template that uses `spot`, please ensure you have the Spot service linked role available in your account. You can run the following command to ensure this role is available: + +```sh +aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true +``` + +## Validate + +The following command will update the `kubeconfig` on your local machine and allow you to interact with your EKS Cluster using `kubectl` to validate the Karpenter deployment. + +1. Run `update-kubeconfig` command: + +```sh +aws eks --region <REGION> update-kubeconfig --name <CLUSTER_NAME> +``` + +2. Test by listing all the pods running currently + +```sh +kubectl get pods -n karpenter + +# Output should look similar to below +NAME READY STATUS RESTARTS AGE +karpenter-6f97df4f77-5nqsk 1/1 Running 0 3m28s +karpenter-6f97df4f77-n7fkf 1/1 Running 0 3m28s +``` + +3. View the current nodes - this example utilizes EKS Fargate for hosting the Karpenter controller so only Fargate nodes are present currently: + +```sh +kubectl get nodes + +# Output should look similar to below +NAME STATUS ROLES AGE VERSION +fargate-ip-10-0-29-25.us-west-2.compute.internal Ready <none> 2m56s v1.26.3-eks-f4dc2c0 +fargate-ip-10-0-36-148.us-west-2.compute.internal Ready <none> 2m57s v1.26.3-eks-f4dc2c0 +fargate-ip-10-0-42-30.us-west-2.compute.internal Ready <none> 2m34s v1.26.3-eks-f4dc2c0 +fargate-ip-10-0-45-112.us-west-2.compute.internal Ready <none> 2m33s v1.26.3-eks-f4dc2c0 +``` + +4. 
Create a sample `pause` deployment to demonstrate scaling: + +```sh +kubectl apply -f - <<EOF +apiVersion: apps/v1 +kind: Deployment +metadata: +  name: inflate +spec: +  replicas: 0 +  selector: +    matchLabels: +      app: inflate +  template: +    metadata: +      labels: +        app: inflate +    spec: +      terminationGracePeriodSeconds: 0 +      containers: +        - name: inflate +          image: public.ecr.aws/eks-distro/kubernetes/pause:3.7 +          resources: +            requests: +              cpu: 1 +EOF +``` + +5. Scale up the sample `pause` deployment to see Karpenter respond by provisioning the required compute: + +```sh +kubectl scale deployment inflate --replicas=5 +``` + +6. View the nodes again - Karpenter has launched a new EC2 node to host the scaled-up deployment: + +```sh +kubectl get nodes + +# Output should look similar to below +NAME STATUS ROLES AGE VERSION +fargate-ip-10-0-29-25.us-west-2.compute.internal Ready <none> 5m15s v1.26.3-eks-f4dc2c0 +fargate-ip-10-0-36-148.us-west-2.compute.internal Ready <none> 5m16s v1.26.3-eks-f4dc2c0 +fargate-ip-10-0-42-30.us-west-2.compute.internal Ready <none> 4m53s v1.26.3-eks-f4dc2c0 +fargate-ip-10-0-45-112.us-west-2.compute.internal Ready <none> 4m52s v1.26.3-eks-f4dc2c0 +ip-10-0-1-184.us-west-2.compute.internal Ready <none> 26s v1.26.2-eks-a59e1f0 # <= new EC2 node launched +``` + +7. Remove the sample `pause` deployment: + +```sh +kubectl delete deployment inflate +``` diff --git a/examples/do-not-use/main.tf b/examples/do-not-use/main.tf new file mode 100644 index 0000000000..2691af9f78 --- /dev/null +++ b/examples/do-not-use/main.tf @@ -0,0 +1,3038 @@ +data "aws_partition" "current" {} +data "aws_caller_identity" "current" {} +data "aws_region" "current" {} + +# This resource is used to provide a means of mapping an implicit dependency +# between the cluster and the addons. +resource "time_sleep" "this" { + create_duration = var.create_delay_duration + + triggers = { + cluster_endpoint = var.cluster_endpoint + cluster_name = var.cluster_name + custom = join(",", var.create_delay_dependencies) + oidc_provider_arn = var.oidc_provider_arn + } +} + +locals { + account_id = data.aws_caller_identity.current.account_id + dns_suffix = data.aws_partition.current.dns_suffix + partition = data.aws_partition.current.partition + region = data.aws_region.current.name + + # Threads the sleep resource into the module to make the dependency + cluster_endpoint = time_sleep.this.triggers["cluster_endpoint"] + cluster_name = time_sleep.this.triggers["cluster_name"] + oidc_provider_arn = time_sleep.this.triggers["oidc_provider_arn"] + + iam_role_policy_prefix = "arn:${local.partition}:iam::aws:policy" + + # Used by Karpenter & AWS Node Termination Handler + ec2_events = { + health_event = { + name = "HealthEvent" + description = "AWS health event" + event_pattern = { + source = ["aws.health"] + 
detail-type = ["AWS Health Event"] + } + } + spot_interupt = { + name = "SpotInterrupt" + description = "EC2 spot instance interruption warning" + event_pattern = { + source = ["aws.ec2"] + detail-type = ["EC2 Spot Instance Interruption Warning"] + } + } + instance_rebalance = { + name = "InstanceRebalance" + description = "EC2 instance rebalance recommendation" + event_pattern = { + source = ["aws.ec2"] + detail-type = ["EC2 Instance Rebalance Recommendation"] + } + } + instance_state_change = { + name = "InstanceStateChange" + description = "EC2 instance state-change notification" + event_pattern = { + source = ["aws.ec2"] + detail-type = ["EC2 Instance State-change Notification"] + } + } + } +} + +################################################################################ +# Argo Rollouts +################################################################################ + +module "argo_rollouts" { + source = "aws-ia/eks-blueprints-addon/aws" + version = "1.0.0" + + create = var.enable_argo_rollouts + + # https://github.com/argoproj/argo-helm/tree/main/charts/argo-rollouts + name = try(var.argo_rollouts.name, "argo-rollouts") + description = try(var.argo_rollouts.description, "A Helm chart for Argo Rollouts") + namespace = try(var.argo_rollouts.namespace, "argo-rollouts") + create_namespace = try(var.argo_rollouts.create_namespace, true) + chart = "argo-rollouts" + chart_version = try(var.argo_rollouts.chart_version, "2.22.3") + repository = try(var.argo_rollouts.repository, "https://argoproj.github.io/argo-helm") + values = try(var.argo_rollouts.values, []) + + timeout = try(var.argo_rollouts.timeout, null) + repository_key_file = try(var.argo_rollouts.repository_key_file, null) + repository_cert_file = try(var.argo_rollouts.repository_cert_file, null) + repository_ca_file = try(var.argo_rollouts.repository_ca_file, null) + repository_username = try(var.argo_rollouts.repository_username, null) + repository_password = 
try(var.argo_rollouts.repository_password, null) + devel = try(var.argo_rollouts.devel, null) + verify = try(var.argo_rollouts.verify, null) + keyring = try(var.argo_rollouts.keyring, null) + disable_webhooks = try(var.argo_rollouts.disable_webhooks, null) + reuse_values = try(var.argo_rollouts.reuse_values, null) + reset_values = try(var.argo_rollouts.reset_values, null) + force_update = try(var.argo_rollouts.force_update, null) + recreate_pods = try(var.argo_rollouts.recreate_pods, null) + cleanup_on_fail = try(var.argo_rollouts.cleanup_on_fail, null) + max_history = try(var.argo_rollouts.max_history, null) + atomic = try(var.argo_rollouts.atomic, null) + skip_crds = try(var.argo_rollouts.skip_crds, null) + render_subchart_notes = try(var.argo_rollouts.render_subchart_notes, null) + disable_openapi_validation = try(var.argo_rollouts.disable_openapi_validation, null) + wait = try(var.argo_rollouts.wait, null) + wait_for_jobs = try(var.argo_rollouts.wait_for_jobs, null) + dependency_update = try(var.argo_rollouts.dependency_update, null) + replace = try(var.argo_rollouts.replace, null) + lint = try(var.argo_rollouts.lint, null) + + postrender = try(var.argo_rollouts.postrender, []) + set = try(var.argo_rollouts.set, []) + set_sensitive = try(var.argo_rollouts.set_sensitive, []) + + tags = var.tags +} + +################################################################################ +# Argo Workflows +################################################################################ + +module "argo_workflows" { + source = "aws-ia/eks-blueprints-addon/aws" + version = "1.0.0" + + create = var.enable_argo_workflows + + # https://github.com/argoproj/argo-helm/tree/main/charts/argo-workflows + name = try(var.argo_workflows.name, "argo-workflows") + description = try(var.argo_workflows.description, "A Helm chart for Argo Workflows") + namespace = try(var.argo_workflows.namespace, "argo-workflows") + create_namespace = try(var.argo_workflows.create_namespace, true) + chart 
= "argo-workflows" + chart_version = try(var.argo_workflows.chart_version, "2.22.13") + repository = try(var.argo_workflows.repository, "https://argoproj.github.io/argo-helm") + values = try(var.argo_workflows.values, []) + + timeout = try(var.argo_workflows.timeout, null) + repository_key_file = try(var.argo_workflows.repository_key_file, null) + repository_cert_file = try(var.argo_workflows.repository_cert_file, null) + repository_ca_file = try(var.argo_workflows.repository_ca_file, null) + repository_username = try(var.argo_workflows.repository_username, null) + repository_password = try(var.argo_workflows.repository_password, null) + devel = try(var.argo_workflows.devel, null) + verify = try(var.argo_workflows.verify, null) + keyring = try(var.argo_workflows.keyring, null) + disable_webhooks = try(var.argo_workflows.disable_webhooks, null) + reuse_values = try(var.argo_workflows.reuse_values, null) + reset_values = try(var.argo_workflows.reset_values, null) + force_update = try(var.argo_workflows.force_update, null) + recreate_pods = try(var.argo_workflows.recreate_pods, null) + cleanup_on_fail = try(var.argo_workflows.cleanup_on_fail, null) + max_history = try(var.argo_workflows.max_history, null) + atomic = try(var.argo_workflows.atomic, null) + skip_crds = try(var.argo_workflows.skip_crds, null) + render_subchart_notes = try(var.argo_workflows.render_subchart_notes, null) + disable_openapi_validation = try(var.argo_workflows.disable_openapi_validation, null) + wait = try(var.argo_workflows.wait, null) + wait_for_jobs = try(var.argo_workflows.wait_for_jobs, null) + dependency_update = try(var.argo_workflows.dependency_update, null) + replace = try(var.argo_workflows.replace, null) + lint = try(var.argo_workflows.lint, null) + + postrender = try(var.argo_workflows.postrender, []) + set = try(var.argo_workflows.set, []) + set_sensitive = try(var.argo_workflows.set_sensitive, []) + + tags = var.tags +} + 
+################################################################################ +# ArgoCD +################################################################################ + +module "argocd" { + source = "aws-ia/eks-blueprints-addon/aws" + version = "1.0.0" + + create = var.enable_argocd + + # https://github.com/argoproj/argo-helm/blob/main/charts/argo-cd/Chart.yaml + # (there is no offical helm chart for argocd) + name = try(var.argocd.name, "argo-cd") + description = try(var.argocd.description, "A Helm chart to install the ArgoCD") + namespace = try(var.argocd.namespace, "argocd") + create_namespace = try(var.argocd.create_namespace, true) + chart = "argo-cd" + chart_version = try(var.argocd.chart_version, "5.29.1") + repository = try(var.argocd.repository, "https://argoproj.github.io/argo-helm") + values = try(var.argocd.values, []) + + timeout = try(var.argocd.timeout, null) + repository_key_file = try(var.argocd.repository_key_file, null) + repository_cert_file = try(var.argocd.repository_cert_file, null) + repository_ca_file = try(var.argocd.repository_ca_file, null) + repository_username = try(var.argocd.repository_username, null) + repository_password = try(var.argocd.repository_password, null) + devel = try(var.argocd.devel, null) + verify = try(var.argocd.verify, null) + keyring = try(var.argocd.keyring, null) + disable_webhooks = try(var.argocd.disable_webhooks, null) + reuse_values = try(var.argocd.reuse_values, null) + reset_values = try(var.argocd.reset_values, null) + force_update = try(var.argocd.force_update, null) + recreate_pods = try(var.argocd.recreate_pods, null) + cleanup_on_fail = try(var.argocd.cleanup_on_fail, null) + max_history = try(var.argocd.max_history, null) + atomic = try(var.argocd.atomic, null) + skip_crds = try(var.argocd.skip_crds, null) + render_subchart_notes = try(var.argocd.render_subchart_notes, null) + disable_openapi_validation = try(var.argocd.disable_openapi_validation, null) + wait = try(var.argocd.wait, null) + 
wait_for_jobs = try(var.argocd.wait_for_jobs, null) + dependency_update = try(var.argocd.dependency_update, null) + replace = try(var.argocd.replace, null) + lint = try(var.argocd.lint, null) + + postrender = try(var.argocd.postrender, []) + set = try(var.argocd.set, []) + set_sensitive = try(var.argocd.set_sensitive, []) + + tags = var.tags +} + +################################################################################ +# AWS Cloudwatch Metrics +################################################################################ + +locals { + aws_cloudwatch_metrics_service_account = try(var.aws_cloudwatch_metrics.service_account_name, "aws-cloudwatch-metrics") +} + +module "aws_cloudwatch_metrics" { + source = "aws-ia/eks-blueprints-addon/aws" + version = "1.0.0" + + create = var.enable_aws_cloudwatch_metrics + + # https://github.com/aws/eks-charts/tree/master/stable/aws-cloudwatch-metrics + name = try(var.aws_cloudwatch_metrics.name, "aws-cloudwatch-metrics") + description = try(var.aws_cloudwatch_metrics.description, "A Helm chart to deploy aws-cloudwatch-metrics project") + namespace = try(var.aws_cloudwatch_metrics.namespace, "amazon-cloudwatch") + create_namespace = try(var.aws_cloudwatch_metrics.create_namespace, true) + chart = "aws-cloudwatch-metrics" + chart_version = try(var.aws_cloudwatch_metrics.chart_version, "0.0.8") + repository = try(var.aws_cloudwatch_metrics.repository, "https://aws.github.io/eks-charts") + values = try(var.aws_cloudwatch_metrics.values, []) + + timeout = try(var.aws_cloudwatch_metrics.timeout, null) + repository_key_file = try(var.aws_cloudwatch_metrics.repository_key_file, null) + repository_cert_file = try(var.aws_cloudwatch_metrics.repository_cert_file, null) + repository_ca_file = try(var.aws_cloudwatch_metrics.repository_ca_file, null) + repository_username = try(var.aws_cloudwatch_metrics.repository_username, null) + repository_password = try(var.aws_cloudwatch_metrics.repository_password, null) + devel = 
try(var.aws_cloudwatch_metrics.devel, null) + verify = try(var.aws_cloudwatch_metrics.verify, null) + keyring = try(var.aws_cloudwatch_metrics.keyring, null) + disable_webhooks = try(var.aws_cloudwatch_metrics.disable_webhooks, null) + reuse_values = try(var.aws_cloudwatch_metrics.reuse_values, null) + reset_values = try(var.aws_cloudwatch_metrics.reset_values, null) + force_update = try(var.aws_cloudwatch_metrics.force_update, null) + recreate_pods = try(var.aws_cloudwatch_metrics.recreate_pods, null) + cleanup_on_fail = try(var.aws_cloudwatch_metrics.cleanup_on_fail, null) + max_history = try(var.aws_cloudwatch_metrics.max_history, null) + atomic = try(var.aws_cloudwatch_metrics.atomic, null) + skip_crds = try(var.aws_cloudwatch_metrics.skip_crds, null) + render_subchart_notes = try(var.aws_cloudwatch_metrics.render_subchart_notes, null) + disable_openapi_validation = try(var.aws_cloudwatch_metrics.disable_openapi_validation, null) + wait = try(var.aws_cloudwatch_metrics.wait, null) + wait_for_jobs = try(var.aws_cloudwatch_metrics.wait_for_jobs, null) + dependency_update = try(var.aws_cloudwatch_metrics.dependency_update, null) + replace = try(var.aws_cloudwatch_metrics.replace, null) + lint = try(var.aws_cloudwatch_metrics.lint, null) + + postrender = try(var.aws_cloudwatch_metrics.postrender, []) + set = concat( + [ + { + name = "clusterName" + value = local.cluster_name + }, + { + name = "serviceAccount.name" + value = local.aws_cloudwatch_metrics_service_account + } + ], + try(var.aws_cloudwatch_metrics.set, []) + ) + set_sensitive = try(var.aws_cloudwatch_metrics.set_sensitive, []) + + # IAM role for service account (IRSA) + set_irsa_names = ["serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"] + create_role = try(var.aws_cloudwatch_metrics.create_role, true) + role_name = try(var.aws_cloudwatch_metrics.role_name, "aws-cloudwatch-metrics") + role_name_use_prefix = try(var.aws_cloudwatch_metrics.role_name_use_prefix, true) + role_path = 
try(var.aws_cloudwatch_metrics.role_path, "/") + role_permissions_boundary_arn = try(var.aws_cloudwatch_metrics.role_permissions_boundary_arn, null) + role_description = try(var.aws_cloudwatch_metrics.role_description, "IRSA for aws-cloudwatch-metrics project") + role_policies = lookup(var.aws_cloudwatch_metrics, "role_policies", + { CloudWatchAgentServerPolicy = "arn:${local.partition}:iam::aws:policy/CloudWatchAgentServerPolicy" } + ) + create_policy = try(var.aws_cloudwatch_metrics.create_policy, false) + + oidc_providers = { + this = { + provider_arn = local.oidc_provider_arn + # namespace is inherited from chart + service_account = local.aws_cloudwatch_metrics_service_account + } + } + + tags = var.tags +} + +################################################################################ +# AWS EFS CSI DRIVER +################################################################################ + +locals { + aws_efs_csi_driver_controller_service_account = try(var.aws_efs_csi_driver.controller_service_account_name, "efs-csi-controller-sa") + aws_efs_csi_driver_node_service_account = try(var.aws_efs_csi_driver.node_service_account_name, "efs-csi-node-sa") + efs_arns = lookup(var.aws_efs_csi_driver, "efs_arns", + ["arn:${local.partition}:elasticfilesystem:${local.region}:${local.account_id}:file-system/*"], + ) + efs_access_point_arns = lookup(var.aws_efs_csi_driver, "efs_access_point_arns", + ["arn:${local.partition}:elasticfilesystem:${local.region}:${local.account_id}:access-point/*"] + ) +} + +data "aws_iam_policy_document" "aws_efs_csi_driver" { + count = var.enable_aws_efs_csi_driver ? 
1 : 0 + + statement { + sid = "AllowDescribeAvailabilityZones" + actions = ["ec2:DescribeAvailabilityZones"] + resources = ["*"] + } + + statement { + sid = "AllowDescribeFileSystems" + actions = [ + "elasticfilesystem:DescribeAccessPoints", + "elasticfilesystem:DescribeFileSystems", + "elasticfilesystem:DescribeMountTargets" + ] + resources = flatten([ + local.efs_arns, + local.efs_access_point_arns, + ]) + } + + statement { + sid = "AllowCreateAccessPoint" + actions = ["elasticfilesystem:CreateAccessPoint"] + resources = local.efs_arns + + condition { + test = "StringLike" + variable = "aws:RequestTag/efs.csi.aws.com/cluster" + values = ["true"] + } + } + + statement { + sid = "AllowDeleteAccessPoint" + actions = ["elasticfilesystem:DeleteAccessPoint"] + resources = local.efs_access_point_arns + + condition { + test = "StringLike" + variable = "aws:ResourceTag/efs.csi.aws.com/cluster" + values = ["true"] + } + } + + statement { + sid = "ClientReadWrite" + actions = [ + "elasticfilesystem:ClientRootAccess", + "elasticfilesystem:ClientWrite", + "elasticfilesystem:ClientMount", + ] + resources = local.efs_arns + + condition { + test = "Bool" + variable = "elasticfilesystem:AccessedViaMountTarget" + values = ["true"] + } + } +} + +module "aws_efs_csi_driver" { + source = "aws-ia/eks-blueprints-addon/aws" + version = "1.0.0" + + create = var.enable_aws_efs_csi_driver + + # https://github.com/kubernetes-sigs/aws-efs-csi-driver/tree/master/charts/aws-efs-csi-driver + name = try(var.aws_efs_csi_driver.name, "aws-efs-csi-driver") + description = try(var.aws_efs_csi_driver.description, "A Helm chart to deploy aws-efs-csi-driver") + namespace = try(var.aws_efs_csi_driver.namespace, "kube-system") + create_namespace = try(var.aws_efs_csi_driver.create_namespace, false) + chart = "aws-efs-csi-driver" + chart_version = try(var.aws_efs_csi_driver.chart_version, "2.4.1") + repository = try(var.aws_efs_csi_driver.repository, 
"https://kubernetes-sigs.github.io/aws-efs-csi-driver/") + values = try(var.aws_efs_csi_driver.values, []) + + timeout = try(var.aws_efs_csi_driver.timeout, null) + repository_key_file = try(var.aws_efs_csi_driver.repository_key_file, null) + repository_cert_file = try(var.aws_efs_csi_driver.repository_cert_file, null) + repository_ca_file = try(var.aws_efs_csi_driver.repository_ca_file, null) + repository_username = try(var.aws_efs_csi_driver.repository_username, null) + repository_password = try(var.aws_efs_csi_driver.repository_password, null) + devel = try(var.aws_efs_csi_driver.devel, null) + verify = try(var.aws_efs_csi_driver.verify, null) + keyring = try(var.aws_efs_csi_driver.keyring, null) + disable_webhooks = try(var.aws_efs_csi_driver.disable_webhooks, null) + reuse_values = try(var.aws_efs_csi_driver.reuse_values, null) + reset_values = try(var.aws_efs_csi_driver.reset_values, null) + force_update = try(var.aws_efs_csi_driver.force_update, null) + recreate_pods = try(var.aws_efs_csi_driver.recreate_pods, null) + cleanup_on_fail = try(var.aws_efs_csi_driver.cleanup_on_fail, null) + max_history = try(var.aws_efs_csi_driver.max_history, null) + atomic = try(var.aws_efs_csi_driver.atomic, null) + skip_crds = try(var.aws_efs_csi_driver.skip_crds, null) + render_subchart_notes = try(var.aws_efs_csi_driver.render_subchart_notes, null) + disable_openapi_validation = try(var.aws_efs_csi_driver.disable_openapi_validation, null) + wait = try(var.aws_efs_csi_driver.wait, null) + wait_for_jobs = try(var.aws_efs_csi_driver.wait_for_jobs, null) + dependency_update = try(var.aws_efs_csi_driver.dependency_update, null) + replace = try(var.aws_efs_csi_driver.replace, null) + lint = try(var.aws_efs_csi_driver.lint, null) + + postrender = try(var.aws_efs_csi_driver.postrender, []) + set = concat([ + { + name = "controller.serviceAccount.name" + value = local.aws_efs_csi_driver_controller_service_account + }, + { + name = "node.serviceAccount.name" + value = 
local.aws_efs_csi_driver_node_service_account + }], + try(var.aws_efs_csi_driver.set, []) + ) + set_sensitive = try(var.aws_efs_csi_driver.set_sensitive, []) + + # IAM role for service account (IRSA) + set_irsa_names = [ + "controller.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn", + "node.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" + ] + create_role = try(var.aws_efs_csi_driver.create_role, true) + role_name = try(var.aws_efs_csi_driver.role_name, "aws-efs-csi-driver") + role_name_use_prefix = try(var.aws_efs_csi_driver.role_name_use_prefix, true) + role_path = try(var.aws_efs_csi_driver.role_path, "/") + role_permissions_boundary_arn = lookup(var.aws_efs_csi_driver, "role_permissions_boundary_arn", null) + role_description = try(var.aws_efs_csi_driver.role_description, "IRSA for aws-efs-csi-driver project") + role_policies = lookup(var.aws_efs_csi_driver, "role_policies", {}) + + source_policy_documents = compact(concat( + data.aws_iam_policy_document.aws_efs_csi_driver[*].json, + lookup(var.aws_efs_csi_driver, "source_policy_documents", []) + )) + override_policy_documents = lookup(var.aws_efs_csi_driver, "override_policy_documents", []) + policy_statements = lookup(var.aws_efs_csi_driver, "policy_statements", []) + policy_name = try(var.aws_efs_csi_driver.policy_name, null) + policy_name_use_prefix = try(var.aws_efs_csi_driver.policy_name_use_prefix, true) + policy_path = try(var.aws_efs_csi_driver.policy_path, null) + policy_description = try(var.aws_efs_csi_driver.policy_description, "IAM Policy for AWS EFS CSI Driver") + + oidc_providers = { + controller = { + provider_arn = local.oidc_provider_arn + # namespace is inherited from chart + service_account = local.aws_efs_csi_driver_controller_service_account + } + node = { + provider_arn = local.oidc_provider_arn + # namespace is inherited from chart + service_account = local.aws_efs_csi_driver_node_service_account + } + } + + tags = var.tags +} + 
+################################################################################ +# AWS for Fluent-bit +################################################################################ + +locals { + aws_for_fluentbit_service_account = try(var.aws_for_fluentbit.service_account_name, "aws-for-fluent-bit-sa") +} + +resource "aws_cloudwatch_log_group" "aws_for_fluentbit" { + count = try(var.aws_for_fluentbit_cw_log_group.create, true) && var.enable_aws_for_fluentbit ? 1 : 0 + + name = try(var.aws_for_fluentbit_cw_log_group.name, null) + name_prefix = try(var.aws_for_fluentbit_cw_log_group.name_prefix, "/${var.cluster_name}/aws-fluentbit-logs") + retention_in_days = try(var.aws_for_fluentbit_cw_log_group.retention, 90) + kms_key_id = try(var.aws_for_fluentbit_cw_log_group.kms_key_arn, null) + skip_destroy = try(var.aws_for_fluentbit_cw_log_group.skip_destroy, false) + tags = merge(var.tags, try(var.aws_for_fluentbit_cw_log_group.tags, {})) +} + +data "aws_iam_policy_document" "aws_for_fluentbit" { + count = try(var.aws_for_fluentbit_cw_log_group.create, true) && var.enable_aws_for_fluentbit ? 
1 : 0 + + statement { + sid = "PutLogEvents" + effect = "Allow" + resources = [ + "arn:${local.partition}:logs:${local.region}:${local.account_id}:log-group:${try(var.aws_for_fluentbit_cw_log_group.name, "*")}:log-stream:*", + ] + + actions = [ + "logs:PutLogEvents" + ] + } + + statement { + sid = "CreateCWLogs" + effect = "Allow" + resources = [ + "arn:${local.partition}:logs:${local.region}:${local.account_id}:log-group:${try(var.aws_for_fluentbit_cw_log_group.name, "*")}", + ] + + actions = [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams", + "logs:PutRetentionPolicy", + ] + } +} + +module "aws_for_fluentbit" { + source = "aws-ia/eks-blueprints-addon/aws" + version = "1.0.0" + + create = var.enable_aws_for_fluentbit + + # https://github.com/aws/eks-charts/blob/master/stable/aws-for-fluent-bit/Chart.yaml + name = try(var.aws_for_fluentbit.name, "aws-for-fluent-bit") + description = try(var.aws_for_fluentbit.description, "A Helm chart to install the Fluent-bit Driver") + namespace = try(var.aws_for_fluentbit.namespace, "kube-system") + create_namespace = try(var.aws_for_fluentbit.create_namespace, false) + chart = "aws-for-fluent-bit" + chart_version = try(var.aws_for_fluentbit.chart_version, "0.1.24") + repository = try(var.aws_for_fluentbit.repository, "https://aws.github.io/eks-charts") + values = try(var.aws_for_fluentbit.values, []) + + timeout = try(var.aws_for_fluentbit.timeout, null) + repository_key_file = try(var.aws_for_fluentbit.repository_key_file, null) + repository_cert_file = try(var.aws_for_fluentbit.repository_cert_file, null) + repository_ca_file = try(var.aws_for_fluentbit.repository_ca_file, null) + repository_username = try(var.aws_for_fluentbit.repository_username, null) + repository_password = try(var.aws_for_fluentbit.repository_password, null) + devel = try(var.aws_for_fluentbit.devel, null) + verify = try(var.aws_for_fluentbit.verify, null) + keyring = 
try(var.aws_for_fluentbit.keyring, null) + disable_webhooks = try(var.aws_for_fluentbit.disable_webhooks, null) + reuse_values = try(var.aws_for_fluentbit.reuse_values, null) + reset_values = try(var.aws_for_fluentbit.reset_values, null) + force_update = try(var.aws_for_fluentbit.force_update, null) + recreate_pods = try(var.aws_for_fluentbit.recreate_pods, null) + cleanup_on_fail = try(var.aws_for_fluentbit.cleanup_on_fail, null) + max_history = try(var.aws_for_fluentbit.max_history, null) + atomic = try(var.aws_for_fluentbit.atomic, null) + skip_crds = try(var.aws_for_fluentbit.skip_crds, null) + render_subchart_notes = try(var.aws_for_fluentbit.render_subchart_notes, null) + disable_openapi_validation = try(var.aws_for_fluentbit.disable_openapi_validation, null) + wait = try(var.aws_for_fluentbit.wait, null) + wait_for_jobs = try(var.aws_for_fluentbit.wait_for_jobs, null) + dependency_update = try(var.aws_for_fluentbit.dependency_update, null) + replace = try(var.aws_for_fluentbit.replace, null) + lint = try(var.aws_for_fluentbit.lint, null) + + postrender = try(var.aws_for_fluentbit.postrender, []) + set = concat([ + { + name = "serviceAccount.name" + value = local.aws_for_fluentbit_service_account + }], + try(var.aws_for_fluentbit.set, []) + ) + set_sensitive = try(var.aws_for_fluentbit.set_sensitive, []) + + # IAM role for service account (IRSA) + set_irsa_names = [ + "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn", + ] + create_role = try(var.aws_for_fluentbit.create_role, true) + role_name = try(var.aws_for_fluentbit.role_name, "aws-for-fluent-bit") + role_name_use_prefix = try(var.aws_for_fluentbit.role_name_use_prefix, true) + role_path = try(var.aws_for_fluentbit.role_path, "/") + role_permissions_boundary_arn = lookup(var.aws_for_fluentbit, "role_permissions_boundary_arn", null) + role_description = try(var.aws_for_fluentbit.role_description, "IRSA for aws-for-fluent-bit") + role_policies = lookup(var.aws_for_fluentbit, "role_policies", {}) 
+ + source_policy_documents = compact(concat( + data.aws_iam_policy_document.aws_for_fluentbit[*].json, + lookup(var.aws_for_fluentbit, "source_policy_documents", []) + )) + override_policy_documents = lookup(var.aws_for_fluentbit, "override_policy_documents", []) + policy_statements = lookup(var.aws_for_fluentbit, "policy_statements", []) + policy_name = try(var.aws_for_fluentbit.policy_name, "aws-for-fluent-bit") + policy_name_use_prefix = try(var.aws_for_fluentbit.policy_name_use_prefix, true) + policy_path = try(var.aws_for_fluentbit.policy_path, null) + policy_description = try(var.aws_for_fluentbit.policy_description, "IAM Policy for AWS Fluentbit") + + oidc_providers = { + this = { + provider_arn = local.oidc_provider_arn + # namespace is inherited from chart + service_account = local.aws_for_fluentbit_service_account + } + } + + tags = var.tags +} + +################################################################################ +# AWS FSX CSI DRIVER +################################################################################ + +locals { + aws_fsx_csi_driver_controller_service_account = try(var.aws_fsx_csi_driver.controller_service_account_name, "aws-fsx-csi-controller-sa") + aws_fsx_csi_driver_node_service_account = try(var.aws_fsx_csi_driver.node_service_account_name, "aws-fsx-csi-node-sa") +} + +data "aws_iam_policy_document" "aws_fsx_csi_driver" { + statement { + sid = "AllowCreateServiceLinkedRoles" + resources = ["arn:${local.partition}:iam::*:role/aws-service-role/s3.data-source.lustre.fsx.${local.dns_suffix}/*"] + + actions = [ + "iam:CreateServiceLinkedRole", + "iam:AttachRolePolicy", + "iam:PutRolePolicy", + ] + } + + statement { + sid = "AllowCreateServiceLinkedRole" + resources = ["arn:${local.partition}:iam::${local.account_id}:role/*"] + actions = ["iam:CreateServiceLinkedRole"] + + condition { + test = "StringLike" + variable = "iam:AWSServiceName" + values = ["fsx.${local.dns_suffix}"] + } + } + + statement { + sid = "AllowListBuckets" 
+ resources = ["arn:${local.partition}:s3:::*"] + actions = [ + "s3:ListBucket" + ] + } + + statement { + resources = ["arn:${local.partition}:fsx:${local.region}:${local.account_id}:file-system/*"] + actions = [ + "fsx:CreateFileSystem", + "fsx:DeleteFileSystem", + "fsx:UpdateFileSystem", + ] + } + + statement { + resources = ["arn:${local.partition}:fsx:${local.region}:${local.account_id}:*"] + actions = [ + "fsx:DescribeFileSystems", + "fsx:TagResource" + ] + } +} + +module "aws_fsx_csi_driver" { + source = "aws-ia/eks-blueprints-addon/aws" + version = "1.0.0" + + create = var.enable_aws_fsx_csi_driver + + # https://github.com/kubernetes-sigs/aws-fsx-csi-driver/tree/master/charts/aws-fsx-csi-driver + name = try(var.aws_fsx_csi_driver.name, "aws-fsx-csi-driver") + description = try(var.aws_fsx_csi_driver.description, "A Helm chart for AWS FSx for Lustre CSI Driver") + namespace = try(var.aws_fsx_csi_driver.namespace, "kube-system") + create_namespace = try(var.aws_fsx_csi_driver.create_namespace, false) + chart = "aws-fsx-csi-driver" + chart_version = try(var.aws_fsx_csi_driver.chart_version, "1.5.1") + repository = try(var.aws_fsx_csi_driver.repository, "https://kubernetes-sigs.github.io/aws-fsx-csi-driver/") + values = try(var.aws_fsx_csi_driver.values, []) + + timeout = try(var.aws_fsx_csi_driver.timeout, null) + repository_key_file = try(var.aws_fsx_csi_driver.repository_key_file, null) + repository_cert_file = try(var.aws_fsx_csi_driver.repository_cert_file, null) + repository_ca_file = try(var.aws_fsx_csi_driver.repository_ca_file, null) + repository_username = try(var.aws_fsx_csi_driver.repository_username, null) + repository_password = try(var.aws_fsx_csi_driver.repository_password, null) + devel = try(var.aws_fsx_csi_driver.devel, null) + verify = try(var.aws_fsx_csi_driver.verify, null) + keyring = try(var.aws_fsx_csi_driver.keyring, null) + disable_webhooks = try(var.aws_fsx_csi_driver.disable_webhooks, null) + reuse_values = 
try(var.aws_fsx_csi_driver.reuse_values, null) + reset_values = try(var.aws_fsx_csi_driver.reset_values, null) + force_update = try(var.aws_fsx_csi_driver.force_update, null) + recreate_pods = try(var.aws_fsx_csi_driver.recreate_pods, null) + cleanup_on_fail = try(var.aws_fsx_csi_driver.cleanup_on_fail, null) + max_history = try(var.aws_fsx_csi_driver.max_history, null) + atomic = try(var.aws_fsx_csi_driver.atomic, null) + skip_crds = try(var.aws_fsx_csi_driver.skip_crds, null) + render_subchart_notes = try(var.aws_fsx_csi_driver.render_subchart_notes, null) + disable_openapi_validation = try(var.aws_fsx_csi_driver.disable_openapi_validation, null) + wait = try(var.aws_fsx_csi_driver.wait, null) + wait_for_jobs = try(var.aws_fsx_csi_driver.wait_for_jobs, null) + dependency_update = try(var.aws_fsx_csi_driver.dependency_update, null) + replace = try(var.aws_fsx_csi_driver.replace, null) + lint = try(var.aws_fsx_csi_driver.lint, null) + + postrender = try(var.aws_fsx_csi_driver.postrender, []) + set = concat([ + { + name = "controller.serviceAccount.name" + value = local.aws_fsx_csi_driver_controller_service_account + }, + { + name = "node.serviceAccount.name" + value = local.aws_fsx_csi_driver_node_service_account + }], + try(var.aws_fsx_csi_driver.set, []) + ) + set_sensitive = try(var.aws_fsx_csi_driver.set_sensitive, []) + + # IAM role for service account (IRSA) + set_irsa_names = [ + "controller.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn", + "node.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn" + ] + create_role = try(var.aws_fsx_csi_driver.create_role, true) + role_name = try(var.aws_fsx_csi_driver.role_name, "aws-fsx-csi-driver") + role_name_use_prefix = try(var.aws_fsx_csi_driver.role_name_use_prefix, true) + role_path = try(var.aws_fsx_csi_driver.role_path, "/") + role_permissions_boundary_arn = lookup(var.aws_fsx_csi_driver, "role_permissions_boundary_arn", null) + role_description = try(var.aws_fsx_csi_driver.role_description, 
"IRSA for aws-fsx-csi-driver") + role_policies = lookup(var.aws_fsx_csi_driver, "role_policies", {}) + + source_policy_documents = compact(concat( + data.aws_iam_policy_document.aws_fsx_csi_driver[*].json, + lookup(var.aws_fsx_csi_driver, "source_policy_documents", []) + )) + override_policy_documents = lookup(var.aws_fsx_csi_driver, "override_policy_documents", []) + policy_statements = lookup(var.aws_fsx_csi_driver, "policy_statements", []) + policy_name = try(var.aws_fsx_csi_driver.policy_name, "aws-fsx-csi-driver") + policy_name_use_prefix = try(var.aws_fsx_csi_driver.policy_name_use_prefix, true) + policy_path = try(var.aws_fsx_csi_driver.policy_path, null) + policy_description = try(var.aws_fsx_csi_driver.policy_description, "IAM Policy for AWS FSX CSI Driver") + + oidc_providers = { + controller = { + provider_arn = local.oidc_provider_arn + # namespace is inherited from chart + service_account = local.aws_fsx_csi_driver_controller_service_account + } + node = { + provider_arn = local.oidc_provider_arn + # namespace is inherited from chart + service_account = local.aws_fsx_csi_driver_node_service_account + } + } +} + +################################################################################ +# AWS Load Balancer Controller +################################################################################ + +locals { + aws_load_balancer_controller_service_account = try(var.aws_load_balancer_controller.service_account_name, "aws-load-balancer-controller-sa") +} + +data "aws_iam_policy_document" "aws_load_balancer_controller" { + statement { + resources = ["*"] + actions = ["iam:CreateServiceLinkedRole"] + + condition { + test = "StringEquals" + variable = "iam:AWSServiceName" + values = ["elasticloadbalancing.${local.dns_suffix}"] + } + } + + statement { + resources = ["*"] + actions = [ + "ec2:DescribeAccountAttributes", + "ec2:DescribeAddresses", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeCoipPools", + "ec2:DescribeInstances", + 
"ec2:DescribeInternetGateways", + "ec2:DescribeNetworkInterfaces", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeTags", + "ec2:DescribeVpcPeeringConnections", + "ec2:DescribeVpcs", + "ec2:GetCoipPoolUsage", + "elasticloadbalancing:DescribeListenerCertificates", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeRules", + "elasticloadbalancing:DescribeSSLPolicies", + "elasticloadbalancing:DescribeTags", + "elasticloadbalancing:DescribeTargetGroupAttributes", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + ] + } + + statement { + resources = ["*"] + actions = [ + "acm:DescribeCertificate", + "acm:ListCertificates", + "cognito-idp:DescribeUserPoolClient", + "iam:GetServerCertificate", + "iam:ListServerCertificates", + "shield:CreateProtection", + "shield:DeleteProtection", + "shield:DescribeProtection", + "shield:GetSubscriptionState", + "waf-regional:AssociateWebACL", + "waf-regional:DisassociateWebACL", + "waf-regional:GetWebACL", + "waf-regional:GetWebACLForResource", + "wafv2:AssociateWebACL", + "wafv2:DisassociateWebACL", + "wafv2:GetWebACL", + "wafv2:GetWebACLForResource", + ] + } + + statement { + resources = ["*"] + actions = [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:RevokeSecurityGroupIngress", + ] + } + + statement { + resources = ["*"] + actions = ["ec2:CreateSecurityGroup"] + } + + statement { + resources = ["arn:${local.partition}:ec2:*:*:security-group/*"] + actions = ["ec2:CreateTags"] + + condition { + test = "Null" + variable = "aws:RequestTag/elbv2.k8s.aws/cluster" + values = ["false"] + } + + condition { + test = "StringEquals" + variable = "ec2:CreateAction" + values = ["CreateSecurityGroup"] + } + } + + statement { + resources = ["arn:${local.partition}:ec2:*:*:security-group/*"] + actions = [ + "ec2:CreateTags", + "ec2:DeleteTags", + ] + + 
condition { + test = "Null" + variable = "aws:ResourceTag/ingress.k8s.aws/cluster" + values = ["false"] + } + } + + statement { + resources = [ + "arn:${local.partition}:elasticloadbalancing:*:*:loadbalancer/app/*/*", + "arn:${local.partition}:elasticloadbalancing:*:*:loadbalancer/net/*/*", + "arn:${local.partition}:elasticloadbalancing:*:*:targetgroup/*/*", + ] + actions = [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:RemoveTags", + ] + + condition { + test = "Null" + variable = "aws:ResourceTag/ingress.k8s.aws/cluster" + values = ["false"] + } + } + + statement { + resources = ["arn:${local.partition}:ec2:*:*:security-group/*"] + actions = [ + "ec2:CreateTags", + "ec2:DeleteTags", + ] + + condition { + test = "Null" + variable = "aws:ResourceTag/elbv2.k8s.aws/cluster" + values = ["false"] + } + + condition { + test = "Null" + variable = "aws:RequestTag/elbv2.k8s.aws/cluster" + values = ["true"] + } + } + + statement { + resources = ["*"] + actions = [ + "ec2:AuthorizeSecurityGroupIngress", + "ec2:DeleteSecurityGroup", + "ec2:RevokeSecurityGroupIngress", + ] + + condition { + test = "Null" + variable = "aws:ResourceTag/elbv2.k8s.aws/cluster" + values = ["false"] + } + } + + statement { + resources = ["*"] + actions = [ + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateTargetGroup", + ] + + condition { + test = "Null" + variable = "aws:RequestTag/elbv2.k8s.aws/cluster" + values = ["false"] + } + } + + statement { + resources = ["*"] + actions = [ + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateRule", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteRule", + ] + } + + statement { + resources = [ + "arn:${local.partition}:elasticloadbalancing:*:*:loadbalancer/app/*/*", + "arn:${local.partition}:elasticloadbalancing:*:*:loadbalancer/net/*/*", + "arn:${local.partition}:elasticloadbalancing:*:*:targetgroup/*/*", + ] + actions = [ + 
"elasticloadbalancing:AddTags", + "elasticloadbalancing:RemoveTags", + ] + + condition { + test = "Null" + variable = "aws:RequestTag/elbv2.k8s.aws/cluster" + values = ["true"] + } + + condition { + test = "Null" + variable = "aws:ResourceTag/elbv2.k8s.aws/cluster" + values = ["false"] + } + } + + statement { + resources = [ + "arn:${local.partition}:elasticloadbalancing:*:*:listener/net/*/*/*", + "arn:${local.partition}:elasticloadbalancing:*:*:listener/app/*/*/*", + "arn:${local.partition}:elasticloadbalancing:*:*:listener-rule/net/*/*/*", + "arn:${local.partition}:elasticloadbalancing:*:*:listener-rule/app/*/*/*", + ] + actions = [ + "elasticloadbalancing:AddTags", + "elasticloadbalancing:RemoveTags", + ] + } + + statement { + resources = ["*"] + actions = [ + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:ModifyTargetGroupAttributes", + "elasticloadbalancing:SetIpAddressType", + "elasticloadbalancing:SetSecurityGroups", + "elasticloadbalancing:SetSubnets", + ] + + condition { + test = "Null" + variable = "aws:ResourceTag/elbv2.k8s.aws/cluster" + values = ["false"] + } + } + + statement { + resources = ["arn:${local.partition}:elasticloadbalancing:*:*:targetgroup/*/*"] + actions = [ + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:RegisterTargets", + ] + } + + statement { + resources = ["*"] + actions = [ + "elasticloadbalancing:AddListenerCertificates", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyRule", + "elasticloadbalancing:RemoveListenerCertificates", + "elasticloadbalancing:SetWebAcl", + ] + } +} + +module "aws_load_balancer_controller" { + source = "aws-ia/eks-blueprints-addon/aws" + version = "1.0.0" + + create = var.enable_aws_load_balancer_controller + + # https://github.com/aws/eks-charts/blob/master/stable/aws-load-balancer-controller/Chart.yaml 
+ name = try(var.aws_load_balancer_controller.name, "aws-load-balancer-controller") + description = try(var.aws_load_balancer_controller.description, "A Helm chart to deploy aws-load-balancer-controller for ingress resources") + namespace = try(var.aws_load_balancer_controller.namespace, "kube-system") + # namespace creation is false here as kube-system already exists by default + create_namespace = try(var.aws_load_balancer_controller.create_namespace, false) + chart = "aws-load-balancer-controller" + chart_version = try(var.aws_load_balancer_controller.chart_version, "1.4.8") + repository = try(var.aws_load_balancer_controller.repository, "https://aws.github.io/eks-charts") + values = try(var.aws_load_balancer_controller.values, []) + + timeout = try(var.aws_load_balancer_controller.timeout, null) + repository_key_file = try(var.aws_load_balancer_controller.repository_key_file, null) + repository_cert_file = try(var.aws_load_balancer_controller.repository_cert_file, null) + repository_ca_file = try(var.aws_load_balancer_controller.repository_ca_file, null) + repository_username = try(var.aws_load_balancer_controller.repository_username, null) + repository_password = try(var.aws_load_balancer_controller.repository_password, null) + devel = try(var.aws_load_balancer_controller.devel, null) + verify = try(var.aws_load_balancer_controller.verify, null) + keyring = try(var.aws_load_balancer_controller.keyring, null) + disable_webhooks = try(var.aws_load_balancer_controller.disable_webhooks, null) + reuse_values = try(var.aws_load_balancer_controller.reuse_values, null) + reset_values = try(var.aws_load_balancer_controller.reset_values, null) + force_update = try(var.aws_load_balancer_controller.force_update, null) + recreate_pods = try(var.aws_load_balancer_controller.recreate_pods, null) + cleanup_on_fail = try(var.aws_load_balancer_controller.cleanup_on_fail, null) + max_history = try(var.aws_load_balancer_controller.max_history, null) + atomic = 
try(var.aws_load_balancer_controller.atomic, null) + skip_crds = try(var.aws_load_balancer_controller.skip_crds, null) + render_subchart_notes = try(var.aws_load_balancer_controller.render_subchart_notes, null) + disable_openapi_validation = try(var.aws_load_balancer_controller.disable_openapi_validation, null) + wait = try(var.aws_load_balancer_controller.wait, null) + wait_for_jobs = try(var.aws_load_balancer_controller.wait_for_jobs, null) + dependency_update = try(var.aws_load_balancer_controller.dependency_update, null) + replace = try(var.aws_load_balancer_controller.replace, null) + lint = try(var.aws_load_balancer_controller.lint, null) + + postrender = try(var.aws_load_balancer_controller.postrender, []) + set = concat([ + { + name = "serviceAccount.name" + value = local.aws_load_balancer_controller_service_account + }, { + name = "clusterName" + value = local.cluster_name + }], + try(var.aws_load_balancer_controller.set, []) + ) + set_sensitive = try(var.aws_load_balancer_controller.set_sensitive, []) + + # IAM role for service account (IRSA) + create_role = try(var.aws_load_balancer_controller.create_role, true) + set_irsa_names = ["serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"] + role_name = try(var.aws_load_balancer_controller.role_name, "alb-controller") + role_name_use_prefix = try(var.aws_load_balancer_controller.role_name_use_prefix, true) + role_path = try(var.aws_load_balancer_controller.role_path, "/") + role_permissions_boundary_arn = lookup(var.aws_load_balancer_controller, "role_permissions_boundary_arn", null) + role_description = try(var.aws_load_balancer_controller.role_description, "IRSA for aws-load-balancer-controller project") + role_policies = lookup(var.aws_load_balancer_controller, "role_policies", {}) + + source_policy_documents = compact(concat( + data.aws_iam_policy_document.aws_load_balancer_controller[*].json, + lookup(var.aws_load_balancer_controller, "source_policy_documents", []) + )) + override_policy_documents 
= lookup(var.aws_load_balancer_controller, "override_policy_documents", []) + policy_statements = lookup(var.aws_load_balancer_controller, "policy_statements", []) + policy_name = try(var.aws_load_balancer_controller.policy_name, null) + policy_name_use_prefix = try(var.aws_load_balancer_controller.policy_name_use_prefix, true) + policy_path = try(var.aws_load_balancer_controller.policy_path, null) + policy_description = try(var.aws_load_balancer_controller.policy_description, "IAM Policy for AWS Load Balancer Controller") + + oidc_providers = { + this = { + provider_arn = local.oidc_provider_arn + # namespace is inherited from chart + service_account = local.aws_load_balancer_controller_service_account + } + } + + tags = var.tags +} + +################################################################################ +# AWS Node Termination Handler +################################################################################ + +locals { + aws_node_termination_handler_service_account = try(var.aws_node_termination_handler.service_account_name, "aws-node-termination-handler-sa") + aws_node_termination_handler_events = merge( + { + autoscaling_terminate = { + name = "ASGTerminiate" + description = "Auto scaling instance terminate event" + event_pattern = { + source = ["aws.autoscaling"] + detail-type = ["EC2 Instance-terminate Lifecycle Action"] + } + } + }, + local.ec2_events + ) +} + +module "aws_node_termination_handler_sqs" { + source = "terraform-aws-modules/sqs/aws" + version = "4.0.1" + + create = var.enable_aws_node_termination_handler + + name = try(var.aws_node_termination_handler_sqs.queue_name, "aws-nth-${var.cluster_name}") + + message_retention_seconds = try(var.aws_node_termination_handler_sqs.message_retention_seconds, 300) + sqs_managed_sse_enabled = try(var.aws_node_termination_handler_sqs.sse_enabled, true) + kms_master_key_id = try(var.aws_node_termination_handler_sqs.kms_master_key_id, null) + kms_data_key_reuse_period_seconds = 
try(var.aws_node_termination_handler_sqs.kms_data_key_reuse_period_seconds, null) + + create_queue_policy = true + queue_policy_statements = { + account = { + sid = "SendEventsToQueue" + actions = ["sqs:SendMessage"] + principals = [ + { + type = "Service" + identifiers = [ + "events.${local.dns_suffix}", + "sqs.${local.dns_suffix}", + ] + } + ] + } + } + + tags = merge(var.tags, try(var.aws_node_termination_handler_sqs.tags, {})) +} + +resource "aws_autoscaling_lifecycle_hook" "aws_node_termination_handler" { + for_each = { for k, v in var.aws_node_termination_handler_asg_arns : k => v if var.enable_aws_node_termination_handler } + + name = "aws_node_termination_handler" + autoscaling_group_name = replace(each.value, "/^.*:autoScalingGroupName//", "") + default_result = "CONTINUE" + heartbeat_timeout = 300 + lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING" +} + +resource "aws_autoscaling_group_tag" "aws_node_termination_handler" { + for_each = { for k, v in var.aws_node_termination_handler_asg_arns : k => v if var.enable_aws_node_termination_handler } + + autoscaling_group_name = replace(each.value, "/^.*:autoScalingGroupName//", "") + + tag { + key = "aws-node-termination-handler/managed" + value = "true" + propagate_at_launch = true + } +} + +resource "aws_cloudwatch_event_rule" "aws_node_termination_handler" { + for_each = { for k, v in local.aws_node_termination_handler_events : k => v if var.enable_aws_node_termination_handler } + + name_prefix = "NTH-${each.value.name}-" + description = each.value.description + event_pattern = jsonencode(each.value.event_pattern) + + tags = merge( + { "ClusterName" : var.cluster_name }, + var.tags, + ) +} + +resource "aws_cloudwatch_event_target" "aws_node_termination_handler" { + for_each = { for k, v in local.aws_node_termination_handler_events : k => v if var.enable_aws_node_termination_handler } + + rule = aws_cloudwatch_event_rule.aws_node_termination_handler[each.key].name + target_id = 
"AWSNodeTerminationHandlerQueueTarget" + arn = module.aws_node_termination_handler_sqs.queue_arn +} + +data "aws_iam_policy_document" "aws_node_termination_handler" { + count = var.enable_aws_node_termination_handler ? 1 : 0 + + statement { + actions = [ + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeTags", + "ec2:DescribeInstances", + ] + resources = ["*"] + } + + statement { + actions = ["autoscaling:CompleteLifecycleAction"] + resources = var.aws_node_termination_handler_asg_arns + } + + statement { + actions = [ + "sqs:DeleteMessage", + "sqs:ReceiveMessage", + ] + resources = [module.aws_node_termination_handler_sqs.queue_arn] + } +} + +module "aws_node_termination_handler" { + source = "aws-ia/eks-blueprints-addon/aws" + version = "1.0.0" + + create = var.enable_aws_node_termination_handler + + # https://github.com/aws/eks-charts/blob/master/stable/aws-node-termination-handler/Chart.yaml + name = try(var.aws_node_termination_handler.name, "aws-node-termination-handler") + description = try(var.aws_node_termination_handler.description, "A Helm chart to deploy AWS Node Termination Handler") + namespace = try(var.aws_node_termination_handler.namespace, "aws-node-termination-handler") + create_namespace = try(var.aws_node_termination_handler.create_namespace, true) + chart = "aws-node-termination-handler" + chart_version = try(var.aws_node_termination_handler.chart_version, "0.21.0") + repository = try(var.aws_node_termination_handler.repository, "https://aws.github.io/eks-charts") + values = try(var.aws_node_termination_handler.values, []) + + timeout = try(var.aws_node_termination_handler.timeout, null) + repository_key_file = try(var.aws_node_termination_handler.repository_key_file, null) + repository_cert_file = try(var.aws_node_termination_handler.repository_cert_file, null) + repository_ca_file = try(var.aws_node_termination_handler.repository_ca_file, null) + repository_username = 
try(var.aws_node_termination_handler.repository_username, null) + repository_password = try(var.aws_node_termination_handler.repository_password, null) + devel = try(var.aws_node_termination_handler.devel, null) + verify = try(var.aws_node_termination_handler.verify, null) + keyring = try(var.aws_node_termination_handler.keyring, null) + disable_webhooks = try(var.aws_node_termination_handler.disable_webhooks, null) + reuse_values = try(var.aws_node_termination_handler.reuse_values, null) + reset_values = try(var.aws_node_termination_handler.reset_values, null) + force_update = try(var.aws_node_termination_handler.force_update, null) + recreate_pods = try(var.aws_node_termination_handler.recreate_pods, null) + cleanup_on_fail = try(var.aws_node_termination_handler.cleanup_on_fail, null) + max_history = try(var.aws_node_termination_handler.max_history, null) + atomic = try(var.aws_node_termination_handler.atomic, null) + skip_crds = try(var.aws_node_termination_handler.skip_crds, null) + render_subchart_notes = try(var.aws_node_termination_handler.render_subchart_notes, null) + disable_openapi_validation = try(var.aws_node_termination_handler.disable_openapi_validation, null) + wait = try(var.aws_node_termination_handler.wait, null) + wait_for_jobs = try(var.aws_node_termination_handler.wait_for_jobs, null) + dependency_update = try(var.aws_node_termination_handler.dependency_update, null) + replace = try(var.aws_node_termination_handler.replace, null) + lint = try(var.aws_node_termination_handler.lint, null) + + postrender = try(var.aws_node_termination_handler.postrender, []) + set = concat( + [ + { + name = "serviceAccount.name" + value = local.aws_node_termination_handler_service_account + }, + { + name = "awsRegion" + value = local.region + }, + { name = "queueURL" + value = module.aws_node_termination_handler_sqs.queue_url + }, + { + name = "enableSqsTerminationDraining" + value = true + } + ], + try(var.aws_node_termination_handler.set, []) + ) + 
set_sensitive = try(var.aws_node_termination_handler.set_sensitive, []) + + # IAM role for service account (IRSA) + set_irsa_names = ["serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"] + create_role = try(var.aws_node_termination_handler.create_role, true) + role_name = try(var.aws_node_termination_handler.role_name, "aws-node-termination-handler") + role_name_use_prefix = try(var.aws_node_termination_handler.role_name_use_prefix, true) + role_path = try(var.aws_node_termination_handler.role_path, "/") + role_permissions_boundary_arn = lookup(var.aws_node_termination_handler, "role_permissions_boundary_arn", null) + role_description = try(var.aws_node_termination_handler.role_description, "IRSA for AWS Node Termination Handler project") + role_policies = lookup(var.aws_node_termination_handler, "role_policies", {}) + + source_policy_documents = compact(concat( + data.aws_iam_policy_document.aws_node_termination_handler[*].json, + lookup(var.aws_node_termination_handler, "source_policy_documents", []) + )) + override_policy_documents = lookup(var.aws_node_termination_handler, "override_policy_documents", []) + policy_statements = lookup(var.aws_node_termination_handler, "policy_statements", []) + policy_name = try(var.aws_node_termination_handler.policy_name, null) + policy_name_use_prefix = try(var.aws_node_termination_handler.policy_name_use_prefix, true) + policy_path = try(var.aws_node_termination_handler.policy_path, null) + policy_description = try(var.aws_node_termination_handler.policy_description, "IAM Policy for AWS Node Termination Handler") + + oidc_providers = { + this = { + provider_arn = local.oidc_provider_arn + # namespace is inherited from chart + service_account = local.aws_node_termination_handler_service_account + } + } + + tags = var.tags +} + +################################################################################ +# AWS Private CA Issuer +################################################################################ + 
+locals { + aws_privateca_issuer_service_account = try(var.aws_privateca_issuer.service_account_name, "aws-privateca-issuer-sa") +} + +data "aws_iam_policy_document" "aws_privateca_issuer" { + count = var.enable_aws_privateca_issuer ? 1 : 0 + + statement { + actions = [ + "acm-pca:DescribeCertificateAuthority", + "acm-pca:GetCertificate", + "acm-pca:IssueCertificate", + ] + resources = [ + try(var.aws_privateca_issuer.acmca_arn, + "arn:${local.partition}:acm-pca:${local.region}:${local.account_id}:certificate-authority/*") + ] + } +} + +module "aws_privateca_issuer" { + source = "aws-ia/eks-blueprints-addon/aws" + version = "1.0.0" + + create = var.enable_aws_privateca_issuer + + # https://github.com/cert-manager/aws-privateca-issuer/blob/main/charts/aws-pca-issuer/Chart.yaml + name = try(var.aws_privateca_issuer.name, "aws-privateca-issuer") + description = try(var.aws_privateca_issuer.description, "A Helm chart to install the AWS Private CA Issuer") + namespace = try(var.aws_privateca_issuer.namespace, "kube-system") + create_namespace = try(var.aws_privateca_issuer.create_namespace, false) + chart = "aws-privateca-issuer" + chart_version = try(var.aws_privateca_issuer.chart_version, "v1.2.5") + repository = try(var.aws_privateca_issuer.repository, "https://cert-manager.github.io/aws-privateca-issuer") + values = try(var.aws_privateca_issuer.values, []) + + timeout = try(var.aws_privateca_issuer.timeout, null) + repository_key_file = try(var.aws_privateca_issuer.repository_key_file, null) + repository_cert_file = try(var.aws_privateca_issuer.repository_cert_file, null) + repository_ca_file = try(var.aws_privateca_issuer.repository_ca_file, null) + repository_username = try(var.aws_privateca_issuer.repository_username, null) + repository_password = try(var.aws_privateca_issuer.repository_password, null) + devel = try(var.aws_privateca_issuer.devel, null) + verify = try(var.aws_privateca_issuer.verify, null) + keyring = try(var.aws_privateca_issuer.keyring, null) + 
disable_webhooks = try(var.aws_privateca_issuer.disable_webhooks, null) + reuse_values = try(var.aws_privateca_issuer.reuse_values, null) + reset_values = try(var.aws_privateca_issuer.reset_values, null) + force_update = try(var.aws_privateca_issuer.force_update, null) + recreate_pods = try(var.aws_privateca_issuer.recreate_pods, null) + cleanup_on_fail = try(var.aws_privateca_issuer.cleanup_on_fail, null) + max_history = try(var.aws_privateca_issuer.max_history, null) + atomic = try(var.aws_privateca_issuer.atomic, null) + skip_crds = try(var.aws_privateca_issuer.skip_crds, null) + render_subchart_notes = try(var.aws_privateca_issuer.render_subchart_notes, null) + disable_openapi_validation = try(var.aws_privateca_issuer.disable_openapi_validation, null) + wait = try(var.aws_privateca_issuer.wait, null) + wait_for_jobs = try(var.aws_privateca_issuer.wait_for_jobs, null) + dependency_update = try(var.aws_privateca_issuer.dependency_update, null) + replace = try(var.aws_privateca_issuer.replace, null) + lint = try(var.aws_privateca_issuer.lint, null) + + postrender = try(var.aws_privateca_issuer.postrender, []) + set = concat([ + { + name = "serviceAccount.name" + value = local.aws_privateca_issuer_service_account + }], + try(var.aws_privateca_issuer.set, []) + ) + set_sensitive = try(var.aws_privateca_issuer.set_sensitive, []) + + # IAM role for service account (IRSA) + set_irsa_names = ["serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"] + create_role = try(var.aws_privateca_issuer.create_role, true) + role_name = try(var.aws_privateca_issuer.role_name, "aws-privateca-issuer") + role_name_use_prefix = try(var.aws_privateca_issuer.role_name_use_prefix, true) + role_path = try(var.aws_privateca_issuer.role_path, "/") + role_permissions_boundary_arn = lookup(var.aws_privateca_issuer, "role_permissions_boundary_arn", null) + role_description = try(var.aws_privateca_issuer.role_description, "IRSA for AWS Private CA Issuer") + role_policies = 
lookup(var.aws_privateca_issuer, "role_policies", {}) + + source_policy_documents = compact(concat( + data.aws_iam_policy_document.aws_privateca_issuer[*].json, + lookup(var.aws_privateca_issuer, "source_policy_documents", []) + )) + override_policy_documents = lookup(var.aws_privateca_issuer, "override_policy_documents", []) + policy_statements = lookup(var.aws_privateca_issuer, "policy_statements", []) + policy_name = try(var.aws_privateca_issuer.policy_name, "aws-privateca-issuer") + policy_name_use_prefix = try(var.aws_privateca_issuer.policy_name_use_prefix, true) + policy_path = try(var.aws_privateca_issuer.policy_path, null) + policy_description = try(var.aws_privateca_issuer.policy_description, "IAM Policy for AWS Private CA Issuer") + + oidc_providers = { + controller = { + provider_arn = local.oidc_provider_arn + # namespace is inherited from chart + service_account = local.aws_privateca_issuer_service_account + } + } + + tags = var.tags +} + +################################################################################ +# Cert Manager +################################################################################ + +locals { + cert_manager_service_account = try(var.cert_manager.service_account_name, "cert-manager") + create_cert_manager_irsa = var.enable_cert_manager && length(var.cert_manager_route53_hosted_zone_arns) > 0 +} + +data "aws_iam_policy_document" "cert_manager" { + count = local.create_cert_manager_irsa ? 
1 : 0 + + statement { + actions = ["route53:GetChange", ] + resources = ["arn:${local.partition}:route53:::change/*"] + } + + statement { + actions = [ + "route53:ChangeResourceRecordSets", + "route53:ListResourceRecordSets", + ] + resources = var.cert_manager_route53_hosted_zone_arns + } + + statement { + actions = ["route53:ListHostedZonesByName"] + resources = ["*"] + } +} + +module "cert_manager" { + source = "aws-ia/eks-blueprints-addon/aws" + version = "1.0.0" + + create = var.enable_cert_manager + + # https://github.com/cert-manager/cert-manager/blob/master/deploy/charts/cert-manager/Chart.template.yaml + name = try(var.cert_manager.name, "cert-manager") + description = try(var.cert_manager.description, "A Helm chart to deploy cert-manager") + namespace = try(var.cert_manager.namespace, "cert-manager") + create_namespace = try(var.cert_manager.create_namespace, true) + chart = "cert-manager" + chart_version = try(var.cert_manager.chart_version, "v1.11.1") + repository = try(var.cert_manager.repository, "https://charts.jetstack.io") + values = try(var.cert_manager.values, []) + + timeout = try(var.cert_manager.timeout, null) + repository_key_file = try(var.cert_manager.repository_key_file, null) + repository_cert_file = try(var.cert_manager.repository_cert_file, null) + repository_ca_file = try(var.cert_manager.repository_ca_file, null) + repository_username = try(var.cert_manager.repository_username, null) + repository_password = try(var.cert_manager.repository_password, null) + devel = try(var.cert_manager.devel, null) + verify = try(var.cert_manager.verify, null) + keyring = try(var.cert_manager.keyring, null) + disable_webhooks = try(var.cert_manager.disable_webhooks, null) + reuse_values = try(var.cert_manager.reuse_values, null) + reset_values = try(var.cert_manager.reset_values, null) + force_update = try(var.cert_manager.force_update, null) + recreate_pods = try(var.cert_manager.recreate_pods, null) + cleanup_on_fail = 
try(var.cert_manager.cleanup_on_fail, null) + max_history = try(var.cert_manager.max_history, null) + atomic = try(var.cert_manager.atomic, null) + skip_crds = try(var.cert_manager.skip_crds, null) + render_subchart_notes = try(var.cert_manager.render_subchart_notes, null) + disable_openapi_validation = try(var.cert_manager.disable_openapi_validation, null) + wait = try(var.cert_manager.wait, null) + wait_for_jobs = try(var.cert_manager.wait_for_jobs, null) + dependency_update = try(var.cert_manager.dependency_update, null) + replace = try(var.cert_manager.replace, null) + lint = try(var.cert_manager.lint, null) + + postrender = try(var.cert_manager.postrender, []) + set = concat([ + { + name = "installCRDs" + value = true + } + ], + try(var.cert_manager.set, []) + ) + set_sensitive = try(var.cert_manager.set_sensitive, []) + + # IAM role for service account (IRSA) + set_irsa_names = ["serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"] + create_role = local.create_cert_manager_irsa && try(var.cert_manager.create_role, true) + role_name = try(var.cert_manager.role_name, "cert-manager") + role_name_use_prefix = try(var.cert_manager.role_name_use_prefix, true) + role_path = try(var.cert_manager.role_path, "/") + role_permissions_boundary_arn = lookup(var.cert_manager, "role_permissions_boundary_arn", null) + role_description = try(var.cert_manager.role_description, "IRSA for cert-manger project") + role_policies = lookup(var.cert_manager, "role_policies", {}) + + source_policy_documents = compact(concat( + data.aws_iam_policy_document.cert_manager[*].json, + lookup(var.cert_manager, "source_policy_documents", []) + )) + override_policy_documents = lookup(var.cert_manager, "override_policy_documents", []) + policy_statements = lookup(var.cert_manager, "policy_statements", []) + policy_name = try(var.cert_manager.policy_name, null) + policy_name_use_prefix = try(var.cert_manager.policy_name_use_prefix, true) + policy_path = try(var.cert_manager.policy_path, 
null) + policy_description = try(var.cert_manager.policy_description, "IAM Policy for cert-manager") + + oidc_providers = { + this = { + provider_arn = local.oidc_provider_arn + # namespace is inherited from chart + service_account = local.cert_manager_service_account + } + } + + tags = var.tags +} + +################################################################################ +# Cluster Autoscaler +################################################################################ + +locals { + cluster_autoscaler_service_account = try(var.cluster_autoscaler.service_account_name, "cluster-autoscaler-sa") + + # Lookup map to pull latest cluster-autoscaler patch version given the cluster version + cluster_autoscaler_image_tag = { + "1.20" = "v1.20.3" + "1.21" = "v1.21.3" + "1.22" = "v1.22.3" + "1.23" = "v1.23.1" + "1.24" = "v1.24.1" + "1.25" = "v1.25.1" + "1.26" = "v1.26.2" + } +} + +data "aws_iam_policy_document" "cluster_autoscaler" { + count = var.enable_cluster_autoscaler ? 1 : 0 + + statement { + actions = [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeScalingActivities", + "autoscaling:DescribeTags", + "ec2:DescribeLaunchTemplateVersions", + "ec2:DescribeInstanceTypes", + "eks:DescribeNodegroup", + "ec2:DescribeImages", + "ec2:GetInstanceTypesFromInstanceRequirements" + ] + + resources = ["*"] + } + + statement { + actions = [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "autoscaling:UpdateAutoScalingGroup", + ] + + resources = ["*"] + + condition { + test = "StringEquals" + variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${var.cluster_name}" + values = ["owned"] + } + } +} + +module "cluster_autoscaler" { + source = "aws-ia/eks-blueprints-addon/aws" + version = "1.0.0" + + create = var.enable_cluster_autoscaler + + # 
https://github.com/kubernetes/autoscaler/blob/master/charts/cluster-autoscaler/Chart.yaml + name = try(var.cluster_autoscaler.name, "cluster-autoscaler") + description = try(var.cluster_autoscaler.description, "A Helm chart to deploy cluster-autoscaler") + namespace = try(var.cluster_autoscaler.namespace, "kube-system") + create_namespace = try(var.cluster_autoscaler.create_namespace, false) + chart = "cluster-autoscaler" + chart_version = try(var.cluster_autoscaler.chart_version, "9.28.0") + repository = try(var.cluster_autoscaler.repository, "https://kubernetes.github.io/autoscaler") + values = try(var.cluster_autoscaler.values, []) + + timeout = try(var.cluster_autoscaler.timeout, null) + repository_key_file = try(var.cluster_autoscaler.repository_key_file, null) + repository_cert_file = try(var.cluster_autoscaler.repository_cert_file, null) + repository_ca_file = try(var.cluster_autoscaler.repository_ca_file, null) + repository_username = try(var.cluster_autoscaler.repository_username, null) + repository_password = try(var.cluster_autoscaler.repository_password, null) + devel = try(var.cluster_autoscaler.devel, null) + verify = try(var.cluster_autoscaler.verify, null) + keyring = try(var.cluster_autoscaler.keyring, null) + disable_webhooks = try(var.cluster_autoscaler.disable_webhooks, null) + reuse_values = try(var.cluster_autoscaler.reuse_values, null) + reset_values = try(var.cluster_autoscaler.reset_values, null) + force_update = try(var.cluster_autoscaler.force_update, null) + recreate_pods = try(var.cluster_autoscaler.recreate_pods, null) + cleanup_on_fail = try(var.cluster_autoscaler.cleanup_on_fail, null) + max_history = try(var.cluster_autoscaler.max_history, null) + atomic = try(var.cluster_autoscaler.atomic, null) + skip_crds = try(var.cluster_autoscaler.skip_crds, null) + render_subchart_notes = try(var.cluster_autoscaler.render_subchart_notes, null) + disable_openapi_validation = try(var.cluster_autoscaler.disable_openapi_validation, null) + wait = 
try(var.cluster_autoscaler.wait, null) + wait_for_jobs = try(var.cluster_autoscaler.wait_for_jobs, null) + dependency_update = try(var.cluster_autoscaler.dependency_update, null) + replace = try(var.cluster_autoscaler.replace, null) + lint = try(var.cluster_autoscaler.lint, null) + + postrender = try(var.cluster_autoscaler.postrender, []) + set = concat( + [ + { + name = "awsRegion" + value = local.region + }, + { + name = "autoDiscovery.clusterName" + value = local.cluster_name + }, + { + name = "image.tag" + value = try(local.cluster_autoscaler_image_tag[var.cluster_version], "v${var.cluster_version}.0") + }, + { + name = "rbac.serviceAccount.name" + value = local.cluster_autoscaler_service_account + } + ], + try(var.cluster_autoscaler.set, []) + ) + set_sensitive = try(var.cluster_autoscaler.set_sensitive, []) + + # IAM role for service account (IRSA) + set_irsa_names = ["rbac.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"] + create_role = try(var.cluster_autoscaler.create_role, true) + role_name = try(var.cluster_autoscaler.role_name, "cluster-autoscaler") + role_name_use_prefix = try(var.cluster_autoscaler.role_name_use_prefix, true) + role_path = try(var.cluster_autoscaler.role_path, "/") + role_permissions_boundary_arn = lookup(var.cluster_autoscaler, "role_permissions_boundary_arn", null) + role_description = try(var.cluster_autoscaler.role_description, "IRSA for cluster-autoscaler operator") + role_policies = lookup(var.cluster_autoscaler, "role_policies", {}) + + source_policy_documents = compact(concat( + data.aws_iam_policy_document.cluster_autoscaler[*].json, + lookup(var.cluster_autoscaler, "source_policy_documents", []) + )) + override_policy_documents = lookup(var.cluster_autoscaler, "override_policy_documents", []) + policy_statements = lookup(var.cluster_autoscaler, "policy_statements", []) + policy_name = try(var.cluster_autoscaler.policy_name, null) + policy_name_use_prefix = try(var.cluster_autoscaler.policy_name_use_prefix, true) + 
policy_path = try(var.cluster_autoscaler.policy_path, null) + policy_description = try(var.cluster_autoscaler.policy_description, "IAM Policy for cluster-autoscaler operator") + + oidc_providers = { + this = { + provider_arn = local.oidc_provider_arn + # namespace is inherited from chart + service_account = local.cluster_autoscaler_service_account + } + } + + tags = var.tags +} + +################################################################################ +# Cluster Proportional Autoscaler +################################################################################ + +module "cluster_proportional_autoscaler" { + source = "aws-ia/eks-blueprints-addon/aws" + version = "1.0.0" + + create = var.enable_cluster_proportional_autoscaler + + # https://github.com/kubernetes-sigs/cluster-proportional-autoscaler/blob/master/charts/cluster-proportional-autoscaler/Chart.yaml + name = try(var.cluster_proportional_autoscaler.name, "cluster-proportional-autoscaler") + description = try(var.cluster_proportional_autoscaler.description, "A Helm chart to install the Cluster Proportional Autoscaler") + namespace = try(var.cluster_proportional_autoscaler.namespace, "kube-system") + create_namespace = try(var.cluster_proportional_autoscaler.create_namespace, false) + chart = "cluster-proportional-autoscaler" + chart_version = try(var.cluster_proportional_autoscaler.chart_version, "1.1.0") + repository = try(var.cluster_proportional_autoscaler.repository, "https://kubernetes-sigs.github.io/cluster-proportional-autoscaler") + values = try(var.cluster_proportional_autoscaler.values, []) + + timeout = try(var.cluster_proportional_autoscaler.timeout, null) + repository_key_file = try(var.cluster_proportional_autoscaler.repository_key_file, null) + repository_cert_file = try(var.cluster_proportional_autoscaler.repository_cert_file, null) + repository_ca_file = try(var.cluster_proportional_autoscaler.repository_ca_file, null) + repository_username = 
try(var.cluster_proportional_autoscaler.repository_username, null) + repository_password = try(var.cluster_proportional_autoscaler.repository_password, null) + devel = try(var.cluster_proportional_autoscaler.devel, null) + verify = try(var.cluster_proportional_autoscaler.verify, null) + keyring = try(var.cluster_proportional_autoscaler.keyring, null) + disable_webhooks = try(var.cluster_proportional_autoscaler.disable_webhooks, null) + reuse_values = try(var.cluster_proportional_autoscaler.reuse_values, null) + reset_values = try(var.cluster_proportional_autoscaler.reset_values, null) + force_update = try(var.cluster_proportional_autoscaler.force_update, null) + recreate_pods = try(var.cluster_proportional_autoscaler.recreate_pods, null) + cleanup_on_fail = try(var.cluster_proportional_autoscaler.cleanup_on_fail, null) + max_history = try(var.cluster_proportional_autoscaler.max_history, null) + atomic = try(var.cluster_proportional_autoscaler.atomic, null) + skip_crds = try(var.cluster_proportional_autoscaler.skip_crds, null) + render_subchart_notes = try(var.cluster_proportional_autoscaler.render_subchart_notes, null) + disable_openapi_validation = try(var.cluster_proportional_autoscaler.disable_openapi_validation, null) + wait = try(var.cluster_proportional_autoscaler.wait, null) + wait_for_jobs = try(var.cluster_proportional_autoscaler.wait_for_jobs, null) + dependency_update = try(var.cluster_proportional_autoscaler.dependency_update, null) + replace = try(var.cluster_proportional_autoscaler.replace, null) + lint = try(var.cluster_proportional_autoscaler.lint, null) + + postrender = try(var.cluster_proportional_autoscaler.postrender, []) + set = try(var.cluster_proportional_autoscaler.set, []) + set_sensitive = try(var.cluster_proportional_autoscaler.set_sensitive, []) + + tags = var.tags +} + +################################################################################ +# EKS Addons 
+################################################################################ + +data "aws_eks_addon_version" "this" { + for_each = var.eks_addons + + addon_name = try(each.value.name, each.key) + kubernetes_version = var.cluster_version + most_recent = try(each.value.most_recent, true) +} + +resource "aws_eks_addon" "this" { + for_each = var.eks_addons + + cluster_name = local.cluster_name + addon_name = try(each.value.name, each.key) + + addon_version = try(each.value.addon_version, data.aws_eks_addon_version.this[each.key].version) + configuration_values = try(each.value.configuration_values, null) + preserve = try(each.value.preserve, null) + resolve_conflicts = try(each.value.resolve_conflicts, "OVERWRITE") + service_account_role_arn = try(each.value.service_account_role_arn, null) + + timeouts { + create = try(each.value.timeouts.create, var.eks_addons_timeouts.create, null) + update = try(each.value.timeouts.update, var.eks_addons_timeouts.update, null) + delete = try(each.value.timeouts.delete, var.eks_addons_timeouts.delete, null) + } + + tags = var.tags +} + +################################################################################ +# External DNS +################################################################################ + +locals { + external_dns_service_account = try(var.external_dns.service_account_name, "external-dns-sa") +} + +# https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md#iam-policy +data "aws_iam_policy_document" "external_dns" { + count = var.enable_external_dns && length(var.external_dns_route53_zone_arns) > 0 ? 
1 : 0 + + statement { + actions = ["route53:ChangeResourceRecordSets"] + resources = var.external_dns_route53_zone_arns + } + + statement { + actions = [ + "route53:ListHostedZones", + "route53:ListResourceRecordSets", + ] + resources = ["*"] + } +} + +module "external_dns" { + source = "aws-ia/eks-blueprints-addon/aws" + version = "1.0.0" + + create = var.enable_external_dns + + # https://github.com/kubernetes-sigs/external-dns/tree/master/charts/external-dns/Chart.yaml + name = try(var.external_dns.name, "external-dns") + description = try(var.external_dns.description, "A Helm chart to deploy external-dns") + namespace = try(var.external_dns.namespace, "external-dns") + create_namespace = try(var.external_dns.create_namespace, true) + chart = "external-dns" + chart_version = try(var.external_dns.chart_version, "1.12.2") + repository = try(var.external_dns.repository, "https://kubernetes-sigs.github.io/external-dns/") + values = try(var.external_dns.values, ["provider: aws"]) + + timeout = try(var.external_dns.timeout, null) + repository_key_file = try(var.external_dns.repository_key_file, null) + repository_cert_file = try(var.external_dns.repository_cert_file, null) + repository_ca_file = try(var.external_dns.repository_ca_file, null) + repository_username = try(var.external_dns.repository_username, null) + repository_password = try(var.external_dns.repository_password, null) + devel = try(var.external_dns.devel, null) + verify = try(var.external_dns.verify, null) + keyring = try(var.external_dns.keyring, null) + disable_webhooks = try(var.external_dns.disable_webhooks, null) + reuse_values = try(var.external_dns.reuse_values, null) + reset_values = try(var.external_dns.reset_values, null) + force_update = try(var.external_dns.force_update, null) + recreate_pods = try(var.external_dns.recreate_pods, null) + cleanup_on_fail = try(var.external_dns.cleanup_on_fail, null) + max_history = try(var.external_dns.max_history, null) + atomic = try(var.external_dns.atomic, 
null) + skip_crds = try(var.external_dns.skip_crds, null) + render_subchart_notes = try(var.external_dns.render_subchart_notes, null) + disable_openapi_validation = try(var.external_dns.disable_openapi_validation, null) + wait = try(var.external_dns.wait, null) + wait_for_jobs = try(var.external_dns.wait_for_jobs, null) + dependency_update = try(var.external_dns.dependency_update, null) + replace = try(var.external_dns.replace, null) + lint = try(var.external_dns.lint, null) + + postrender = try(var.external_dns.postrender, []) + set = concat([ + { + name = "serviceAccount.name" + value = local.external_dns_service_account + }], + try(var.external_dns.set, []) + ) + set_sensitive = try(var.external_dns.set_sensitive, []) + + # IAM role for service account (IRSA) + set_irsa_names = ["serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"] + create_role = try(var.external_dns.create_role, true) && length(var.external_dns_route53_zone_arns) > 0 + role_name = try(var.external_dns.role_name, "external-dns") + role_name_use_prefix = try(var.external_dns.role_name_use_prefix, true) + role_path = try(var.external_dns.role_path, "/") + role_permissions_boundary_arn = lookup(var.external_dns, "role_permissions_boundary_arn", null) + role_description = try(var.external_dns.role_description, "IRSA for external-dns operator") + role_policies = lookup(var.external_dns, "role_policies", {}) + + source_policy_documents = compact(concat( + data.aws_iam_policy_document.external_dns[*].json, + lookup(var.external_dns, "source_policy_documents", []) + )) + override_policy_documents = lookup(var.external_dns, "override_policy_documents", []) + policy_statements = lookup(var.external_dns, "policy_statements", []) + policy_name = try(var.external_dns.policy_name, null) + policy_name_use_prefix = try(var.external_dns.policy_name_use_prefix, true) + policy_path = try(var.external_dns.policy_path, null) + policy_description = try(var.external_dns.policy_description, "IAM Policy for 
external-dns operator") + + oidc_providers = { + this = { + provider_arn = local.oidc_provider_arn + # namespace is inherited from chart + service_account = local.external_dns_service_account + } + } + + tags = var.tags +} + +################################################################################ +# External Secrets +################################################################################ + +locals { + external_secrets_service_account = try(var.external_secrets.service_account_name, "external-secrets-sa") +} + +# https://github.com/external-secrets/kubernetes-external-secrets#add-a-secret +data "aws_iam_policy_document" "external_secrets" { + count = var.enable_external_secrets ? 1 : 0 + + dynamic "statement" { + for_each = length(var.external_secrets_ssm_parameter_arns) > 0 ? [1] : [] + + content { + actions = ["ssm:DescribeParameters"] + resources = ["*"] + } + } + + dynamic "statement" { + for_each = length(var.external_secrets_ssm_parameter_arns) > 0 ? [1] : [] + + content { + actions = [ + "ssm:GetParameter", + "ssm:GetParameters", + ] + resources = var.external_secrets_ssm_parameter_arns + } + } + + dynamic "statement" { + for_each = length(var.external_secrets_secrets_manager_arns) > 0 ? [1] : [] + + content { + actions = ["secretsmanager:ListSecrets"] + resources = ["*"] + } + } + + dynamic "statement" { + for_each = length(var.external_secrets_secrets_manager_arns) > 0 ? [1] : [] + + content { + actions = [ + "secretsmanager:GetResourcePolicy", + "secretsmanager:GetSecretValue", + "secretsmanager:DescribeSecret", + "secretsmanager:ListSecretVersionIds", + ] + resources = var.external_secrets_secrets_manager_arns + } + } + + dynamic "statement" { + for_each = length(var.external_secrets_kms_key_arns) > 0 ? 
[1] : [] + + content { + actions = ["kms:Decrypt"] + resources = var.external_secrets_kms_key_arns + } + } +} + +module "external_secrets" { + source = "aws-ia/eks-blueprints-addon/aws" + version = "1.0.0" + + create = var.enable_external_secrets + + # https://github.com/external-secrets/external-secrets/blob/main/deploy/charts/external-secrets/Chart.yaml + name = try(var.external_secrets.name, "external-secrets") + description = try(var.external_secrets.description, "A Helm chart to deploy external-secrets") + namespace = try(var.external_secrets.namespace, "external-secrets") + create_namespace = try(var.external_secrets.create_namespace, true) + chart = "external-secrets" + chart_version = try(var.external_secrets.chart_version, "0.8.1") + repository = try(var.external_secrets.repository, "https://charts.external-secrets.io") + values = try(var.external_secrets.values, []) + + timeout = try(var.external_secrets.timeout, null) + repository_key_file = try(var.external_secrets.repository_key_file, null) + repository_cert_file = try(var.external_secrets.repository_cert_file, null) + repository_ca_file = try(var.external_secrets.repository_ca_file, null) + repository_username = try(var.external_secrets.repository_username, null) + repository_password = try(var.external_secrets.repository_password, null) + devel = try(var.external_secrets.devel, null) + verify = try(var.external_secrets.verify, null) + keyring = try(var.external_secrets.keyring, null) + disable_webhooks = try(var.external_secrets.disable_webhooks, null) + reuse_values = try(var.external_secrets.reuse_values, null) + reset_values = try(var.external_secrets.reset_values, null) + force_update = try(var.external_secrets.force_update, null) + recreate_pods = try(var.external_secrets.recreate_pods, null) + cleanup_on_fail = try(var.external_secrets.cleanup_on_fail, null) + max_history = try(var.external_secrets.max_history, null) + atomic = try(var.external_secrets.atomic, null) + skip_crds = 
try(var.external_secrets.skip_crds, null) + render_subchart_notes = try(var.external_secrets.render_subchart_notes, null) + disable_openapi_validation = try(var.external_secrets.disable_openapi_validation, null) + wait = try(var.external_secrets.wait, null) + wait_for_jobs = try(var.external_secrets.wait_for_jobs, null) + dependency_update = try(var.external_secrets.dependency_update, null) + replace = try(var.external_secrets.replace, null) + lint = try(var.external_secrets.lint, null) + + postrender = try(var.external_secrets.postrender, []) + set = concat([ + { + name = "serviceAccount.name" + value = local.external_secrets_service_account + }], + try(var.external_secrets.set, []) + ) + set_sensitive = try(var.external_secrets.set_sensitive, []) + + # IAM role for service account (IRSA) + set_irsa_names = ["serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"] + create_role = try(var.external_secrets.create_role, true) + role_name = try(var.external_secrets.role_name, "external-secrets") + role_name_use_prefix = try(var.external_secrets.role_name_use_prefix, true) + role_path = try(var.external_secrets.role_path, "/") + role_permissions_boundary_arn = lookup(var.external_secrets, "role_permissions_boundary_arn", null) + role_description = try(var.external_secrets.role_description, "IRSA for external-secrets operator") + role_policies = lookup(var.external_secrets, "role_policies", {}) + + source_policy_documents = compact(concat( + data.aws_iam_policy_document.external_secrets[*].json, + lookup(var.external_secrets, "source_policy_documents", []) + )) + override_policy_documents = lookup(var.external_secrets, "override_policy_documents", []) + policy_statements = lookup(var.external_secrets, "policy_statements", []) + policy_name = try(var.external_secrets.policy_name, null) + policy_name_use_prefix = try(var.external_secrets.policy_name_use_prefix, true) + policy_path = try(var.external_secrets.policy_path, null) + policy_description = 
try(var.external_secrets.policy_description, "IAM Policy for external-secrets operator") + + oidc_providers = { + this = { + provider_arn = local.oidc_provider_arn + # namespace is inherited from chart + service_account = local.external_secrets_service_account + } + } + + tags = var.tags +} + +################################################################################ +# Fargate Fluentbit +################################################################################ + +resource "aws_cloudwatch_log_group" "fargate_fluentbit" { + count = try(var.fargate_fluentbit_cw_log_group.create, true) && var.enable_fargate_fluentbit ? 1 : 0 + + name = try(var.fargate_fluentbit_cw_log_group.name, null) + name_prefix = try(var.fargate_fluentbit_cw_log_group.name_prefix, "/${var.cluster_name}/fargate-fluentbit-logs") + retention_in_days = try(var.fargate_fluentbit_cw_log_group.retention, 90) + kms_key_id = try(var.fargate_fluentbit_cw_log_group.kms_key_arn, null) + skip_destroy = try(var.fargate_fluentbit_cw_log_group.skip_destroy, false) + tags = merge(var.tags, try(var.fargate_fluentbit_cw_log_group.tags, {})) +} + +# Help on Fargate Logging with Fluentbit and CloudWatch +# https://docs.aws.amazon.com/eks/latest/userguide/fargate-logging.html +resource "kubernetes_namespace_v1" "aws_observability" { + count = var.enable_fargate_fluentbit ? 1 : 0 + + metadata { + name = "aws-observability" + + labels = { + aws-observability = "enabled" + } + } +} + +# fluent-bit-cloudwatch value as the name of the CloudWatch log group that is automatically created as soon as your apps start logging +resource "kubernetes_config_map_v1" "aws_logging" { + count = var.enable_fargate_fluentbit ? 1 : 0 + + metadata { + name = "aws-logging" + namespace = kubernetes_namespace_v1.aws_observability[0].id + } + + data = { + "parsers.conf" = try( + var.fargate_fluentbit.parsers_conf, + <<-EOT + [PARSER] + Name regex + Format regex + Regex ^(?