From 57ab8189280c3bdf9fe97d6877df9d8af4940b14 Mon Sep 17 00:00:00 2001
From: maxim
Date: Wed, 10 Nov 2021 13:03:01 +0600
Subject: [PATCH 1/6] feat: use flags to enable/disable additional
 functionality instead of using examples folder

---
 README.md                                    |   2 +
 terraform/layer1-aws/README.md               | 143 +++++++++++-------
 .../{examples => }/aws-ec2-pritunl.tf        |   3 +-
 terraform/layer1-aws/variables.tf            |   6 +
 .../eks-aws-loadbalancer-controller.tf       |  35 +++--
 .../{helm-charts.yaml => helm-releases.yaml} |  56 ++++++-
 terraform/layer2-k8s/locals.tf               |   2 +-
 7 files changed, 167 insertions(+), 80 deletions(-)
 rename terraform/layer1-aws/{examples => }/aws-ec2-pritunl.tf (89%)
 rename terraform/layer2-k8s/{helm-charts.yaml => helm-releases.yaml} (70%)

diff --git a/README.md b/README.md
index c0ff5354..a46a56e8 100644
--- a/README.md
+++ b/README.md
@@ -478,6 +478,8 @@ We use GitHub Actions and [tfsec](https://github.com/aquasecurity/tfsec) to chec
 | layer1-aws/aws-eks.tf | aws-eks-no-public-cluster-access | Resource 'module.eks:aws_eks_cluster.this[0]' has public access is explicitly set to enabled | By default we create public accessible EKS cluster from anywhere |
 | layer1-aws/aws-eks.tf | aws-eks-no-public-cluster-access-to-cidr | Resource 'module.eks:aws_eks_cluster.this[0]' has public access cidr explicitly set to wide open | By default we create public accessible EKS cluster from anywhere |
 | layer1-aws/aws-eks.tf | aws-vpc-no-public-egress-sgr | Resource 'module.eks:aws_security_group_rule.workers_egress_internet[0]' defines a fully open egress security group rule | We use recommended option. [More info](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) |
+| modules/aws-ec2-pritunl/security_groups.tf | aws-vpc-no-public-egress-sgr | Resource 'module.pritunl[0]:module.ec2_sg:aws_security_group_rule.egress_with_cidr_blocks[0]' defines a fully open egress security group rule. | This is a VPN server and it needs to have egress traffic to anywhere by default |
+| modules/aws-ec2-pritunl/security_groups.tf | aws-vpc-no-public-ingress-sgr | Resource 'module.pritunl[0]:module.ec2_sg:aws_security_group_rule.ingress_with_cidr_blocks[1]' defines a fully open ingress security group rule. | This is a VPN server and by default it needs to have ingress traffic from anywhere |
 | modules/aws-iam-eks-trusted/main.tf | aws-iam-no-policy-wildcards | Resource 'module.aws_iam_external_secrets:aws_iam_role_policy.this' defines a policy with wildcarded resources. | We use this policy for external-secrets and grant it access to all secrets. 
| | modules/aws-iam-eks-trusted/main.tf | aws-iam-no-policy-wildcards | Resource 'module.aws_iam_autoscaler:aws_iam_role_policy.this' defines a policy with wildcarded resources | We use condition to allow run actions only for certain autoscaling groups | | modules/aws-iam-eks-trusted/main.tf | aws-iam-no-policy-wildcards | Resource 'module.eks_alb_ingress[0]:module.aws_iam_aws_loadbalancer_controller:aws_iam_role_policy.this' defines a policy with wildcarded resources | We use recommended [policy](https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/main/docs/install/iam_policy.json) | diff --git a/terraform/layer1-aws/README.md b/terraform/layer1-aws/README.md index 5a78d748..6c5f6e09 100644 --- a/terraform/layer1-aws/README.md +++ b/terraform/layer1-aws/README.md @@ -2,73 +2,106 @@ | Name | Version | |------|---------| -| terraform | 0.15.1 | -| aws | 3.53.0 | -| kubernetes | 2.4.1 | +| [terraform](#requirement\_terraform) | 0.15.1 | +| [aws](#requirement\_aws) | 3.53.0 | +| [kubernetes](#requirement\_kubernetes) | 2.4.1 | ## Providers | Name | Version | |------|---------| -| aws | 3.53.0 | +| [aws](#provider\_aws) | 3.53.0 | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [acm](#module\_acm) | terraform-aws-modules/acm/aws | 3.2.0 | +| [eks](#module\_eks) | terraform-aws-modules/eks/aws | 17.3.0 | +| [pritunl](#module\_pritunl) | ../modules/aws-ec2-pritunl | n/a | +| [r53\_zone](#module\_r53\_zone) | terraform-aws-modules/route53/aws//modules/zones | 2.1.0 | +| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | 3.2.0 | +| [vpc\_gateway\_endpoints](#module\_vpc\_gateway\_endpoints) | terraform-aws-modules/vpc/aws//modules/vpc-endpoints | 3.2.0 | + +## Resources + +| Name | Type | +|------|------| +| [aws_ebs_encryption_by_default.this](https://registry.terraform.io/providers/aws/3.53.0/docs/resources/ebs_encryption_by_default) | resource | +| [aws_eks_addon.coredns](https://registry.terraform.io/providers/aws/3.53.0/docs/resources/eks_addon) | resource | +| [aws_eks_addon.kube_proxy](https://registry.terraform.io/providers/aws/3.53.0/docs/resources/eks_addon) | resource | +| [aws_eks_addon.vpc_cni](https://registry.terraform.io/providers/aws/3.53.0/docs/resources/eks_addon) | resource | +| [aws_kms_key.eks](https://registry.terraform.io/providers/aws/3.53.0/docs/resources/kms_key) | resource | +| [aws_acm_certificate.main](https://registry.terraform.io/providers/aws/3.53.0/docs/data-sources/acm_certificate) | data source | +| [aws_ami.bottlerocket_ami](https://registry.terraform.io/providers/aws/3.53.0/docs/data-sources/ami) | data source | +| [aws_availability_zones.available](https://registry.terraform.io/providers/aws/3.53.0/docs/data-sources/availability_zones) | data source | +| [aws_caller_identity.current](https://registry.terraform.io/providers/aws/3.53.0/docs/data-sources/caller_identity) | data source | +| [aws_eks_cluster.main](https://registry.terraform.io/providers/aws/3.53.0/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.main](https://registry.terraform.io/providers/aws/3.53.0/docs/data-sources/eks_cluster_auth) | data source | +| [aws_route53_zone.main](https://registry.terraform.io/providers/aws/3.53.0/docs/data-sources/route53_zone) | data source | +| [aws_security_group.default](https://registry.terraform.io/providers/aws/3.53.0/docs/data-sources/security_group) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| 
-| allowed\_account\_ids | List of allowed AWS account IDs | `list` | `[]` | no | -| allowed\_ips | IP addresses allowed to connect to private resources | `list(any)` | `[]` | no | -| az\_count | Count of avaiablity zones, min 2 | `number` | `3` | no | -| cidr | Default CIDR block for VPC | `string` | `"10.0.0.0/16"` | no | -| create\_acm\_certificate | Whether to create acm certificate or use existing | `bool` | `false` | no | -| create\_r53\_zone | Create R53 zone for main public domain | `bool` | `false` | no | -| domain\_name | Main public domain name | `any` | n/a | yes | -| ecr\_repo\_retention\_count | number of images to store in ECR | `number` | `50` | no | -| ecr\_repos | List of docker repositories | `list(any)` |
[
"demo"
]
| no | -| eks\_cluster\_enabled\_log\_types | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html). Possible values: api, audit, authenticator, controllerManager, scheduler | `list(string)` |
[
"audit"
]
| no | -| eks\_cluster\_encryption\_config\_enable | Enable or not encryption for k8s secrets with aws-kms | `bool` | `false` | no | -| eks\_cluster\_log\_retention\_in\_days | Number of days to retain log events. Default retention - 90 days. | `number` | `90` | no | -| eks\_cluster\_version | Version of the EKS K8S cluster | `string` | `"1.21"` | no | -| eks\_map\_roles | Additional IAM roles to add to the aws-auth configmap. |
list(object({
rolearn = string
username = string
groups = list(string)
}))
| `[]` | no | -| eks\_workers\_additional\_policies | Additional IAM policy attached to EKS worker nodes | `list(any)` |
[
"arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
]
| no | -| eks\_write\_kubeconfig | Flag for eks module to write kubeconfig | `bool` | `false` | no | -| environment | Env name in case workspace wasn't used | `string` | `"demo"` | no | -| name | Project name, required to create unique resource names | `any` | n/a | yes | -| node\_group\_ci | Node group configuration |
object({
instance_types = list(string)
capacity_type = string
max_capacity = number
min_capacity = number
desired_capacity = number
force_update_version = bool
})
|
{
"capacity_type": "SPOT",
"desired_capacity": 0,
"force_update_version": true,
"instance_types": [
"t3a.medium",
"t3.medium"
],
"max_capacity": 5,
"min_capacity": 0
}
| no | -| node\_group\_ondemand | Node group configuration |
object({
instance_types = list(string)
capacity_type = string
max_capacity = number
min_capacity = number
desired_capacity = number
force_update_version = bool
})
|
{
"capacity_type": "ON_DEMAND",
"desired_capacity": 1,
"force_update_version": true,
"instance_types": [
"t3a.medium"
],
"max_capacity": 5,
"min_capacity": 1
}
| no | -| node\_group\_spot | Node group configuration |
object({
instance_types = list(string)
capacity_type = string
max_capacity = number
min_capacity = number
desired_capacity = number
force_update_version = bool
})
|
{
"capacity_type": "SPOT",
"desired_capacity": 1,
"force_update_version": true,
"instance_types": [
"t3a.medium",
"t3.medium"
],
"max_capacity": 5,
"min_capacity": 0
}
| no | -| region | Default infrastructure region | `string` | `"us-east-1"` | no | -| short\_region | The abbreviated name of the region, required to form unique resource names | `map` |
{
"ap-east-1": "ape1",
"ap-northeast-1": "apn1",
"ap-northeast-2": "apn2",
"ap-south-1": "aps1",
"ap-southeast-1": "apse1",
"ap-southeast-2": "apse2",
"ca-central-1": "cac1",
"cn-north-1": "cnn1",
"cn-northwest-1": "cnnw1",
"eu-central-1": "euc1",
"eu-north-1": "eun1",
"eu-west-1": "euw1",
"eu-west-2": "euw2",
"eu-west-3": "euw3",
"sa-east-1": "sae1",
"us-east-1": "use1",
"us-east-2": "use2",
"us-gov-east-1": "usge1",
"us-gov-west-1": "usgw1",
"us-west-1": "usw1",
"us-west-2": "usw2"
}
| no | -| single\_nat\_gateway | Flag to create single nat gateway for all AZs | `bool` | `true` | no | -| worker\_group\_bottlerocket | Bottlerocket worker group configuration |
object({
instance_types = list(string)
capacity_type = string
max_capacity = number
min_capacity = number
desired_capacity = number
spot_instance_pools = number
})
|
{
"capacity_type": "SPOT",
"desired_capacity": 0,
"instance_types": [
"t3a.medium",
"t3.medium"
],
"max_capacity": 5,
"min_capacity": 0,
"spot_instance_pools": 2
}
 | no |
-| zone\_id | R53 zone id for public domain | `any` | `null` | no |
+| [addon\_coredns\_version](#input\_addon\_coredns\_version) | The version of coredns add-on | `string` | `"v1.8.3-eksbuild.1"` | no |
+| [addon\_create\_coredns](#input\_addon\_create\_coredns) | Enable coredns add-on or not | `bool` | `true` | no |
+| [addon\_create\_kube\_proxy](#input\_addon\_create\_kube\_proxy) | Enable kube-proxy add-on or not | `bool` | `true` | no |
+| [addon\_create\_vpc\_cni](#input\_addon\_create\_vpc\_cni) | Enable vpc-cni add-on or not | `bool` | `true` | no |
+| [addon\_kube\_proxy\_version](#input\_addon\_kube\_proxy\_version) | The version of kube-proxy add-on | `string` | `"v1.20.4-eksbuild.2"` | no |
+| [addon\_vpc\_cni\_version](#input\_addon\_vpc\_cni\_version) | The version of vpc-cni add-on | `string` | `"v1.9.1-eksbuild.1"` | no |
+| [allowed\_account\_ids](#input\_allowed\_account\_ids) | List of allowed AWS account IDs | `list` | `[]` | no |
+| [allowed\_ips](#input\_allowed\_ips) | IP addresses allowed to connect to private resources | `list(any)` | `[]` | no |
+| [az\_count](#input\_az\_count) | Count of availability zones, min 2 | `number` | `3` | no |
+| [cidr](#input\_cidr) | Default CIDR block for VPC | `string` | `"10.0.0.0/16"` | no |
+| [create\_acm\_certificate](#input\_create\_acm\_certificate) | Whether to create acm certificate or use existing | `bool` | `false` | no |
+| [create\_r53\_zone](#input\_create\_r53\_zone) | Create R53 zone for main public domain | `bool` | `false` | no |
+| [domain\_name](#input\_domain\_name) | Main public domain name | `any` | n/a | yes |
+| [eks\_cluster\_enabled\_log\_types](#input\_eks\_cluster\_enabled\_log\_types) | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html). Possible values: api, audit, authenticator, controllerManager, scheduler | `list(string)` |
[
"audit"
]
| no | +| [eks\_cluster\_encryption\_config\_enable](#input\_eks\_cluster\_encryption\_config\_enable) | Enable or not encryption for k8s secrets with aws-kms | `bool` | `false` | no | +| [eks\_cluster\_log\_retention\_in\_days](#input\_eks\_cluster\_log\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days. | `number` | `90` | no | +| [eks\_cluster\_version](#input\_eks\_cluster\_version) | Version of the EKS K8S cluster | `string` | `"1.21"` | no | +| [eks\_map\_roles](#input\_eks\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. |
list(object({
rolearn = string
username = string
groups = list(string)
}))
| `[]` | no | +| [eks\_workers\_additional\_policies](#input\_eks\_workers\_additional\_policies) | Additional IAM policy attached to EKS worker nodes | `list(any)` |
[
"arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
]
| no | +| [eks\_write\_kubeconfig](#input\_eks\_write\_kubeconfig) | Flag for eks module to write kubeconfig | `bool` | `false` | no | +| [environment](#input\_environment) | Env name in case workspace wasn't used | `string` | `"demo"` | no | +| [name](#input\_name) | Project name, required to create unique resource names | `any` | n/a | yes | +| [node\_group\_ci](#input\_node\_group\_ci) | Node group configuration |
object({
instance_types = list(string)
capacity_type = string
max_capacity = number
min_capacity = number
desired_capacity = number
force_update_version = bool
})
|
{
"capacity_type": "SPOT",
"desired_capacity": 0,
"force_update_version": true,
"instance_types": [
"t3a.medium",
"t3.medium"
],
"max_capacity": 5,
"min_capacity": 0
}
| no | +| [node\_group\_ondemand](#input\_node\_group\_ondemand) | Node group configuration |
object({
instance_types = list(string)
capacity_type = string
max_capacity = number
min_capacity = number
desired_capacity = number
force_update_version = bool
})
|
{
"capacity_type": "ON_DEMAND",
"desired_capacity": 1,
"force_update_version": true,
"instance_types": [
"t3a.medium"
],
"max_capacity": 5,
"min_capacity": 1
}
| no | +| [node\_group\_spot](#input\_node\_group\_spot) | Node group configuration |
object({
instance_types = list(string)
capacity_type = string
max_capacity = number
min_capacity = number
desired_capacity = number
force_update_version = bool
})
|
{
"capacity_type": "SPOT",
"desired_capacity": 1,
"force_update_version": true,
"instance_types": [
"t3a.medium",
"t3.medium"
],
"max_capacity": 5,
"min_capacity": 0
}
| no | +| [pritunl\_vpn\_server](#input\_pritunl\_vpn\_server) | Indicates whether or not the Pritunl VPN server is deployed. | `bool` | `false` | no | +| [region](#input\_region) | Default infrastructure region | `string` | `"us-east-1"` | no | +| [short\_region](#input\_short\_region) | The abbreviated name of the region, required to form unique resource names | `map` |
{
"ap-east-1": "ape1",
"ap-northeast-1": "apn1",
"ap-northeast-2": "apn2",
"ap-south-1": "aps1",
"ap-southeast-1": "apse1",
"ap-southeast-2": "apse2",
"ca-central-1": "cac1",
"cn-north-1": "cnn1",
"cn-northwest-1": "cnnw1",
"eu-central-1": "euc1",
"eu-north-1": "eun1",
"eu-west-1": "euw1",
"eu-west-2": "euw2",
"eu-west-3": "euw3",
"sa-east-1": "sae1",
"us-east-1": "use1",
"us-east-2": "use2",
"us-gov-east-1": "usge1",
"us-gov-west-1": "usgw1",
"us-west-1": "usw1",
"us-west-2": "usw2"
}
| no | +| [single\_nat\_gateway](#input\_single\_nat\_gateway) | Flag to create single nat gateway for all AZs | `bool` | `true` | no | +| [worker\_group\_bottlerocket](#input\_worker\_group\_bottlerocket) | Bottlerocket worker group configuration |
object({
instance_types = list(string)
capacity_type = string
max_capacity = number
min_capacity = number
desired_capacity = number
spot_instance_pools = number
})
|
{
"capacity_type": "SPOT",
"desired_capacity": 0,
"instance_types": [
"t3a.medium",
"t3.medium"
],
"max_capacity": 5,
"min_capacity": 0,
"spot_instance_pools": 2
}
 | no |
+| [zone\_id](#input\_zone\_id) | R53 zone id for public domain | `any` | `null` | no |
 
 ## Outputs
 
 | Name | Description |
 |------|-------------|
-| allowed\_ips | List of allowed ip's, used for direct ssh access to instances. |
-| az\_count | Count of avaiablity zones, min 2 |
-| domain\_name | Domain name |
-| eks\_cluster\_endpoint | Endpoint for EKS control plane. |
-| eks\_cluster\_id | n/a |
-| eks\_cluster\_security\_group\_id | Security group ids attached to the cluster control plane. |
-| eks\_config\_map\_aws\_auth | A kubernetes configuration to authenticate to this EKS cluster. |
-| eks\_kubectl\_config | kubectl config as generated by the module. |
-| eks\_kubectl\_console\_config | description |
-| eks\_oidc\_provider\_arn | ARN of EKS oidc provider |
-| env | Suffix for the hostname depending on workspace |
-| name | Project name, required to form unique resource names |
-| name\_wo\_region | Project name, required to form unique resource names without short region |
-| region | Target region for all infrastructure resources |
-| route53\_zone\_id | ID of domain zone |
-| short\_region | The abbreviated name of the region, required to form unique resource names |
-| ssl\_certificate\_arn | ARN of SSL certificate |
-| vpc\_cidr | CIDR block of infra VPC |
-| vpc\_database\_subnets | Database subnets of infra VPC |
-| vpc\_id | ID of infra VPC |
-| vpc\_intra\_subnets | Private intra subnets |
-| vpc\_name | Name of infra VPC |
-| vpc\_private\_subnets | Private subnets of infra VPC |
-| vpc\_public\_subnets | Public subnets of infra VPC |
-
+| [allowed\_ips](#output\_allowed\_ips) | List of allowed IPs, used for direct ssh access to instances. |
+| [az\_count](#output\_az\_count) | Count of availability zones, min 2 |
+| [domain\_name](#output\_domain\_name) | Domain name |
+| [eks\_cluster\_endpoint](#output\_eks\_cluster\_endpoint) | Endpoint for EKS control plane. |
+| [eks\_cluster\_id](#output\_eks\_cluster\_id) | n/a |
+| [eks\_cluster\_security\_group\_id](#output\_eks\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
+| [eks\_config\_map\_aws\_auth](#output\_eks\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
+| [eks\_kubectl\_config](#output\_eks\_kubectl\_config) | kubectl config as generated by the module. 
| +| [eks\_kubectl\_console\_config](#output\_eks\_kubectl\_console\_config) | description | +| [eks\_oidc\_provider\_arn](#output\_eks\_oidc\_provider\_arn) | ARN of EKS oidc provider | +| [env](#output\_env) | Suffix for the hostname depending on workspace | +| [name](#output\_name) | Project name, required to form unique resource names | +| [name\_wo\_region](#output\_name\_wo\_region) | Project name, required to form unique resource names without short region | +| [region](#output\_region) | Target region for all infrastructure resources | +| [route53\_zone\_id](#output\_route53\_zone\_id) | ID of domain zone | +| [short\_region](#output\_short\_region) | The abbreviated name of the region, required to form unique resource names | +| [ssl\_certificate\_arn](#output\_ssl\_certificate\_arn) | ARN of SSL certificate | +| [vpc\_cidr](#output\_vpc\_cidr) | CIDR block of infra VPC | +| [vpc\_database\_subnets](#output\_vpc\_database\_subnets) | Database subnets of infra VPC | +| [vpc\_id](#output\_vpc\_id) | ID of infra VPC | +| [vpc\_intra\_subnets](#output\_vpc\_intra\_subnets) | Private intra subnets | +| [vpc\_name](#output\_vpc\_name) | Name of infra VPC | +| [vpc\_private\_subnets](#output\_vpc\_private\_subnets) | Private subnets of infra VPC | +| [vpc\_public\_subnets](#output\_vpc\_public\_subnets) | Public subnets of infra VPC | diff --git a/terraform/layer1-aws/examples/aws-ec2-pritunl.tf b/terraform/layer1-aws/aws-ec2-pritunl.tf similarity index 89% rename from terraform/layer1-aws/examples/aws-ec2-pritunl.tf rename to terraform/layer1-aws/aws-ec2-pritunl.tf index 684bebe7..614d6586 100644 --- a/terraform/layer1-aws/examples/aws-ec2-pritunl.tf +++ b/terraform/layer1-aws/aws-ec2-pritunl.tf @@ -1,6 +1,7 @@ module "pritunl" { - source = "../modules/aws-ec2-pritunl" + count = var.pritunl_vpn_server ? 1 : 0 + source = "../modules/aws-ec2-pritunl" environment = local.env vpc_id = module.vpc.vpc_id public_subnets = module.vpc.public_subnets diff --git a/terraform/layer1-aws/variables.tf b/terraform/layer1-aws/variables.tf index 7d2b4f71..7603f367 100644 --- a/terraform/layer1-aws/variables.tf +++ b/terraform/layer1-aws/variables.tf @@ -245,3 +245,9 @@ variable "eks_cluster_encryption_config_enable" { default = false description = "Enable or not encryption for k8s secrets with aws-kms" } + +variable "pritunl_vpn_server" { + type = bool + default = false + description = "Indicates whether or not the Pritunl VPN server is deployed." 
+} diff --git a/terraform/layer2-k8s/eks-aws-loadbalancer-controller.tf b/terraform/layer2-k8s/eks-aws-loadbalancer-controller.tf index e22ea51d..82fdf911 100644 --- a/terraform/layer2-k8s/eks-aws-loadbalancer-controller.tf +++ b/terraform/layer2-k8s/eks-aws-loadbalancer-controller.tf @@ -1,12 +1,15 @@ locals { - aws-load-balancer-controller = { - chart = local.helm_charts[index(local.helm_charts.*.id, "aws-load-balancer-controller")].chart - repository = lookup(local.helm_charts[index(local.helm_charts.*.id, "aws-load-balancer-controller")], "repository", null) - chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "aws-load-balancer-controller")], "version", null) + aws_load_balancer_controller = { + name = local.helm_releases[index(local.helm_releases.*.id, "aws-load-balancer-controller")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "aws-load-balancer-controller")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "aws-load-balancer-controller")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "aws-load-balancer-controller")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "aws-load-balancer-controller")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "aws-load-balancer-controller")].namespace } alb_ingress_controller = templatefile("${path.module}/templates/alb-ingress-controller-values.yaml", { - role_arn = var.aws_loadbalancer_controller_enable ? module.aws_iam_aws_loadbalancer_controller[0].role_arn : "", + role_arn = local.aws_load_balancer_controller.enabled ? module.aws_iam_aws_loadbalancer_controller[0].role_arn : "", region = local.region, cluster_name = local.eks_cluster_id, vpc_id = local.vpc_id @@ -15,10 +18,10 @@ locals { #tfsec:ignore:kubernetes-network-no-public-egress tfsec:ignore:kubernetes-network-no-public-ingress module "aws_load_balancer_controller_namespace" { - count = var.aws_loadbalancer_controller_enable ? 1 : 0 + count = local.aws_load_balancer_controller.enabled ? 1 : 0 source = "../modules/kubernetes-namespace" - name = "aws-load-balancer-controller" + name = local.aws_load_balancer_controller.namespace network_policies = [ { name = "default-deny" @@ -34,7 +37,7 @@ module "aws_load_balancer_controller_namespace" { { namespace_selector = { match_labels = { - name = "aws-load-balancer-controller" + name = local.aws_load_balancer_controller.namespace } } } @@ -48,7 +51,7 @@ module "aws_load_balancer_controller_namespace" { match_expressions = { key = "app.kubernetes.io/name" operator = "In" - values = ["aws-load-balancer-controller"] + values = [local.aws_load_balancer_controller.name] } } ingress = { @@ -89,10 +92,10 @@ module "aws_load_balancer_controller_namespace" { #tfsec:ignore:aws-iam-no-policy-wildcards module "aws_iam_aws_loadbalancer_controller" { - count = var.aws_loadbalancer_controller_enable ? 1 : 0 + count = local.aws_load_balancer_controller.enabled ? 1 : 0 source = "../modules/aws-iam-eks-trusted" - name = "${local.name}-alb-ingress" + name = "${local.name}-${local.aws_load_balancer_controller.name}" region = local.region oidc_provider_arn = local.eks_oidc_provider_arn policy = jsonencode({ @@ -305,12 +308,12 @@ module "aws_iam_aws_loadbalancer_controller" { } resource "helm_release" "aws_loadbalancer_controller" { - count = var.aws_loadbalancer_controller_enable ? 1 : 0 + count = local.aws_load_balancer_controller.enabled ? 
1 : 0 - name = "aws-load-balancer-controller" - chart = local.aws-load-balancer-controller.chart - repository = local.aws-load-balancer-controller.repository - version = local.aws-load-balancer-controller.chart_version + name = local.aws_load_balancer_controller.name + chart = local.aws_load_balancer_controller.chart + repository = local.aws_load_balancer_controller.repository + version = local.aws_load_balancer_controller.chart_version namespace = module.aws_load_balancer_controller_namespace[count.index].name max_history = var.helm_release_history_size diff --git a/terraform/layer2-k8s/helm-charts.yaml b/terraform/layer2-k8s/helm-releases.yaml similarity index 70% rename from terraform/layer2-k8s/helm-charts.yaml rename to terraform/layer2-k8s/helm-releases.yaml index 73613dbe..bcccb672 100644 --- a/terraform/layer2-k8s/helm-charts.yaml +++ b/terraform/layer2-k8s/helm-releases.yaml @@ -1,85 +1,127 @@ -charts: +releases: - id: aws-load-balancer-controller + enabled: false chart: aws-load-balancer-controller repository: https://aws.github.io/eks-charts version: 1.2.6 + namespace: aws-load-balancer-controller - id: aws-node-termination-handler + enabled: true chart: aws-node-termination-handler repository: https://aws.github.io/eks-charts version: 0.13.3 + namespace: aws-node-termination-handler - id: aws-calico + enabled: true chart: aws-calico repository: https://aws.github.io/eks-charts version: 0.3.4 + namespace: kube-system - id: cert-manager + enabled: false chart: cert-manager repository: https://charts.jetstack.io version: 1.1.0 + namespace: cert-manager - id: cert-mananger-certificate + enabled: false chart: ../../helm-charts/certificate repository: version: - - id: cluster-autoscaler - chart: cluster-autoscaler - repository: https://kubernetes.github.io/autoscaler - version: 9.10.5 + namespace: cert-manager - id: cert-manager-cluster-issuer + enabled: false chart: ../../helm-charts/cluster-issuer repository: version: + namespace: cert-manager + - id: cluster-autoscaler + enabled: true + chart: cluster-autoscaler + repository: https://kubernetes.github.io/autoscaler + version: 9.10.5 + namespace: cluster-autoscaler - id: elk + enabled: false chart: ../../helm-charts/elk repository: version: + namespace: elk - id: external-dns + enabled: true chart: external-dns repository: https://kubernetes-sigs.github.io/external-dns version: 1.5.0 + namespace: external-dns - id: external-secrets + enabled: true chart: kubernetes-external-secrets repository: https://external-secrets.github.io/kubernetes-external-secrets version: 6.3.0 + namespace: external-secrets - id: gitlab-runner + enabled: false chart: gitlab-runner repository: https://charts.gitlab.io version: 0.26.0 + namespace: gitlab-runner - id: ingress-nginx + enabled: true chart: ingress-nginx repository: https://kubernetes.github.io/ingress-nginx version: 3.23.0 + namespace: ingress-nginx - id: istio-operator + enabled: false chart: ../../helm-charts/istio/istio-operator repository: version: + namespace: null - id: istio-operator-resources + enabled: false chart: ../../helm-charts/istio/istio-operator-resources - repository: + repository: version: + namespace: istio-system - id: istio-resources + enabled: false chart: ../../helm-charts/istio/istio-resources repository: version: + namespace: istio-system - id: keda + enabled: false chart: keda repository: https://kedacore.github.io/charts version: 2.4.0 - - id: kiali-server + namespace: keda + - id: kiali + enabled: false chart: kiali-server repository: https://kiali.org/helm-charts 
version: 1.36 + namespace: kiali - id: kube-prometheus-stack + enabled: true chart: kube-prometheus-stack repository: https://prometheus-community.github.io/helm-charts version: 13.12.0 + namespace: monitoring - id: loki-stack + enabled: true chart: loki-stack repository: https://grafana.github.io/helm-charts version: 2.3.1 + namespace: loki-stack - id: reloader + enabled: true chart: reloader repository: https://stakater.github.io/stakater-charts version: 0.0.81 + namespace: reloader - id: teamcity + enabled: false chart: ../../helm-charts/teamcity repository: version: + namespace: teamcity diff --git a/terraform/layer2-k8s/locals.tf b/terraform/layer2-k8s/locals.tf index a81e0f43..af9eb90e 100644 --- a/terraform/layer2-k8s/locals.tf +++ b/terraform/layer2-k8s/locals.tf @@ -15,5 +15,5 @@ locals { eks_cluster_id = data.terraform_remote_state.layer1-aws.outputs.eks_cluster_id eks_oidc_provider_arn = data.terraform_remote_state.layer1-aws.outputs.eks_oidc_provider_arn - helm_charts = yamldecode(file("${path.module}/helm-charts.yaml"))["charts"] + helm_releases = yamldecode(file("${path.module}/helm-releases.yaml"))["releases"] } From 86230c6fc4f21c32ee8b892233099f42ec4e1cf5 Mon Sep 17 00:00:00 2001 From: maxim Date: Wed, 10 Nov 2021 17:18:45 +0600 Subject: [PATCH 2/6] enh: add count for all default releases --- terraform/layer1-aws/variables.tf | 2 +- .../eks-aws-loadbalancer-controller.tf | 1 + .../eks-aws-node-termination-handler.tf | 30 ++++--- terraform/layer2-k8s/eks-calico.tf | 31 +++---- .../eks-cert-manager-certificate.tf | 24 +++--- .../eks-cert-manager-cluster-issuer.tf | 24 +++--- terraform/layer2-k8s/eks-cert-manager.tf | 38 +++++---- .../layer2-k8s/eks-cluster-autoscaler.tf | 34 +++++--- terraform/layer2-k8s/eks-external-dns.tf | 37 +++++---- terraform/layer2-k8s/eks-external-secrets.tf | 80 ++++++------------ terraform/layer2-k8s/eks-keda.tf | 22 +++-- .../layer2-k8s/eks-kube-prometheus-stack.tf | 43 ++++++---- terraform/layer2-k8s/eks-loki-stack.tf | 68 ++++++--------- .../eks-nginx-ingress-controller.tf | 38 +++++---- terraform/layer2-k8s/helm-releases.yaml | 8 +- .../templates/loki-stack-values.yaml | 82 ------------------- terraform/modules/aws-ec2-pritunl/efs.tf | 2 +- terraform/modules/aws-ec2-pritunl/main.tf | 2 +- terraform/modules/aws-ec2-pritunl/output.tf | 2 +- .../aws-ec2-pritunl/security_groups.tf | 6 +- 20 files changed, 250 insertions(+), 324 deletions(-) diff --git a/terraform/layer1-aws/variables.tf b/terraform/layer1-aws/variables.tf index 7603f367..d4efa8b8 100644 --- a/terraform/layer1-aws/variables.tf +++ b/terraform/layer1-aws/variables.tf @@ -246,7 +246,7 @@ variable "eks_cluster_encryption_config_enable" { description = "Enable or not encryption for k8s secrets with aws-kms" } -variable "pritunl_vpn_server" { +variable "pritunl_vpn_server_enable" { type = bool default = false description = "Indicates whether or not the Pritunl VPN server is deployed." 
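A quick usage sketch for the toggles introduced by these two patches (illustrative only: the terraform.tfvars file name and the flipped values below are assumptions, not part of the patch series):

    # terraform/layer1-aws/terraform.tfvars (hypothetical file)
    # Deploying the Pritunl VPN server is now opt-in via the variable renamed
    # above; it drives count on module.pritunl, so the single instance is
    # addressed as module.pritunl[0].
    pritunl_vpn_server_enable = true

    # terraform/layer2-k8s/helm-releases.yaml (entry from patch 1/6 with
    # `enabled` flipped). Each release's `enabled` field feeds the
    # `count = local.<release>.enabled ? 1 : 0` guards added in this patch.
    releases:
      - id: cert-manager
        enabled: true
        chart: cert-manager
        repository: https://charts.jetstack.io
        version: 1.1.0
        namespace: cert-manager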
diff --git a/terraform/layer2-k8s/eks-aws-loadbalancer-controller.tf b/terraform/layer2-k8s/eks-aws-loadbalancer-controller.tf index 82fdf911..63cca726 100644 --- a/terraform/layer2-k8s/eks-aws-loadbalancer-controller.tf +++ b/terraform/layer2-k8s/eks-aws-loadbalancer-controller.tf @@ -320,4 +320,5 @@ resource "helm_release" "aws_loadbalancer_controller" { values = [ local.alb_ingress_controller ] + } diff --git a/terraform/layer2-k8s/eks-aws-node-termination-handler.tf b/terraform/layer2-k8s/eks-aws-node-termination-handler.tf index 93955132..f1071249 100644 --- a/terraform/layer2-k8s/eks-aws-node-termination-handler.tf +++ b/terraform/layer2-k8s/eks-aws-node-termination-handler.tf @@ -1,15 +1,20 @@ locals { - aws-node-termination-handler = { - chart = local.helm_charts[index(local.helm_charts.*.id, "aws-node-termination-handler")].chart - repository = lookup(local.helm_charts[index(local.helm_charts.*.id, "aws-node-termination-handler")], "repository", null) - chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "aws-node-termination-handler")], "version", null) + aws_node_termination_handler = { + name = local.helm_releases[index(local.helm_releases.*.id, "aws-node-termination-handler")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "aws-node-termination-handler")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "aws-node-termination-handler")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "aws-node-termination-handler")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "aws-node-termination-handler")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "aws-node-termination-handler")].namespace } } #tfsec:ignore:kubernetes-network-no-public-egress tfsec:ignore:kubernetes-network-no-public-ingress module "aws_node_termination_handler_namespace" { + count = local.aws_node_termination_handler.enabled ? 1 : 0 + source = "../modules/kubernetes-namespace" - name = "aws-node-termination-handler" + name = local.aws_node_termination_handler.namespace network_policies = [ { name = "default-deny" @@ -25,7 +30,7 @@ module "aws_node_termination_handler_namespace" { { namespace_selector = { match_labels = { - name = "aws-node-termination-handler" + name = local.aws_node_termination_handler.namespace } } } @@ -53,12 +58,13 @@ module "aws_node_termination_handler_namespace" { } resource "helm_release" "aws_node_termination_handler" { - name = "aws-node-termination-handler" - chart = local.aws-node-termination-handler.chart - repository = local.aws-node-termination-handler.repository - version = local.aws-node-termination-handler.chart_version - namespace = module.aws_node_termination_handler_namespace.name - wait = false + count = local.aws_node_termination_handler.enabled ? 
1 : 0
+
+  name        = local.aws_node_termination_handler.name
+  chart       = local.aws_node_termination_handler.chart
+  repository  = local.aws_node_termination_handler.repository
+  version     = local.aws_node_termination_handler.chart_version
+  namespace   = module.aws_node_termination_handler_namespace[count.index].name
   max_history = var.helm_release_history_size
 
   values = [
diff --git a/terraform/layer2-k8s/eks-calico.tf b/terraform/layer2-k8s/eks-calico.tf
index ed231a40..ae9187f8 100644
--- a/terraform/layer2-k8s/eks-calico.tf
+++ b/terraform/layer2-k8s/eks-calico.tf
@@ -1,25 +1,26 @@
 locals {
-  aws-calico = {
-    chart         = local.helm_charts[index(local.helm_charts.*.id, "aws-calico")].chart
-    repository    = lookup(local.helm_charts[index(local.helm_charts.*.id, "aws-calico")], "repository", null)
-    chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "aws-calico")], "version", null)
+  aws_calico = {
+    name          = local.helm_releases[index(local.helm_releases.*.id, "aws-calico")].id
+    enabled       = local.helm_releases[index(local.helm_releases.*.id, "aws-calico")].enabled
+    chart         = local.helm_releases[index(local.helm_releases.*.id, "aws-calico")].chart
+    repository    = local.helm_releases[index(local.helm_releases.*.id, "aws-calico")].repository
+    chart_version = local.helm_releases[index(local.helm_releases.*.id, "aws-calico")].version
+    namespace     = local.helm_releases[index(local.helm_releases.*.id, "aws-calico")].namespace
   }
 }
 
-data "template_file" "calico_daemonset" {
-  template = file("${path.module}/templates/calico-values.yaml")
-}
-
 resource "helm_release" "calico_daemonset" {
-  name        = "aws-calico"
-  chart       = local.aws-calico.chart
-  repository  = local.aws-calico.repository
-  version     = local.aws-calico.chart_version
-  namespace   = "kube-system"
+  count = local.aws_calico.enabled ? 
1 : 0 + + name = local.aws_calico.name + chart = local.aws_calico.chart + repository = local.aws_calico.repository + version = local.aws_calico.chart_version + namespace = local.aws_calico.namespace max_history = var.helm_release_history_size - wait = false values = [ - data.template_file.calico_daemonset.rendered, + file("${path.module}/templates/calico-values.yaml") ] + } diff --git a/terraform/layer2-k8s/eks-cert-manager-certificate.tf b/terraform/layer2-k8s/eks-cert-manager-certificate.tf index 61e8c812..38346906 100644 --- a/terraform/layer2-k8s/eks-cert-manager-certificate.tf +++ b/terraform/layer2-k8s/eks-cert-manager-certificate.tf @@ -1,8 +1,11 @@ locals { - cert-mananger-certificate = { - chart = local.helm_charts[index(local.helm_charts.*.id, "cert-mananger-certificate")].chart - repository = lookup(local.helm_charts[index(local.helm_charts.*.id, "cert-mananger-certificate")], "repository", null) - chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "cert-mananger-certificate")], "version", null) + cert_mananger_certificate = { + name = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].namespace } } @@ -16,12 +19,13 @@ data "template_file" "certificate" { } resource "helm_release" "certificate" { - name = "certificate" - chart = local.cert-mananger-certificate.chart - repository = local.cert-mananger-certificate.repository - version = local.cert-mananger-certificate.chart_version - namespace = module.ingress_nginx_namespace.name - wait = false + count = local.cert_mananger_certificate.enabled ? 
1 : 0
+
+  name        = local.cert_mananger_certificate.name
+  chart       = local.cert_mananger_certificate.chart
+  repository  = local.cert_mananger_certificate.repository
+  version     = local.cert_mananger_certificate.chart_version
+  namespace   = local.cert_mananger_certificate.namespace
   max_history = var.helm_release_history_size
 
   values = [
diff --git a/terraform/layer2-k8s/eks-cert-manager-cluster-issuer.tf b/terraform/layer2-k8s/eks-cert-manager-cluster-issuer.tf
index eee45736..5377e55f 100644
--- a/terraform/layer2-k8s/eks-cert-manager-cluster-issuer.tf
+++ b/terraform/layer2-k8s/eks-cert-manager-cluster-issuer.tf
@@ -1,8 +1,11 @@
 locals {
-  cert-manager-cluster-issuer = {
-    chart         = local.helm_charts[index(local.helm_charts.*.id, "cert-manager-cluster-issuer")].chart
-    repository    = lookup(local.helm_charts[index(local.helm_charts.*.id, "cert-manager-cluster-issuer")], "repository", null)
-    chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "cert-manager-cluster-issuer")], "version", null)
+  cert_manager_cluster_issuer = {
+    name          = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].id
+    enabled       = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].enabled
+    chart         = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].chart
+    repository    = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].repository
+    chart_version = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].version
+    namespace     = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].namespace
   }
 }
 
@@ -17,12 +20,13 @@
 }
 
 resource "helm_release" "cluster_issuer" {
-  name        = "cluster-issuer"
-  chart       = local.cert-manager-cluster-issuer.chart
-  repository  = local.cert-manager-cluster-issuer.repository
-  version     = local.cert-manager-cluster-issuer.chart_version
-  namespace   = module.certmanager_namespace.name
-  wait        = false
+  count = local.cert_manager_cluster_issuer.enabled ? 
1 : 0
+
+  name        = local.cert_manager_cluster_issuer.name
+  chart       = local.cert_manager_cluster_issuer.chart
+  repository  = local.cert_manager_cluster_issuer.repository
+  version     = local.cert_manager_cluster_issuer.chart_version
+  namespace   = local.cert_manager_cluster_issuer.namespace
   max_history = var.helm_release_history_size
 
   values = [
diff --git a/terraform/layer2-k8s/eks-cert-manager.tf b/terraform/layer2-k8s/eks-cert-manager.tf
index e5680ad9..894e28eb 100644
--- a/terraform/layer2-k8s/eks-cert-manager.tf
+++ b/terraform/layer2-k8s/eks-cert-manager.tf
@@ -1,8 +1,11 @@
 locals {
-  cert-manager = {
-    chart         = local.helm_charts[index(local.helm_charts.*.id, "cert-manager")].chart
-    repository    = lookup(local.helm_charts[index(local.helm_charts.*.id, "cert-manager")], "repository", null)
-    chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "cert-manager")], "version", null)
+  cert_manager = {
+    name          = local.helm_releases[index(local.helm_releases.*.id, "cert-manager")].id
+    enabled       = local.helm_releases[index(local.helm_releases.*.id, "cert-manager")].enabled
+    chart         = local.helm_releases[index(local.helm_releases.*.id, "cert-manager")].chart
+    repository    = local.helm_releases[index(local.helm_releases.*.id, "cert-manager")].repository
+    chart_version = local.helm_releases[index(local.helm_releases.*.id, "cert-manager")].version
+    namespace     = local.helm_releases[index(local.helm_releases.*.id, "cert-manager")].namespace
   }
 }
 
@@ -10,14 +13,16 @@ data "template_file" "cert_manager" {
   template = file("${path.module}/templates/cert-manager-values.yaml")
 
   vars = {
-    role_arn = module.aws_iam_cert_manager.role_arn
+    role_arn = local.cert_manager.enabled ? module.aws_iam_cert_manager[0].role_arn : ""
   }
 }
 
 #tfsec:ignore:kubernetes-network-no-public-egress tfsec:ignore:kubernetes-network-no-public-ingress
 module "certmanager_namespace" {
+  count = local.cert_manager.enabled ? 1 : 0
+
   source = "../modules/kubernetes-namespace"
-  name   = "certmanager"
+  name   = local.cert_manager.namespace
   network_policies = [
     {
       name         = "default-deny"
@@ -33,7 +38,7 @@
         {
           namespace_selector = {
             match_labels = {
-              name = "certmanager"
+              name = local.cert_manager.namespace
             }
           }
         }
@@ -88,9 +93,10 @@
 
 #tfsec:ignore:aws-iam-no-policy-wildcards
 module "aws_iam_cert_manager" {
-  source = "../modules/aws-iam-eks-trusted"
+  count = local.cert_manager.enabled ? 1 : 0
 
-  name              = "${local.name}-certmanager"
+  source            = "../modules/aws-iam-eks-trusted"
+  name              = "${local.name}-${local.cert_manager.name}"
   region            = local.region
   oidc_provider_arn = local.eks_oidc_provider_arn
   policy = jsonencode({
@@ -128,15 +134,17 @@ module "aws_iam_cert_manager" {
 }
 
 resource "helm_release" "cert_manager" {
-  name        = "cert-manager"
-  chart       = local.cert-manager.chart
-  repository  = local.cert-manager.repository
-  version     = local.cert-manager.chart_version
-  namespace   = module.certmanager_namespace.name
-  wait        = true
+  count = local.cert_manager.enabled ? 
1 : 0 + + name = local.cert_manager.name + chart = local.cert_manager.chart + repository = local.cert_manager.repository + version = local.cert_manager.chart_version + namespace = module.certmanager_namespace[count.index].name max_history = var.helm_release_history_size values = [ data.template_file.cert_manager.rendered, ] + } diff --git a/terraform/layer2-k8s/eks-cluster-autoscaler.tf b/terraform/layer2-k8s/eks-cluster-autoscaler.tf index 15365f82..8cc5f6f0 100644 --- a/terraform/layer2-k8s/eks-cluster-autoscaler.tf +++ b/terraform/layer2-k8s/eks-cluster-autoscaler.tf @@ -1,8 +1,11 @@ locals { - cluster-autoscaler = { - chart = local.helm_charts[index(local.helm_charts.*.id, "cluster-autoscaler")].chart - repository = lookup(local.helm_charts[index(local.helm_charts.*.id, "cluster-autoscaler")], "repository", null) - chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "cluster-autoscaler")], "version", null) + cluster_autoscaler = { + name = local.helm_releases[index(local.helm_releases.*.id, "cluster-autoscaler")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "cluster-autoscaler")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "cluster-autoscaler")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "cluster-autoscaler")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "cluster-autoscaler")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "cluster-autoscaler")].namespace } } @@ -10,7 +13,7 @@ data "template_file" "cluster_autoscaler" { template = file("${path.module}/templates/cluster-autoscaler-values.yaml") vars = { - role_arn = module.aws_iam_autoscaler.role_arn + role_arn = local.cluster_autoscaler.enabled ? module.aws_iam_autoscaler[0].role_arn : "" region = local.region cluster_name = local.eks_cluster_id version = var.cluster_autoscaler_version @@ -19,8 +22,10 @@ data "template_file" "cluster_autoscaler" { #tfsec:ignore:kubernetes-network-no-public-egress tfsec:ignore:kubernetes-network-no-public-ingress module "cluster_autoscaler_namespace" { + count = local.cluster_autoscaler.enabled ? 1 : 0 + source = "../modules/kubernetes-namespace" - name = "cluster-autoscaler" + name = local.cluster_autoscaler.namespace network_policies = [ { name = "default-deny" @@ -36,7 +41,7 @@ module "cluster_autoscaler_namespace" { { namespace_selector = { match_labels = { - name = "cluster-autoscaler" + name = local.cluster_autoscaler.namespace } } } @@ -93,8 +98,9 @@ module "cluster_autoscaler_namespace" { #tfsec:ignore:aws-iam-no-policy-wildcards module "aws_iam_autoscaler" { - source = "../modules/aws-iam-eks-trusted" + count = local.cluster_autoscaler.enabled ? 1 : 0 + source = "../modules/aws-iam-eks-trusted" name = "${local.name}-autoscaler" region = local.region oidc_provider_arn = local.eks_oidc_provider_arn @@ -134,11 +140,13 @@ module "aws_iam_autoscaler" { } resource "helm_release" "cluster_autoscaler" { - name = "cluster-autoscaler" - chart = local.cluster-autoscaler.chart - repository = local.cluster-autoscaler.repository - version = local.cluster-autoscaler.chart_version - namespace = module.cluster_autoscaler_namespace.name + count = local.cluster_autoscaler.enabled ? 
1 : 0
+
+  name        = local.cluster_autoscaler.name
+  chart       = local.cluster_autoscaler.chart
+  repository  = local.cluster_autoscaler.repository
+  version     = local.cluster_autoscaler.chart_version
+  namespace   = module.cluster_autoscaler_namespace[count.index].name
   max_history = var.helm_release_history_size
 
   values = [
diff --git a/terraform/layer2-k8s/eks-external-dns.tf b/terraform/layer2-k8s/eks-external-dns.tf
index dfc6aa31..6db73842 100644
--- a/terraform/layer2-k8s/eks-external-dns.tf
+++ b/terraform/layer2-k8s/eks-external-dns.tf
@@ -1,8 +1,11 @@
 locals {
-  external-dns = {
-    chart         = local.helm_charts[index(local.helm_charts.*.id, "external-dns")].chart
-    repository    = lookup(local.helm_charts[index(local.helm_charts.*.id, "external-dns")], "repository", null)
-    chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "external-dns")], "version", null)
+  external_dns = {
+    name          = local.helm_releases[index(local.helm_releases.*.id, "external-dns")].id
+    enabled       = local.helm_releases[index(local.helm_releases.*.id, "external-dns")].enabled
+    chart         = local.helm_releases[index(local.helm_releases.*.id, "external-dns")].chart
+    repository    = local.helm_releases[index(local.helm_releases.*.id, "external-dns")].repository
+    chart_version = local.helm_releases[index(local.helm_releases.*.id, "external-dns")].version
+    namespace     = local.helm_releases[index(local.helm_releases.*.id, "external-dns")].namespace
   }
 }
 
@@ -10,7 +13,7 @@ data "template_file" "external_dns" {
   template = file("${path.module}/templates/external-dns.yaml")
 
   vars = {
-    role_arn    = module.aws_iam_external_dns.role_arn
+    role_arn    = local.external_dns.enabled ? module.aws_iam_external_dns[0].role_arn : ""
     domain_name = local.domain_name
     zone_type   = "public"
   }
@@ -18,8 +21,10 @@
 
 #tfsec:ignore:kubernetes-network-no-public-egress tfsec:ignore:kubernetes-network-no-public-ingress
 module "external_dns_namespace" {
+  count = local.external_dns.enabled ? 1 : 0
+
   source = "../modules/kubernetes-namespace"
-  name   = "external-dns"
+  name   = local.external_dns.namespace
   network_policies = [
     {
       name         = "default-deny"
@@ -35,7 +40,7 @@
         {
           namespace_selector = {
             match_labels = {
-              name = "external-dns"
+              name = local.external_dns.namespace
             }
           }
         }
@@ -64,9 +69,10 @@
 
 #tfsec:ignore:aws-iam-no-policy-wildcards
 module "aws_iam_external_dns" {
-  source = "../modules/aws-iam-eks-trusted"
+  count = local.external_dns.enabled ? 1 : 0
 
-  name              = "${local.name}-external-dns"
+  source            = "../modules/aws-iam-eks-trusted"
+  name              = "${local.name}-${local.external_dns.name}"
   region            = local.region
   oidc_provider_arn = local.eks_oidc_provider_arn
   policy = jsonencode({
@@ -104,14 +110,17 @@ module "aws_iam_external_dns" {
 }
 
 resource "helm_release" "external_dns" {
-  name        = "external-dns"
-  chart       = local.external-dns.chart
-  repository  = local.external-dns.repository
-  version     = local.external-dns.chart_version
-  namespace   = module.external_dns_namespace.name
+  count = local.external_dns.enabled ? 
1 : 0 + + name = local.external_dns.name + chart = local.external_dns.chart + repository = local.external_dns.repository + version = local.external_dns.chart_version + namespace = module.external_dns_namespace[count.index].name max_history = var.helm_release_history_size values = [ data.template_file.external_dns.rendered, ] + } diff --git a/terraform/layer2-k8s/eks-external-secrets.tf b/terraform/layer2-k8s/eks-external-secrets.tf index 07d5c4f0..10340ad5 100644 --- a/terraform/layer2-k8s/eks-external-secrets.tf +++ b/terraform/layer2-k8s/eks-external-secrets.tf @@ -1,13 +1,11 @@ locals { - external-secrets = { - chart = local.helm_charts[index(local.helm_charts.*.id, "external-secrets")].chart - repository = lookup(local.helm_charts[index(local.helm_charts.*.id, "external-secrets")], "repository", null) - chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "external-secrets")], "version", null) - } - reloader = { - chart = local.helm_charts[index(local.helm_charts.*.id, "reloader")].chart - repository = lookup(local.helm_charts[index(local.helm_charts.*.id, "reloader")], "repository", null) - chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "reloader")], "version", null) + external_secrets = { + name = local.helm_releases[index(local.helm_releases.*.id, "external-secrets")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "external-secrets")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "external-secrets")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "external-secrets")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "external-secrets")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "external-secrets")].namespace } } @@ -15,48 +13,21 @@ data "template_file" "external_secrets" { template = file("${path.module}/templates/external-secrets-values.yaml") vars = { - role_arn = module.aws_iam_external_secrets.role_arn + role_arn = local.external_secrets.enabled ? module.aws_iam_external_secrets[0].role_arn : "" region = local.region } } #tfsec:ignore:kubernetes-network-no-public-egress tfsec:ignore:kubernetes-network-no-public-ingress module "external_secrets_namespace" { - source = "../modules/kubernetes-namespace" - name = "external-secrets" - network_policies = [ - { - name = "default-deny" - policy_types = ["Ingress"] - pod_selector = {} - }, - { - name = "allow-this-namespace" - policy_types = ["Ingress"] - pod_selector = {} - ingress = { - from = [ - { - namespace_selector = { - match_labels = { - name = "external-secrets" - } - } - } - ] - } - } - ] -} + count = local.external_secrets.enabled ? 1 : 0 -#tfsec:ignore:kubernetes-network-no-public-egress tfsec:ignore:kubernetes-network-no-public-ingress -module "reloader_namespace" { source = "../modules/kubernetes-namespace" - name = "reloader" + name = local.external_secrets.namespace network_policies = [ { name = "default-deny" - policy_types = ["Ingress", "Egress"] + policy_types = ["Ingress"] pod_selector = {} }, { @@ -68,7 +39,7 @@ module "reloader_namespace" { { namespace_selector = { match_labels = { - name = "reloader" + name = local.external_secrets.namespace } } } @@ -95,11 +66,13 @@ module "reloader_namespace" { ] } + #tfsec:ignore:aws-iam-no-policy-wildcards module "aws_iam_external_secrets" { - source = "../modules/aws-iam-eks-trusted" + count = local.external_secrets.enabled ? 
1 : 0 - name = "${local.name}-ext-secrets" + source = "../modules/aws-iam-eks-trusted" + name = "${local.name}-${local.external_secrets.name}" region = local.region oidc_provider_arn = local.eks_oidc_provider_arn policy = jsonencode({ @@ -115,24 +88,17 @@ module "aws_iam_external_secrets" { } resource "helm_release" "external_secrets" { - name = "external-secrets" - chart = local.external-secrets.chart - repository = local.external-secrets.repository - version = local.external-secrets.chart_version - namespace = module.external_secrets_namespace.name + count = local.external_secrets.enabled ? 1 : 0 + + name = local.external_secrets.name + chart = local.external_secrets.chart + repository = local.external_secrets.repository + version = local.external_secrets.chart_version + namespace = module.external_secrets_namespace[count.index].name max_history = var.helm_release_history_size values = [ data.template_file.external_secrets.rendered, ] -} -resource "helm_release" "reloader" { - name = "reloader" - chart = local.reloader.chart - repository = local.reloader.repository - version = local.reloader.chart_version - namespace = module.reloader_namespace.name - wait = false - max_history = var.helm_release_history_size } diff --git a/terraform/layer2-k8s/eks-keda.tf b/terraform/layer2-k8s/eks-keda.tf index ddf99dbb..857c728f 100644 --- a/terraform/layer2-k8s/eks-keda.tf +++ b/terraform/layer2-k8s/eks-keda.tf @@ -1,15 +1,20 @@ locals { keda = { - chart = local.helm_charts[index(local.helm_charts.*.id, "keda")].chart - repository = lookup(local.helm_charts[index(local.helm_charts.*.id, "keda")], "repository", null) - chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "keda")], "version", null) + name = local.helm_releases[index(local.helm_releases.*.id, "keda")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "keda")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "keda")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "keda")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "keda")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "keda")].namespace } } #tfsec:ignore:kubernetes-network-no-public-egress tfsec:ignore:kubernetes-network-no-public-ingress module "keda_namespace" { + count = local.keda.enabled ? 1 : 0 + source = "../modules/kubernetes-namespace" - name = "keda" + name = local.keda.namespace network_policies = [ { name = "default-deny" @@ -25,7 +30,7 @@ module "keda_namespace" { { namespace_selector = { match_labels = { - name = "keda" + name = local.keda.namespace } } } @@ -53,11 +58,12 @@ module "keda_namespace" { } resource "helm_release" "kedacore" { - name = "keda" + count = local.keda.enabled ? 
1 : 0 + + name = local.keda.name chart = local.keda.chart repository = local.keda.repository version = local.keda.chart_version - namespace = module.keda_namespace.name - wait = true + namespace = module.keda_namespace[count.index].name max_history = var.helm_release_history_size } diff --git a/terraform/layer2-k8s/eks-kube-prometheus-stack.tf b/terraform/layer2-k8s/eks-kube-prometheus-stack.tf index 742c7315..7862e33d 100644 --- a/terraform/layer2-k8s/eks-kube-prometheus-stack.tf +++ b/terraform/layer2-k8s/eks-kube-prometheus-stack.tf @@ -1,10 +1,13 @@ locals { - kube-prometheus-stack = { - chart = local.helm_charts[index(local.helm_charts.*.id, "kube-prometheus-stack")].chart - repository = lookup(local.helm_charts[index(local.helm_charts.*.id, "kube-prometheus-stack")], "repository", null) - chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "kube-prometheus-stack")], "version", null) + kube_prometheus_stack = { + name = local.helm_releases[index(local.helm_releases.*.id, "kube-prometheus-stack")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "kube-prometheus-stack")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "kube-prometheus-stack")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "kube-prometheus-stack")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "kube-prometheus-stack")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "kube-prometheus-stack")].namespace } - grafana_password = random_string.grafana_password.result + grafana_password = local.kube_prometheus_stack.enabled ? random_string.grafana_password[0].result : "test123" grafana_domain_name = "grafana-${local.domain_suffix}" prometheus_domain_name = "prometheus-${local.domain_suffix}" alertmanager_domain_name = "alertmanager-${local.domain_suffix}" @@ -28,8 +31,10 @@ locals { #tfsec:ignore:kubernetes-network-no-public-egress tfsec:ignore:kubernetes-network-no-public-ingress module "monitoring_namespace" { + count = local.kube_prometheus_stack.enabled ? 1 : 0 + source = "../modules/kubernetes-namespace" - name = "monitoring" + name = local.kube_prometheus_stack.namespace network_policies = [ { name = "default-deny" @@ -45,7 +50,7 @@ module "monitoring_namespace" { { namespace_selector = { match_labels = { - name = "monitoring" + name = local.kube_prometheus_stack.namespace } } } @@ -62,7 +67,7 @@ module "monitoring_namespace" { { namespace_selector = { match_labels = { - name = "ingress-nginx" + name = local.ingress_nginx.namespace } } } @@ -76,7 +81,7 @@ module "monitoring_namespace" { match_expressions = { key = "app.kubernetes.io/name" operator = "In" - values = ["kube-prometheus-stack-operator"] + values = ["${local.kube_prometheus_stack.name}-operator"] } } ingress = { @@ -116,9 +121,10 @@ module "monitoring_namespace" { } module "aws_iam_grafana" { - source = "../modules/aws-iam-eks-trusted" + count = local.kube_prometheus_stack.enabled ? 1 : 0 - name = "${local.name}-grafana" + source = "../modules/aws-iam-eks-trusted" + name = "${local.name}-${local.kube_prometheus_stack.name}-grafana" region = local.region oidc_provider_arn = local.eks_oidc_provider_arn policy = jsonencode({ @@ -155,22 +161,25 @@ module "aws_iam_grafana" { } resource "random_string" "grafana_password" { + count = local.kube_prometheus_stack.enabled ? 
1 : 0
   length  = 20
   special = true
 }
 
 resource "helm_release" "prometheus_operator" {
-  name       = "kube-prometheus-stack"
-  chart      = local.kube-prometheus-stack.chart
-  repository = local.kube-prometheus-stack.repository
-  version    = local.kube-prometheus-stack.chart_version
-  namespace  = module.monitoring_namespace.name
-  wait       = false
+  count = local.kube_prometheus_stack.enabled ? 1 : 0
+
+  name        = local.kube_prometheus_stack.name
+  chart       = local.kube_prometheus_stack.chart
+  repository  = local.kube_prometheus_stack.repository
+  version     = local.kube_prometheus_stack.chart_version
+  namespace   = module.monitoring_namespace[count.index].name
   max_history = var.helm_release_history_size
 
   values = [
     local.kube_prometheus_stack_template
   ]
+
 }
 
 output "grafana_domain_name" {
diff --git a/terraform/layer2-k8s/eks-loki-stack.tf b/terraform/layer2-k8s/eks-loki-stack.tf
index 2a27e2c5..9a04ecdb 100644
--- a/terraform/layer2-k8s/eks-loki-stack.tf
+++ b/terraform/layer2-k8s/eks-loki-stack.tf
@@ -1,25 +1,20 @@
 locals {
-  loki-stack = {
-    chart         = local.helm_charts[index(local.helm_charts.*.id, "loki-stack")].chart
-    repository    = lookup(local.helm_charts[index(local.helm_charts.*.id, "loki-stack")], "repository", null)
-    chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "loki-stack")], "version", null)
+  loki_stack = {
+    name          = local.helm_releases[index(local.helm_releases.*.id, "loki-stack")].id
+    enabled       = local.helm_releases[index(local.helm_releases.*.id, "loki-stack")].enabled
+    chart         = local.helm_releases[index(local.helm_releases.*.id, "loki-stack")].chart
+    repository    = local.helm_releases[index(local.helm_releases.*.id, "loki-stack")].repository
+    chart_version = local.helm_releases[index(local.helm_releases.*.id, "loki-stack")].version
+    namespace     = local.helm_releases[index(local.helm_releases.*.id, "loki-stack")].namespace
   }
-  grafana_loki_password = random_string.grafana_loki_password.result
-
-  loki_stack_template = templatefile("${path.module}/templates/loki-stack-values.yaml",
-    {
-      grafana_domain_name  = "grafana-${local.domain_suffix}"
-      grafana_password     = local.grafana_loki_password
-      gitlab_client_id     = local.grafana_gitlab_client_id
-      gitlab_client_secret = local.grafana_gitlab_client_secret
-      gitlab_group         = local.grafana_gitlab_group
-    })
 }
 
 #tfsec:ignore:kubernetes-network-no-public-egress tfsec:ignore:kubernetes-network-no-public-ingress
 module "loki_namespace" {
+  count = local.loki_stack.enabled ?
1 : 0
+
   source = "../modules/kubernetes-namespace"
-  name   = "loki"
+  name   = local.loki_stack.namespace
   network_policies = [
     {
       name         = "default-deny"
@@ -35,24 +30,7 @@ module "loki_namespace" {
         {
           namespace_selector = {
             match_labels = {
-              name = "loki"
-            }
-          }
-        }
-      ]
-    }
-  },
-  {
-    name         = "allow-ingress"
-    policy_types = ["Ingress"]
-    pod_selector = {}
-    ingress = {
-
-      from = [
-        {
-          namespace_selector = {
-            match_labels = {
-              name = "ingress-nginx"
+              name = local.loki_stack.namespace
             }
           }
         }
@@ -66,7 +44,7 @@ module "loki_namespace" {
         match_expressions = {
           key      = "release"
           operator = "In"
-          values   = ["loki-stack"]
+          values   = [local.loki_stack.name]
         }
       }
       ingress = {
@@ -74,6 +52,10 @@ module "loki_namespace" {
         {
           port     = "http-metrics"
           protocol = "TCP"
+          },
+          {
+            port     = "3100"
+            protocol = "TCP"
         }
       ]
       from = [
@@ -107,22 +89,18 @@ module "loki_namespace" {
   ]
 }
 
-resource "random_string" "grafana_loki_password" {
-  length  = 20
-  special = true
-}
-
 resource "helm_release" "loki_stack" {
-  name       = "loki-stack"
-  chart      = local.loki-stack.chart
-  repository = local.loki-stack.repository
-  version    = local.loki-stack.chart_version
-  namespace  = module.loki_namespace.name
-  wait       = false
+  count = local.loki_stack.enabled ? 1 : 0
+
+  name        = local.loki_stack.name
+  chart       = local.loki_stack.chart
+  repository  = local.loki_stack.repository
+  version     = local.loki_stack.chart_version
+  namespace   = module.loki_namespace[count.index].name
   max_history = var.helm_release_history_size
 
   values = [
-    local.loki_stack_template
+    file("${path.module}/templates/loki-stack-values.yaml")
   ]
 
   depends_on = [helm_release.prometheus_operator]
diff --git a/terraform/layer2-k8s/eks-nginx-ingress-controller.tf b/terraform/layer2-k8s/eks-nginx-ingress-controller.tf
index db537872..04222d82 100644
--- a/terraform/layer2-k8s/eks-nginx-ingress-controller.tf
+++ b/terraform/layer2-k8s/eks-nginx-ingress-controller.tf
@@ -1,8 +1,11 @@
 locals {
-  ingress-nginx = {
-    chart         = local.helm_charts[index(local.helm_charts.*.id, "ingress-nginx")].chart
-    repository    = lookup(local.helm_charts[index(local.helm_charts.*.id, "ingress-nginx")], "repository", null)
-    chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "ingress-nginx")], "version", null)
+  ingress_nginx = {
+    name          = local.helm_releases[index(local.helm_releases.*.id, "ingress-nginx")].id
+    enabled       = local.helm_releases[index(local.helm_releases.*.id, "ingress-nginx")].enabled
+    chart         = local.helm_releases[index(local.helm_releases.*.id, "ingress-nginx")].chart
+    repository    = local.helm_releases[index(local.helm_releases.*.id, "ingress-nginx")].repository
+    chart_version = local.helm_releases[index(local.helm_releases.*.id, "ingress-nginx")].version
+    namespace     = local.helm_releases[index(local.helm_releases.*.id, "ingress-nginx")].namespace
   }
 
   ssl_certificate_arn = var.nginx_ingress_ssl_terminator == "lb" ? data.terraform_remote_state.layer1-aws.outputs.ssl_certificate_arn : ""
@@ -19,14 +22,16 @@ data "template_file" "nginx_ingress" {
     hostname           = local.domain_name
     ssl_cert           = local.ssl_certificate_arn
     proxy_real_ip_cidr = local.vpc_cidr
-    namespace          = module.ingress_nginx_namespace.name
+    namespace          = local.ingress_nginx.enabled ? module.ingress_nginx_namespace[0].name : "default"
   }
 }
 
 #tfsec:ignore:kubernetes-network-no-public-egress tfsec:ignore:kubernetes-network-no-public-ingress
 module "ingress_nginx_namespace" {
+  count = local.ingress_nginx.enabled ?
1 : 0 + source = "../modules/kubernetes-namespace" - name = "ingress-nginx" + name = local.ingress_nginx.namespace network_policies = [ { name = "default-deny" @@ -42,7 +47,7 @@ module "ingress_nginx_namespace" { { namespace_selector = { match_labels = { - name = "ingress-nginx" + name = local.ingress_nginx.namespace } } } @@ -56,7 +61,7 @@ module "ingress_nginx_namespace" { match_expressions = { key = "app.kubernetes.io/name" operator = "In" - values = ["ingress-nginx"] + values = [local.ingress_nginx.name] } } ingress = { @@ -86,7 +91,7 @@ module "ingress_nginx_namespace" { match_expressions = { key = "app.kubernetes.io/name" operator = "In" - values = ["ingress-nginx"] + values = [local.ingress_nginx.name] } } ingress = { @@ -112,7 +117,7 @@ module "ingress_nginx_namespace" { match_expressions = { key = "app.kubernetes.io/name" operator = "In" - values = ["ingress-nginx"] + values = [local.ingress_nginx.name] } } ingress = { @@ -154,12 +159,13 @@ module "ingress_nginx_namespace" { } resource "helm_release" "ingress_nginx" { - name = "ingress-nginx" - chart = local.ingress-nginx.chart - repository = local.ingress-nginx.repository - version = local.ingress-nginx.chart_version - namespace = module.ingress_nginx_namespace.name - wait = false + count = local.ingress_nginx.enabled ? 1 : 0 + + name = local.ingress_nginx.name + chart = local.ingress_nginx.chart + repository = local.ingress_nginx.repository + version = local.ingress_nginx.chart_version + namespace = module.ingress_nginx_namespace[count.index].name max_history = var.helm_release_history_size values = [ diff --git a/terraform/layer2-k8s/helm-releases.yaml b/terraform/layer2-k8s/helm-releases.yaml index bcccb672..f200cc6d 100644 --- a/terraform/layer2-k8s/helm-releases.yaml +++ b/terraform/layer2-k8s/helm-releases.yaml @@ -22,19 +22,19 @@ releases: chart: cert-manager repository: https://charts.jetstack.io version: 1.1.0 - namespace: cert-manager + namespace: certmanager - id: cert-mananger-certificate enabled: false chart: ../../helm-charts/certificate repository: version: - namespace: cert-manager + namespace: ingress-nginx - id: cert-manager-cluster-issuer enabled: false chart: ../../helm-charts/cluster-issuer repository: version: - namespace: cert-manager + namespace: certmanager - id: cluster-autoscaler enabled: true chart: cluster-autoscaler @@ -112,7 +112,7 @@ releases: chart: loki-stack repository: https://grafana.github.io/helm-charts version: 2.3.1 - namespace: loki-stack + namespace: loki - id: reloader enabled: true chart: reloader diff --git a/terraform/layer2-k8s/templates/loki-stack-values.yaml b/terraform/layer2-k8s/templates/loki-stack-values.yaml index c5ac3197..6c471608 100644 --- a/terraform/layer2-k8s/templates/loki-stack-values.yaml +++ b/terraform/layer2-k8s/templates/loki-stack-values.yaml @@ -37,85 +37,3 @@ fluent-bit: grafana: enabled: false - sidecar: - datasources: - enabled: true - image: - tag: 7.2.1 - - ingress: - enabled: true - annotations: - kubernetes.io/ingress.class: nginx - nginx.ingress.kubernetes.io/force-ssl-redirect: "true" - path: / - hosts: - - ${grafana_domain_name} - tls: - - hosts: - - ${grafana_domain_name} - - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: eks.amazonaws.com/capacityType - operator: In - values: - - SPOT - - persistence: - type: pvc - enabled: true - # storageClassName: default - accessModes: - - ReadWriteOnce - size: 10Gi - - adminUser: admin - adminPassword: "${grafana_password}" - - 
env: - # all values must be quoted - GF_SERVER_ROOT_URL: "https://${grafana_domain_name}" - GF_USERS_ALLOW_SIGN_UP: "false" - GF_AUTH_GITLAB_ENABLED: "true" - GF_AUTH_GITLAB_ALLOW_SIGN_UP: "true" - GF_AUTH_GITLAB_CLIENT_ID: "${gitlab_client_id}" - GF_AUTH_GITLAB_CLIENT_SECRET: "${gitlab_client_secret}" - GF_AUTH_GITLAB_SCOPES: "api" - GF_AUTH_GITLAB_AUTH_URL: "https://gitlab.com/oauth/authorize" - GF_AUTH_GITLAB_TOKEN_URL: "https://gitlab.com/oauth/token" - GF_AUTH_GITLAB_API_URL: "https://gitlab.com/api/v4" - GF_AUTH_GITLAB_ALLOWED_GROUPS: "${gitlab_group}" - - datasources: - datasources.yaml: - apiVersion: 1 - datasources: - - name: Loki - type: loki - url: http://loki-stack:3100 - jsonData: - maxLines: 1000 - - dashboardProviders: - dashboardproviders.yaml: - apiVersion: 1 - providers: - - name: 'logs' - orgId: 1 - folder: 'logs' - type: file - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards/logs - - dashboards: - logs: - logs: - ## Dashboard for quick search application logs for loki with two datasources loki and prometheus - https://grafana.com/grafana/dashboards/12019 - url: https://s3.amazonaws.com/grafana-dashboards.maddevs.org/common/aws-eks-base/loki-dashboard-quick-search.json - diff --git a/terraform/modules/aws-ec2-pritunl/efs.tf b/terraform/modules/aws-ec2-pritunl/efs.tf index 3f0a9f87..bc2e0de3 100644 --- a/terraform/modules/aws-ec2-pritunl/efs.tf +++ b/terraform/modules/aws-ec2-pritunl/efs.tf @@ -21,6 +21,6 @@ resource "aws_efs_mount_target" "this" { file_system_id = aws_efs_file_system.this.id subnet_id = var.public_subnets[count.index] security_groups = [ - module.efs_sg.this_security_group_id + module.efs_sg.security_group_id ] } diff --git a/terraform/modules/aws-ec2-pritunl/main.tf b/terraform/modules/aws-ec2-pritunl/main.tf index 164b231c..1326e105 100644 --- a/terraform/modules/aws-ec2-pritunl/main.tf +++ b/terraform/modules/aws-ec2-pritunl/main.tf @@ -12,7 +12,7 @@ resource "aws_launch_template" "this" { image_id = data.aws_ami.amazon_linux_2.id instance_type = var.instance_type ebs_optimized = false - vpc_security_group_ids = [module.ec2_sg.this_security_group_id] + vpc_security_group_ids = [module.ec2_sg.security_group_id] iam_instance_profile { arn = aws_iam_instance_profile.this_instance_profile.arn diff --git a/terraform/modules/aws-ec2-pritunl/output.tf b/terraform/modules/aws-ec2-pritunl/output.tf index 388c9ad2..37073e80 100644 --- a/terraform/modules/aws-ec2-pritunl/output.tf +++ b/terraform/modules/aws-ec2-pritunl/output.tf @@ -2,5 +2,5 @@ output "pritunl_endpoint" { value = aws_eip.this.id } output "pritunl_security_group" { - value = module.ec2_sg.this_security_group_id + value = module.ec2_sg.security_group_id } diff --git a/terraform/modules/aws-ec2-pritunl/security_groups.tf b/terraform/modules/aws-ec2-pritunl/security_groups.tf index bb37d482..0ecaf7c1 100644 --- a/terraform/modules/aws-ec2-pritunl/security_groups.tf +++ b/terraform/modules/aws-ec2-pritunl/security_groups.tf @@ -1,5 +1,6 @@ module "ec2_sg" { - source = "terraform-aws-modules/security-group/aws" + source = "terraform-aws-modules/security-group/aws" + version = "4.4.0" name = var.name description = "${var.name} security group" @@ -20,7 +21,8 @@ module "ec2_sg" { } module "efs_sg" { - source = "terraform-aws-modules/security-group/aws" + source = "terraform-aws-modules/security-group/aws" + version = "4.4.0" name = "${var.name}-efs" description = "${var.name} efs security group" From bc3411bf260fe7a7f497f7eeb5a52f3f278012b9 Mon Sep 17 00:00:00 2001 
From: maxim
Date: Fri, 12 Nov 2021 14:48:09 +0600
Subject: [PATCH 3/6] feat: introduce a new k8s storageclass and switch all PVCs to it.

---
 terraform/layer2-k8s/eks-storageclass.tf | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
 create mode 100644 terraform/layer2-k8s/eks-storageclass.tf

diff --git a/terraform/layer2-k8s/eks-storageclass.tf b/terraform/layer2-k8s/eks-storageclass.tf
new file mode 100644
index 00000000..3c3b9c3a
--- /dev/null
+++ b/terraform/layer2-k8s/eks-storageclass.tf
@@ -0,0 +1,14 @@
+resource "kubernetes_storage_class" "advanced" {
+  metadata {
+    name = "advanced"
+  }
+  storage_provisioner    = "kubernetes.io/aws-ebs"
+  reclaim_policy         = "Retain"
+  allow_volume_expansion = true
+  volume_binding_mode    = "WaitForFirstConsumer"
+  parameters = {
+    type      = "gp2"
+    fsType    = "ext4"
+    encrypted = "true" # It is set to true for cases when global EBS encryption is disabled.
+  }
+}
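The `advanced` class is consumed by pointing a PVC's `storageClassName` at it; with `volume_binding_mode = "WaitForFirstConsumer"` the EBS volume is provisioned only once a pod using the claim is scheduled (so the volume lands in that pod's availability zone), and `reclaim_policy = "Retain"` keeps the volume after the claim is deleted. A minimal sketch of such a claim (the claim name and size are illustrative, not taken from this patch series):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data   # illustrative name
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: advanced   # the class created by eks-storageclass.tf above
  resources:
    requests:
      storage: 10Gi   # illustrative size
```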
From 5a52034f828959da2afcccb0f5cb859f09416d64 Mon Sep 17 00:00:00 2001
From: maxim
Date: Fri, 12 Nov 2021 14:51:45 +0600
Subject: [PATCH 4/6] fix: helm_releases installation, add count to all releases

---
 README.md                                     | 179 ++++++++--------
 terraform/layer1-aws/aws-ec2-pritunl.tf       |   3 +-
 terraform/layer2-k8s/.terraform.lock.hcl      |  18 ++
 terraform/layer2-k8s/README.md                | 201 +++++++++++++++---
 .../eks-aws-loadbalancer-controller.tf        |   2 +-
 .../eks-aws-node-termination-handler.tf       |   2 +-
 terraform/layer2-k8s/eks-calico.tf            |   2 +-
 .../eks-cert-manager-certificate.tf           |  37 ----
 .../eks-cert-manager-cluster-issuer.tf        |  38 ----
 terraform/layer2-k8s/eks-cert-manager.tf      |  71 +++++++
 .../layer2-k8s/eks-cluster-autoscaler.tf      |   2 +-
 .../layer2-k8s/{examples => }/eks-elk.tf      | 110 +++++-----
 .../{examples => }/eks-gitlab-runner.tf       |  65 +++---
 terraform/layer2-k8s/eks-istio.tf             | 122 +++++++++++
 .../layer2-k8s/eks-kube-prometheus-stack.tf   |   8 +-
 terraform/layer2-k8s/eks-loki-stack.tf        |  14 +-
 .../eks-nginx-ingress-controller.tf           |   6 +-
 terraform/layer2-k8s/eks-reloader.tf          |  69 ++++++
 terraform/layer2-k8s/examples/eks-istio.tf    |  98 ---------
 terraform/layer2-k8s/examples/eks-teamcity.tf | 108 +++++-----
 terraform/layer2-k8s/helm-releases.yaml       |   2 +-
 .../layer2-k8s/templates/elk-values.yaml      |  14 +-
 .../istio/istio-resources-values.yaml         |   9 +
 .../templates/loki-stack-values.yaml          |   1 +
 .../templates/prometheus-values.yaml          |   6 +-
 .../layer2-k8s/templates/teamcity-values.yaml |   2 +-
 .../aws-ec2-pritunl/security_groups.tf        |   2 +-
 27 files changed, 725 insertions(+), 466 deletions(-)
 delete mode 100644 terraform/layer2-k8s/eks-cert-manager-certificate.tf
 delete mode 100644 terraform/layer2-k8s/eks-cert-manager-cluster-issuer.tf
 rename terraform/layer2-k8s/{examples => }/eks-elk.tf (64%)
 rename terraform/layer2-k8s/{examples => }/eks-gitlab-runner.tf (63%)
 create mode 100644 terraform/layer2-k8s/eks-istio.tf
 create mode 100644 terraform/layer2-k8s/eks-reloader.tf
 delete mode 100644 terraform/layer2-k8s/examples/eks-istio.tf

diff --git a/README.md b/README.md
index fe9d995a..f6d1862c 100644
--- a/README.md
+++ b/README.md
@@ -72,8 +72,8 @@ You can find more about this project in Anton Babenko stream:
   - [Destroy infrastructure by `terragrunt`](#destroy-infrastructure-by-terragrunt)
 - [What to do after deployment](#what-to-do-after-deployment)
   - [Update terraform version](#update-terraform-version)
-  - [Updated terraform providers](#updated-terraform-providers)
-  - [examples](#examples)
+  - [Update terraform providers](#update-terraform-providers)
+  - [Additional components](#additional-components)
 - [TFSEC](#tfsec)
 - [Contributing](#contributing)
@@ -453,18 +453,25 @@ Or in each layer run command:
 terragrunt init -upgrade
 ```
 
-### examples
+### Additional components
 
-Each layer has an `examples/` directory that contains working examples that expand the basic configuration. The files’ names and contents are in accordance with our coding conventions, so no additional description is required. If you need to use something, just move it from this folder to the root of the layer.
-
-This will allow you to expand your basic functionality by launching a monitoring system based on ELK or Prometheus Stack, etc.
-
-
-* If you want to deploy **`ELK stack`**, move `layer1-aws/examples/aws-s3-bucket-elastic-stack.tf` and `layer2-k8s/examples/eks-elk.tf` to the root of the layers.
-* If you want to deploy **`Pritunl VPN`** server just move `layer1-aws/examples/aws-ec2-pritunl.tf` to the root of the layer.
-* If you want to deploy **`Gitlab runner`** that runs workers as k8s pods (in EKS cluster), move `layer1-aws/examples/aws-s3-bucket-gitlab-runner-cache.tf` and `layer2-k8s/examples/eks-gitlab-runner.tf` to the root of the layers.
-* If you want to deploy `Istio Operator` move `layer2-k8s/examples/eks-istio.tf` to the root of the layer.
-* If you want to deploy `Teamcity` move `layer2-k8s/examples/eks-teamcity.tf` to the root of the layer.
+This boilerplate installs all the basic and necessary components. In addition, it provides several optional components in both layers. To enable or disable them:
+* layer1-aws: search for the `***_enable` variables and set the ones you need to **true**
+* layer2-k8s: check the `helm-releases.yaml` file and set **enabled: true** or **enabled: false** for the components that you want to **deploy** or to **uninstall**, as shown in the example below
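+
+For example, this is how the `loki-stack` release is declared in `helm-releases.yaml` (a sketch based on the entry shipped in this repository; the `enabled` value is the toggle, flip it to `false` and re-apply the layer to uninstall the release):
+
+```yaml
+releases:
+  - id: loki-stack
+    enabled: true
+    chart: loki-stack
+    repository: https://grafana.github.io/helm-charts
+    version: 2.3.1
+    namespace: loki
+```
+
+In layer1-aws the toggles are plain Terraform variables, e.g. `pritunl_vpn_server_enable = true`.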
 
 ## TFSEC
 
@@ -478,95 +471,95 @@ We use GitHub Actions and [tfsec](https://github.com/aquasecurity/tfsec) to chec
 | layer1-aws/aws-eks.tf | aws-eks-no-public-cluster-access | Resource 'module.eks:aws_eks_cluster.this[0]' has public access is explicitly set to enabled | By default we create public accessible EKS cluster from anywhere |
 | layer1-aws/aws-eks.tf | aws-eks-no-public-cluster-access-to-cidr | Resource 'module.eks:aws_eks_cluster.this[0]' has public access cidr explicitly set to wide open | By default we create public accessible EKS cluster from anywhere |
 | layer1-aws/aws-eks.tf | aws-vpc-no-public-egress-sgr | Resource 'module.eks:aws_security_group_rule.workers_egress_internet[0]' defines a fully open egress security group rule | We use recommended option. [More info](https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html) |
-| modules/aws-ec2-pritunl/security_groups.tf | aws-vpc-no-public-ingress-sgr | Resource 'module.pritunl[0]:module.ec2_sg:aws_security_group_rule.ingress_with_cidr_blocks[1]' defines a fully open ingress security group rule. | This is a VPN server and it need to have egress traffic to anywhere by default |
-| modules/aws-ec2-pritunl/security_groups.tf | aws-vpc-no-public-ingress-sgr | Resource 'module.pritunl[0]:module.ec2_sg:aws_security_group_rule.ingress_with_cidr_blocks[1]' defines a fully open ingress security group rule. | This is a VPN server and by default it needs to have ingress traffic from anywhere |
-| modules/aws-iam-eks-trusted/main.tf | aws-iam-no-policy-wildcards | Resource 'module.aws_iam_external_secrets:aws_iam_role_policy.this' defines a policy with wildcarded resources. | We use this policy for external-secrets and grant it access to all secrets. |
-| modules/aws-iam-eks-trusted/main.tf | aws-iam-no-policy-wildcards | Resource 'module.aws_iam_autoscaler:aws_iam_role_policy.this' defines a policy with wildcarded resources | We use condition to allow run actions only for certain autoscaling groups |
+| modules/aws-ec2-pritunl/security_groups.tf | aws-vpc-no-public-egress-sgr | Resource 'module.pritunl[0]:module.ec2_sg:aws_security_group_rule.egress_with_cidr_blocks[0]' defines a fully open egress security group rule. | This is a VPN server and it needs to have egress traffic to anywhere by default |
+| modules/aws-ec2-pritunl/security_groups.tf | aws-vpc-no-public-egress-sgr | Resource 'module.pritunl[0]:module.ec2_sg:aws_security_group_rule.ingress_with_cidr_blocks[1]' defines a fully open ingress security group rule. | This is a VPN server and by default it needs to have ingress traffic from anywhere |
+| modules/aws-iam-eks-trusted/main.tf | aws-iam-no-policy-wildcards | Resource 'module.aws_iam_external_secrets[0]:aws_iam_role_policy.this' defines a policy with wildcarded resources. | We use this policy for external-secrets and grant it access to all secrets. |
+| modules/aws-iam-eks-trusted/main.tf | aws-iam-no-policy-wildcards | Resource 'module.aws_iam_autoscaler[0]:aws_iam_role_policy.this' defines a policy with wildcarded resources | We use condition to allow run actions only for certain autoscaling groups |
 | modules/aws-iam-eks-trusted/main.tf | aws-iam-no-policy-wildcards | Resource 'module.eks_alb_ingress[0]:module.aws_iam_aws_loadbalancer_controller:aws_iam_role_policy.this' defines a policy with wildcarded resources | We use recommended [policy](https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/main/docs/install/iam_policy.json) |
 | layer2-k8s/locals.tf | general-secrets-sensitive-in-local | Local 'locals.' includes a potentially sensitive value which is defined within the project | tfsec complains on helm_repo_external_secrets url because it contains the word *secret* |
-| modules/aws-iam-eks-trusted/main.tf | aws-iam-no-policy-wildcards | Resource 'module.aws_iam_external_dns:aws_iam_role_policy.this' defines a policy with wildcarded resources | We use the policy from the [documentation](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md#iam-policy) |
-| modules/aws-iam-eks-trusted/main.tf | aws-iam-no-policy-wildcards | Resource 'module.aws_iam_cert_manager:aws_iam_role_policy.this' defines a policy with wildcarded resources | Certmanager uses Route53 to create DNS records and validate wildcard certificates.
By default we allow it to manage all zones | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.monitoring_namespace:kubernetes_network_policy.this[3]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.ingress_nginx_namespace:kubernetes_network_policy.this[5]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.ingress_nginx_namespace:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.external_dns_namespace:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.aws_node_termination_handler_namespace:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.reloader_namespace:kubernetes_network_policy.this[2]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.loki_namespace:kubernetes_network_policy.this[4]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.monitoring_namespace:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.external_secrets_namespace:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.external_secrets_namespace:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.cluster_autoscaler_namespace:kubernetes_network_policy.this[2]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.certmanager_namespace:kubernetes_network_policy.this[3]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.loki_namespace:kubernetes_network_policy.this[3]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | 
-| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.cluster_autoscaler_namespace:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.certmanager_namespace:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.aws_node_termination_handler_namespace:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.ingress_nginx_namespace:kubernetes_network_policy.this[4]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.certmanager_namespace:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.reloader_namespace:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.cluster_autoscaler_namespace:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.certmanager_namespace:kubernetes_network_policy.this[2]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.aws_node_termination_handler_namespace:kubernetes_network_policy.this[2]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.reloader_namespace:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.monitoring_namespace:kubernetes_network_policy.this[2]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.loki_namespace:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.loki_namespace:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| 
modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.external_dns_namespace:kubernetes_network_policy.this[2]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.monitoring_namespace:kubernetes_network_policy.this[4]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.loki_namespace:kubernetes_network_policy.this[2]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.ingress_nginx_namespace:kubernetes_network_policy.this[2]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.cluster_autoscaler_namespace:kubernetes_network_policy.this[3]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.ingress_nginx_namespace:kubernetes_network_policy.this[3]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.ingress_nginx_namespace:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.external_dns_namespace:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.monitoring_namespace:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.gitlab_runner_namespace:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.elk_namespace:kubernetes_network_policy.this[3]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.elk_namespace:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.gitlab_runner_namespace:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | 
kubernetes-network-no-public-egress | Resource 'module.gitlab_runner_namespace:kubernetes_network_policy.this[2]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.gitlab_runner_namespace:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.elk_namespace:kubernetes_network_policy.this[4]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.elk_namespace:kubernetes_network_policy.this[2]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.elk_namespace:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/aws-iam-eks-trusted/main.tf | aws-iam-no-policy-wildcards | Resource 'module.aws_iam_external_dns[0]:aws_iam_role_policy.this' defines a policy with wildcarded resources | We use the policy from the [documentation](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md#iam-policy) | +| modules/aws-iam-eks-trusted/main.tf | aws-iam-no-policy-wildcards | Resource 'module.aws_iam_cert_manager[0]:aws_iam_role_policy.this' defines a policy with wildcarded resources | Certmanager uses Route53 to create DNS records and validate wildcard certificates. 
By default we allow it to manage all zones | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.monitoring_namespace[0]:kubernetes_network_policy.this[3]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.ingress_nginx_namespace[0]:kubernetes_network_policy.this[5]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.ingress_nginx_namespace[0]:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.external_dns_namespace[0]:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.aws_node_termination_handler_namespace[0]:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.reloader_namespace[0]:kubernetes_network_policy.this[2]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.loki_namespace[0]:kubernetes_network_policy.this[4]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.monitoring_namespace[0]:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.external_secrets_namespace[0]:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.external_secrets_namespace[0]:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.cluster_autoscaler_namespace[0]:kubernetes_network_policy.this[2]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.certmanager_namespace[0]:kubernetes_network_policy.this[3]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.loki_namespace[0]:kubernetes_network_policy.this[3]' allows all egress traffic by default | We don't want to deny 
egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.cluster_autoscaler_namespace[0]:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.certmanager_namespace[0]:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.aws_node_termination_handler_namespace[0]:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.ingress_nginx_namespace[0]:kubernetes_network_policy.this[4]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.certmanager_namespace[0]:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.reloader_namespace[0]:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.cluster_autoscaler_namespace[0]:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.certmanager_namespace[0]:kubernetes_network_policy.this[2]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.aws_node_termination_handler_namespace[0]:kubernetes_network_policy.this[2]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.reloader_namespace[0]:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.monitoring_namespace[0]:kubernetes_network_policy.this[2]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.loki_namespace[0]:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.loki_namespace[0]:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny 
egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.external_dns_namespace[0]:kubernetes_network_policy.this[2]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.monitoring_namespace[0]:kubernetes_network_policy.this[4]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.loki_namespace[0]:kubernetes_network_policy.this[2]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.ingress_nginx_namespace[0]:kubernetes_network_policy.this[2]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.cluster_autoscaler_namespace[0]:kubernetes_network_policy.this[3]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.ingress_nginx_namespace[0]:kubernetes_network_policy.this[3]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.ingress_nginx_namespace[0]:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.external_dns_namespace[0]:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.monitoring_namespace[0]:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.gitlab_runner_namespace[0]:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.elk_namespace[0]:kubernetes_network_policy.this[3]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.elk_namespace[0]:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.gitlab_runner_namespace[0]:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default 
installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.gitlab_runner_namespace[0]:kubernetes_network_policy.this[2]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.gitlab_runner_namespace[0]:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.elk_namespace[0]:kubernetes_network_policy.this[4]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.elk_namespace[0]:kubernetes_network_policy.this[2]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.elk_namespace[0]:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | | modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.aws_load_balancer_controller_namespace[0]:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | | modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.aws_load_balancer_controller_namespace[0]:kubernetes_network_policy.this[3]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | | modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.aws_load_balancer_controller_namespace[0]:kubernetes_network_policy.this[2]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | | modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.aws_load_balancer_controller_namespace[0]:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.keda_namespace:kubernetes_network_policy.this[2]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.keda_namespace:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.keda_namespace:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.reloader_namespace:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as 
expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.certmanager_namespace:kubernetes_network_policy.this[3]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.cluster_autoscaler_namespace:kubernetes_network_policy.this[3]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.external_dns_namespace:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.monitoring_namespace:kubernetes_network_policy.this[3]' allows ingress traffic from the internet | We allow traffic from 0.0.0.0/0 to trigger webhooks only on certain port and certain pods | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.monitoring_namespace:kubernetes_network_policy.this[4]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.loki_namespace:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.external_secrets_namespace:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.certmanager_namespace:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.ingress_nginx_namespace:kubernetes_network_policy.this[5]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.loki_namespace:kubernetes_network_policy.this[4]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.aws_node_termination_handler_namespace:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.reloader_namespace:kubernetes_network_policy.this[2]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | 
kubernetes-network-no-public-ingress | Resource 'module.ingress_nginx_namespace:kubernetes_network_policy.this[2]' allows ingress traffic from the internet | We allow traffic from 0.0.0.0/0 to trigger webhooks only on certain port and certain pods | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.cluster_autoscaler_namespace:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.certmanager_namespace:kubernetes_network_policy.this[2]' allows ingress traffic from the internet | We allow traffic from 0.0.0.0/0 to trigger webhooks only on certain port and certain pods | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.aws_node_termination_handler_namespace:kubernetes_network_policy.this[2]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.monitoring_namespace:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.ingress_nginx_namespace:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.external_dns_namespace:kubernetes_network_policy.this[2]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.ingress_nginx_namespace:kubernetes_network_policy.this[3]' allows ingress traffic from the internet | We allow traffic from 0.0.0.0/0 to trigger webhooks only on certain port and certain pods | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.elk_namespace:kubernetes_network_policy.this[4]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.elk_namespace:kubernetes_network_policy.this[3]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.gitlab_runner_namespace:kubernetes_network_policy.this[2]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.elk_namespace:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 
'module.keda_namespace[0]:kubernetes_network_policy.this[2]' allows egress traffic to the internet | We don't want to deny egress traffic in a default installation |
+| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.keda_namespace[0]:kubernetes_network_policy.this[1]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation |
+| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-egress | Resource 'module.keda_namespace[0]:kubernetes_network_policy.this[0]' allows all egress traffic by default | We don't want to deny egress traffic in a default installation |
+| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.reloader_namespace[0]:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress traffic by default, but tfsec doesn't work as expected (bug) |
+| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.certmanager_namespace[0]:kubernetes_network_policy.this[3]' allows all ingress traffic by default | We deny all ingress traffic by default, but tfsec doesn't work as expected (bug) |
+| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.cluster_autoscaler_namespace[0]:kubernetes_network_policy.this[3]' allows all ingress traffic by default | We deny all ingress traffic by default, but tfsec doesn't work as expected (bug) |
+| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.external_dns_namespace[0]:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress traffic by default, but tfsec doesn't work as expected (bug) |
+| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.monitoring_namespace[0]:kubernetes_network_policy.this[3]' allows ingress traffic from the internet | We allow traffic from 0.0.0.0/0 to trigger webhooks only on certain port and certain pods |
+| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.monitoring_namespace[0]:kubernetes_network_policy.this[4]' allows all ingress traffic by default | We deny all ingress traffic by default, but tfsec doesn't work as expected (bug) |
+| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.loki_namespace[0]:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress traffic by default, but tfsec doesn't work as expected (bug) |
+| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.external_secrets_namespace[0]:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress traffic by default, but tfsec doesn't work as expected (bug) |
+| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.certmanager_namespace[0]:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress traffic by default, but tfsec doesn't work as expected (bug) |
+| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.ingress_nginx_namespace[0]:kubernetes_network_policy.this[5]' allows all ingress traffic by default | We deny all ingress traffic by default, but tfsec doesn't work as expected (bug) |
by default, but tfsec doesn't work as expected (bug) | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.loki_namespace[0]:kubernetes_network_policy.this[4]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.aws_node_termination_handler_namespace[0]:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.reloader_namespace[0]:kubernetes_network_policy.this[2]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.ingress_nginx_namespace[0]:kubernetes_network_policy.this[2]' allows ingress traffic from the internet | We allow traffic from 0.0.0.0/0 to trigger webhooks only on certain port and certain pods | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.cluster_autoscaler_namespace[0]:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.certmanager_namespace[0]:kubernetes_network_policy.this[2]' allows ingress traffic from the internet | We allow traffic from 0.0.0.0/0 to trigger webhooks only on certain port and certain pods | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.aws_node_termination_handler_namespace[0]:kubernetes_network_policy.this[2]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.monitoring_namespace[0]:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.ingress_nginx_namespace[0]:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.external_dns_namespace[0]:kubernetes_network_policy.this[2]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.ingress_nginx_namespace[0]:kubernetes_network_policy.this[3]' allows ingress traffic from the internet | We allow traffic from 0.0.0.0/0 to trigger webhooks only on certain port and certain pods | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.elk_namespace[0]:kubernetes_network_policy.this[4]' allows all ingress traffic by default | We deny all ingress trafic by 
default, but tfsec doesn't work as expected (bug) | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.elk_namespace[0]:kubernetes_network_policy.this[3]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.gitlab_runner_namespace[0]:kubernetes_network_policy.this[2]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.elk_namespace[0]:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | | modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.aws_load_balancer_controller_namespace[0]:kubernetes_network_policy.this[3]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | | modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.aws_load_balancer_controller_namespace[0]:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | | modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.aws_load_balancer_controller_namespace[0]:kubernetes_network_policy.this[2]' allows ingress traffic from the internet | We allow traffic from 0.0.0.0/0 to trigger webhooks only on certain port and certain pods | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.keda_namespace:kubernetes_network_policy.this[2]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | -| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.keda_namespace:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.keda_namespace[0]:kubernetes_network_policy.this[2]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | +| modules/kubernetes-namespace/network-policy.tf | kubernetes-network-no-public-ingress | Resource 'module.keda_namespace[0]:kubernetes_network_policy.this[0]' allows all ingress traffic by default | We deny all ingress trafic by default, but tfsec doesn't work as expected (bug) | ## Contributing diff --git a/terraform/layer1-aws/aws-ec2-pritunl.tf b/terraform/layer1-aws/aws-ec2-pritunl.tf index 614d6586..e18c44bc 100644 --- a/terraform/layer1-aws/aws-ec2-pritunl.tf +++ b/terraform/layer1-aws/aws-ec2-pritunl.tf @@ -1,5 +1,6 @@ +#tfsec:ignore:aws-vpc-no-public-egress-sgr tfsec:ignore:aws-vpc-no-public-ingress-sgr module "pritunl" { - count = var.pritunl_vpn_server ? 1 : 0 + count = var.pritunl_vpn_server_enable ? 
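The rename from `pritunl_vpn_server` to `pritunl_vpn_server_enable` implies a matching boolean variable in terraform/layer1-aws/variables.tf, whose hunk is not included here. A minimal sketch of what that declaration presumably looks like; only the variable name is confirmed by this patch, while the type, default, and description are assumptions:

```hcl
# Assumed shape of the renamed flag in terraform/layer1-aws/variables.tf;
# only the name "pritunl_vpn_server_enable" appears in the hunk above.
variable "pritunl_vpn_server_enable" {
  type        = bool
  default     = false
  description = "Set to true to deploy a Pritunl VPN server in layer1"
}
```

Setting `pritunl_vpn_server_enable = true` in the layer's tfvars would then drive the module's `count` from 0 to 1.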
diff --git a/terraform/layer2-k8s/.terraform.lock.hcl b/terraform/layer2-k8s/.terraform.lock.hcl index 514c291b..7245fcd8 100644 --- a/terraform/layer2-k8s/.terraform.lock.hcl +++ b/terraform/layer2-k8s/.terraform.lock.hcl @@ -132,6 +132,24 @@ provider "registry.terraform.io/hashicorp/template" { ] } +provider "registry.terraform.io/hashicorp/time" { + version = "0.7.2" + hashes = [ + "h1:NKy1QrNLlP5mKy5Tea6lQSRsVoyydJQKh6WvNTdBF4I=", + "zh:0bbe0158c2a9e3f5be911b7e94477586110c51746bb13d102054f22754565bda", + "zh:3250af7fd49b8aaf2ccc895588af05197d886e38b727e3ba33bcbb8cc96ad34d", + "zh:35e4de0437f4fa9c1ad69aaf8136413be2369ea607d78e04bb68dc66a6a520b8", + "zh:369756417a6272e79cad31eb2c82c202f6a4b6e4204a893f656644ba9e149fa2", + "zh:390370f1179d89b33c3a0731691e772d5450a7d59fc66671ec625e201db74aa2", + "zh:3d12ac905259d225c685bc42e5507ed0fbdaa5a09c30dce7c1932d908df857f7", + "zh:75f63e5e1c68e6c5bccba4568c3564e2774eb3a7a19189eb8e2b6e0d58c8f8cc", + "zh:7c22a2078a608e3e0278c4cbc9c483909062ebd1843bddaf8f176346c6d378b1", + "zh:7cfb3c02f78f0060d59c757c4726ab45a962ce4a9cf4833beca704a1020785bd", + "zh:a0325917f47c28a2ed088dedcea0d9520d91b264e63cc667fe4336ac993c0c11", + "zh:c181551d4c0a40b52e236f1755cc340aeca0fb5dcfd08b3b1c393a7667d2f327", + ] +} + provider "registry.terraform.io/hashicorp/tls" { version = "3.1.0" hashes = [ diff --git a/terraform/layer2-k8s/README.md b/terraform/layer2-k8s/README.md index 30c3bf11..c0e37d5d 100644 --- a/terraform/layer2-k8s/README.md +++ b/terraform/layer2-k8s/README.md @@ -1,45 +1,182 @@ ## Requirements -| Name | Version | -|------|---------| -| terraform | 0.15.1 | -| aws | 3.64.2 | -| helm | 2.4.1 | -| kubernetes | 2.6.1 | +| Name | Version | +| ---------- | ------- | +| terraform | 1.0.10 | +| aws | 3.64.2 | +| helm | 2.4.1 | +| kubernetes | 2.6.1 | ## Providers -| Name | Version | -|------|---------| -| aws | 3.64.2 | -| helm | 2.4.1 | -| random | n/a | -| template | n/a | -| terraform | n/a | +| Name | Version | +| --------- | ------- | +| aws | 3.64.2 | +| helm | 2.4.1 | +| random | n/a | +| template | n/a | +| terraform | n/a | ## Inputs -| Name | Description | Type | Default | Required | -|------|-------------|------|---------|:--------:| -| additional\_allowed\_ips | IP addresses allowed to connect to private resources | `list(any)` | `[]` | no | -| allowed\_account\_ids | List of allowed AWS account IDs | `list` | `[]` | no | -| aws\_loadbalancer\_controller\_enable | Disable or Enable aws-loadbalancer-controller.
You need to enable it if you want to use Fargate | `bool` | `false` | no | -| cluster\_autoscaler\_version | Version of cluster autoscaler | `string` | `"v1.21.0"` | no | -| elk\_index\_retention\_days | Days before removing an index from system elasticsearch | `number` | `14` | no | -| elk\_snapshot\_retention\_days | Days to capture index in snapshot | `number` | `90` | no | -| helm\_release\_history\_size | How many helm releases to store | `number` | `5` | no | -| nginx\_ingress\_ssl\_terminator | Select SSL termination type | `string` | `"lb"` | no | -| region | Default infrastructure region | `string` | `"us-east-1"` | no | -| remote\_state\_bucket | Name of the bucket for terraform state | `string` | n/a | yes | -| remote\_state\_key | Key of the remote state for terraform\_remote\_state | `string` | `"layer1-aws"` | no | +| Name | Description | Type | Default | Required | +| ------------------------------------- | ----------------------------------------------------------------------------------------------- | ----------- | -------------- | :------: | +| additional\_allowed\_ips | IP addresses allowed to connect to private resources | `list(any)` | `[]` | no | +| allowed\_account\_ids | List of allowed AWS account IDs | `list` | `[]` | no | +| aws\_loadbalancer\_controller\_enable | Disable or Enable aws-loadbalancer-controller. You need to enable it if you want to use Fargate | `bool` | `false` | no | +| cluster\_autoscaler\_version | Version of cluster autoscaler | `string` | `"v1.21.0"` | no | +| elk\_index\_retention\_days | Days before removing an index from system elasticsearch | `number` | `14` | no | +| elk\_snapshot\_retention\_days | Days to capture index in snapshot | `number` | `90` | no | +| helm\_release\_history\_size | How many helm releases to store | `number` | `5` | no | +| nginx\_ingress\_ssl\_terminator | Select SSL termination type | `string` | `"lb"` | no | +| region | Default infrastructure region | `string` | `"us-east-1"` | no | +| remote\_state\_bucket | Name of the bucket for terraform state | `string` | n/a | yes | +| remote\_state\_key | Key of the remote state for terraform\_remote\_state | `string` | `"layer1-aws"` | no | ## Outputs -| Name | Description | -|------|-------------| -| alertmanager\_domain\_name | Alertmanager ui address | +| Name | Description | +| ----------------------------- | -------------------------------------------------------- | +| alertmanager\_domain\_name | Alertmanager ui address | | get\_grafana\_admin\_password | Command which gets admin password from kubernetes secret | -| grafana\_admin\_password | Grafana admin password | -| grafana\_domain\_name | Grafana dashboards address | -| prometheus\_domain\_name | Prometheus ui address | +| grafana\_admin\_password | Grafana admin password | +| grafana\_domain\_name | Grafana dashboards address | +| prometheus\_domain\_name | Prometheus ui address | + + +## Requirements + +| Name | Version | +| ---------------------------------------------------------------------------- | ------- | +| [terraform](#requirement\_terraform) | 1.0.10 | +| [aws](#requirement\_aws) | 3.64.2 | +| [helm](#requirement\_helm) | 2.4.1 | +| [kubernetes](#requirement\_kubernetes) | 2.6.1 | + +## Providers + +| Name | Version | +| ---------------------------------------------------------------------- | ------- | +| [aws](#provider\_aws) | 3.64.2 | +| [helm](#provider\_helm) | 2.4.1 | +| [kubernetes](#provider\_kubernetes) | 2.6.1 | +| [random](#provider\_random) | 3.1.0 | +| [template](#provider\_template) | 2.2.0 |
+| [terraform](#provider\_terraform) | n/a | +| [time](#provider\_time) | 0.7.2 | + +## Modules + +| Name | Source | Version | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- | ------- | +| [aws\_iam\_autoscaler](#module\_aws\_iam\_autoscaler) | ../modules/aws-iam-eks-trusted | n/a | +| [aws\_iam\_aws\_loadbalancer\_controller](#module\_aws\_iam\_aws\_loadbalancer\_controller) | ../modules/aws-iam-eks-trusted | n/a | +| [aws\_iam\_cert\_manager](#module\_aws\_iam\_cert\_manager) | ../modules/aws-iam-eks-trusted | n/a | +| [aws\_iam\_elastic\_stack](#module\_aws\_iam\_elastic\_stack) | ../modules/aws-iam-user-with-policy | n/a | +| [aws\_iam\_external\_dns](#module\_aws\_iam\_external\_dns) | ../modules/aws-iam-eks-trusted | n/a | +| [aws\_iam\_external\_secrets](#module\_aws\_iam\_external\_secrets) | ../modules/aws-iam-eks-trusted | n/a | +| [aws\_iam\_gitlab\_runner](#module\_aws\_iam\_gitlab\_runner) | ../modules/aws-iam-eks-trusted | n/a | +| [aws\_iam\_grafana](#module\_aws\_iam\_grafana) | ../modules/aws-iam-eks-trusted | n/a | +| [aws\_load\_balancer\_controller\_namespace](#module\_aws\_load\_balancer\_controller\_namespace) | ../modules/kubernetes-namespace | n/a | +| [aws\_node\_termination\_handler\_namespace](#module\_aws\_node\_termination\_handler\_namespace) | ../modules/kubernetes-namespace | n/a | +| [certmanager\_namespace](#module\_certmanager\_namespace) | ../modules/kubernetes-namespace | n/a | +| [cluster\_autoscaler\_namespace](#module\_cluster\_autoscaler\_namespace) | ../modules/kubernetes-namespace | n/a | +| [elastic\_tls](#module\_elastic\_tls) | ../modules/self-signed-certificate | n/a | +| [elk\_namespace](#module\_elk\_namespace) | ../modules/kubernetes-namespace | n/a | +| [external\_dns\_namespace](#module\_external\_dns\_namespace) | ../modules/kubernetes-namespace | n/a | +| [external\_secrets\_namespace](#module\_external\_secrets\_namespace) | ../modules/kubernetes-namespace | n/a | +| [fargate\_namespace](#module\_fargate\_namespace) | ../modules/kubernetes-namespace | n/a | +| [gitlab\_runner\_namespace](#module\_gitlab\_runner\_namespace) | ../modules/kubernetes-namespace | n/a | +| [ingress\_nginx\_namespace](#module\_ingress\_nginx\_namespace) | ../modules/kubernetes-namespace | n/a | +| [istio\_system\_namespace](#module\_istio\_system\_namespace) | ../modules/kubernetes-namespace | n/a | +| [keda\_namespace](#module\_keda\_namespace) | ../modules/kubernetes-namespace | n/a | +| [kiali\_namespace](#module\_kiali\_namespace) | ../modules/kubernetes-namespace | n/a | +| [loki\_namespace](#module\_loki\_namespace) | ../modules/kubernetes-namespace | n/a | +| [monitoring\_namespace](#module\_monitoring\_namespace) | ../modules/kubernetes-namespace | n/a | +| [reloader\_namespace](#module\_reloader\_namespace) | ../modules/kubernetes-namespace | n/a | + +## Resources + +| Name | Type | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | +| [aws_s3_bucket.elastic_stack](https://registry.terraform.io/providers/aws/3.64.2/docs/resources/s3_bucket) | resource | +| [aws_s3_bucket.gitlab_runner_cache](https://registry.terraform.io/providers/aws/3.64.2/docs/resources/s3_bucket) | resource | +| 
[aws_s3_bucket_public_access_block.elastic_stack_public_access_block](https://registry.terraform.io/providers/aws/3.64.2/docs/resources/s3_bucket_public_access_block) | resource | +| [aws_s3_bucket_public_access_block.gitlab_runner_cache_public_access_block](https://registry.terraform.io/providers/aws/3.64.2/docs/resources/s3_bucket_public_access_block) | resource | +| [helm_release.aws_loadbalancer_controller](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.aws_node_termination_handler](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.calico_daemonset](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.cert_manager](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.certificate](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.cluster_autoscaler](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.cluster_issuer](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.elk](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.external_dns](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.external_secrets](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.gitlab_runner](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.ingress_nginx](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.istio_operator](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.istio_operator_resources](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.istio_resources](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.kedacore](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.kiali](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.loki_stack](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.prometheus_operator](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [helm_release.reloader](https://registry.terraform.io/providers/helm/2.4.1/docs/resources/release) | resource | +| [kubernetes_secret.elasticsearch_certificates](https://registry.terraform.io/providers/kubernetes/2.6.1/docs/resources/secret) | resource | +| [kubernetes_secret.elasticsearch_credentials](https://registry.terraform.io/providers/kubernetes/2.6.1/docs/resources/secret) | resource | +| [kubernetes_secret.elasticsearch_s3_user_creds](https://registry.terraform.io/providers/kubernetes/2.6.1/docs/resources/secret) | resource | +| [kubernetes_secret.kibana_enc_key](https://registry.terraform.io/providers/kubernetes/2.6.1/docs/resources/secret) | resource | +| [kubernetes_storage_class.advanced](https://registry.terraform.io/providers/kubernetes/2.6.1/docs/resources/storage_class) | resource | +| 
[random_string.elasticsearch_password](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | +| [random_string.grafana_password](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | +| [random_string.kibana_enc_key](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | +| [random_string.kibana_password](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource | +| [time_sleep.wait_10_seconds](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep) | resource | +| [aws_caller_identity.current](https://registry.terraform.io/providers/aws/3.64.2/docs/data-sources/caller_identity) | data source | +| [aws_eks_cluster.main](https://registry.terraform.io/providers/aws/3.64.2/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.main](https://registry.terraform.io/providers/aws/3.64.2/docs/data-sources/eks_cluster_auth) | data source | +| [aws_secretsmanager_secret.infra](https://registry.terraform.io/providers/aws/3.64.2/docs/data-sources/secretsmanager_secret) | data source | +| [aws_secretsmanager_secret_version.infra](https://registry.terraform.io/providers/aws/3.64.2/docs/data-sources/secretsmanager_secret_version) | data source | +| [template_file.cert_manager](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/file) | data source | +| [template_file.certificate](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/file) | data source | +| [template_file.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/file) | data source | +| [template_file.cluster_issuer](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/file) | data source | +| [template_file.elk](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/file) | data source | +| [template_file.external_dns](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/file) | data source | +| [template_file.external_secrets](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/file) | data source | +| [template_file.ingress_nginx](https://registry.terraform.io/providers/hashicorp/template/latest/docs/data-sources/file) | data source | +| [terraform_remote_state.layer1-aws](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source | + +## Inputs + +| Name | Description | Type | Default | Required | +| ---------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | ----------- | -------------- | :------: | +| [additional\_allowed\_ips](#input\_additional\_allowed\_ips) | IP addresses allowed to connect to private resources | `list(any)` | `[]` | no | +| [allowed\_account\_ids](#input\_allowed\_account\_ids) | List of allowed AWS account IDs | `list` | `[]` | no | +| [aws\_loadbalancer\_controller\_enable](#input\_aws\_loadbalancer\_controller\_enable) | Disable or Enable aws-loadbalancer-controller. 
You need to enable it if you want to use Fargate | `bool` | `false` | no | +| [cluster\_autoscaler\_version](#input\_cluster\_autoscaler\_version) | Version of cluster autoscaler | `string` | `"v1.21.0"` | no | +| [elk\_index\_retention\_days](#input\_elk\_index\_retention\_days) | Days before removing an index from system elasticsearch | `number` | `14` | no | +| [elk\_snapshot\_retention\_days](#input\_elk\_snapshot\_retention\_days) | Days to capture index in snapshot | `number` | `90` | no | +| [helm\_release\_history\_size](#input\_helm\_release\_history\_size) | How many helm releases to store | `number` | `5` | no | +| [nginx\_ingress\_ssl\_terminator](#input\_nginx\_ingress\_ssl\_terminator) | Select SSL termination type | `string` | `"lb"` | no | +| [region](#input\_region) | Default infrastructure region | `string` | `"us-east-1"` | no | +| [remote\_state\_bucket](#input\_remote\_state\_bucket) | Name of the bucket for terraform state | `string` | n/a | yes | +| [remote\_state\_key](#input\_remote\_state\_key) | Key of the remote state for terraform\_remote\_state | `string` | `"layer1-aws"` | no | + +## Outputs + +| Name | Description | +| ----------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------- | +| [alertmanager\_domain\_name](#output\_alertmanager\_domain\_name) | Alertmanager ui address | +| [apm\_domain\_name](#output\_apm\_domain\_name) | n/a | +| [elastic\_stack\_bucket\_name](#output\_elastic\_stack\_bucket\_name) | Name of the bucket for ELKS snapshots | +| [elasticsearch\_elastic\_password](#output\_elasticsearch\_elastic\_password) | Password of the superuser 'elastic' | +| [get\_grafana\_admin\_password](#output\_get\_grafana\_admin\_password) | Command which gets admin password from kubernetes secret | +| [gitlab\_runner\_cache\_bucket\_name](#output\_gitlab\_runner\_cache\_bucket\_name) | Name of the s3 bucket for gitlab-runner cache | +| [grafana\_admin\_password](#output\_grafana\_admin\_password) | Grafana admin password | +| [grafana\_domain\_name](#output\_grafana\_domain\_name) | Grafana dashboards address | +| [kibana\_domain\_name](#output\_kibana\_domain\_name) | Kibana dashboards address | +| [prometheus\_domain\_name](#output\_prometheus\_domain\_name) | Prometheus ui address | + \ No newline at end of file diff --git a/terraform/layer2-k8s/eks-aws-loadbalancer-controller.tf b/terraform/layer2-k8s/eks-aws-loadbalancer-controller.tf index 63cca726..43a36981 100644 --- a/terraform/layer2-k8s/eks-aws-loadbalancer-controller.tf +++ b/terraform/layer2-k8s/eks-aws-loadbalancer-controller.tf @@ -95,7 +95,7 @@ module "aws_iam_aws_loadbalancer_controller" { count = local.aws_load_balancer_controller.enabled ?
1 : 0 source = "../modules/aws-iam-eks-trusted" - name = "${local.name}-${local.aws_load_balancer_controller.name}" + name = "${local.name}-aws-lb-controller" region = local.region oidc_provider_arn = local.eks_oidc_provider_arn policy = jsonencode({ diff --git a/terraform/layer2-k8s/eks-aws-node-termination-handler.tf b/terraform/layer2-k8s/eks-aws-node-termination-handler.tf index f1071249..ecf504d2 100644 --- a/terraform/layer2-k8s/eks-aws-node-termination-handler.tf +++ b/terraform/layer2-k8s/eks-aws-node-termination-handler.tf @@ -63,7 +63,7 @@ resource "helm_release" "aws_node_termination_handler" { name = local.aws_node_termination_handler.name chart = local.aws_node_termination_handler.chart repository = local.aws_node_termination_handler.repository - version = local.aws_node_termination_handler_version + version = local.aws_node_termination_handler.chart_version namespace = module.aws_node_termination_handler_namespace[count.index].name max_history = var.helm_release_history_size diff --git a/terraform/layer2-k8s/eks-calico.tf b/terraform/layer2-k8s/eks-calico.tf index ae9187f8..3425bd59 100644 --- a/terraform/layer2-k8s/eks-calico.tf +++ b/terraform/layer2-k8s/eks-calico.tf @@ -10,7 +10,7 @@ locals { } resource "helm_release" "calico_daemonset" { - count = local.aws_calico ? 1 : 0 + count = local.aws_calico.enabled ? 1 : 0 name = local.aws_calico.name chart = local.aws_calico.chart diff --git a/terraform/layer2-k8s/eks-cert-manager-certificate.tf b/terraform/layer2-k8s/eks-cert-manager-certificate.tf deleted file mode 100644 index 38346906..00000000 --- a/terraform/layer2-k8s/eks-cert-manager-certificate.tf +++ /dev/null @@ -1,37 +0,0 @@ -locals { - cert_mananger_certificate = { - name = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].id - enabled = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].enabled - chart = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].chart - repository = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].repository - chart_version = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].version - namespace = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].namespace - } -} - -data "template_file" "certificate" { - template = file("${path.module}/templates/certificate-values.yaml") - - vars = { - domain_name = "*.${local.domain_name}" - common_name = local.domain_name - } -} - -resource "helm_release" "certificate" { - count = local.cert_mananger_certificate.enabled ? 
1 : 0 - - name = local.cert_mananger_certificate.name - chart = local.cert_mananger_certificate.chart - repository = local.cert_mananger_certificate.repository - version = local.cert_mananger_certificate_version - namespace = local.cert_mananger_certificate.namespace - max_history = var.helm_release_history_size - - values = [ - data.template_file.certificate.rendered, - ] - - # This dep needs for correct apply - depends_on = [helm_release.cert_manager, helm_release.cluster_issuer] -} diff --git a/terraform/layer2-k8s/eks-cert-manager-cluster-issuer.tf b/terraform/layer2-k8s/eks-cert-manager-cluster-issuer.tf deleted file mode 100644 index 5377e55f..00000000 --- a/terraform/layer2-k8s/eks-cert-manager-cluster-issuer.tf +++ /dev/null @@ -1,38 +0,0 @@ -locals { - cert_manager_cluster_issuer = { - name = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].id - enabled = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].enabled - chart = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].chart - repository = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].repository - chart_version = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].version - namespace = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].namespace - } -} - -data "template_file" "cluster_issuer" { - template = file("${path.module}/templates/cluster-issuer-values.yaml") - - vars = { - region = local.region - zone = local.domain_name - zone_id = local.zone_id - } -} - -resource "helm_release" "cluster_issuer" { - count = local.cert_manager_cluster_issuer.enabled ? 1 : 0 - - name = local.cert_manager_cluster_issuer.name - chart = local.cert_manager_cluster_issuer.chart - repository = local.cert_manager_cluster_issuer.repository - version = local.cert_manager_cluster_issuer_version - namespace = local.cert_manager_cluster_issuer.namespace - max_history = var.helm_release_history_size - - values = [ - data.template_file.cluster_issuer.rendered, - ] - - # This dep needs for correct apply - depends_on = [helm_release.cert_manager] -} diff --git a/terraform/layer2-k8s/eks-cert-manager.tf b/terraform/layer2-k8s/eks-cert-manager.tf index 894e28eb..06530b5b 100644 --- a/terraform/layer2-k8s/eks-cert-manager.tf +++ b/terraform/layer2-k8s/eks-cert-manager.tf @@ -7,6 +7,22 @@ locals { chart_version = local.helm_releases[index(local.helm_releases.*.id, "cert-manager")].version namespace = local.helm_releases[index(local.helm_releases.*.id, "cert-manager")].namespace } + cert_mananger_certificate = { + name = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "cert-mananger-certificate")].namespace + } + cert_manager_cluster_issuer = { + name = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].enabled + chart 
= local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "cert-manager-cluster-issuer")].namespace + } } data "template_file" "cert_manager" { @@ -17,6 +33,25 @@ data "template_file" "cert_manager" { } } +data "template_file" "cluster_issuer" { + template = file("${path.module}/templates/cluster-issuer-values.yaml") + + vars = { + region = local.region + zone = local.domain_name + zone_id = local.zone_id + } +} + +data "template_file" "certificate" { + template = file("${path.module}/templates/certificate-values.yaml") + + vars = { + domain_name = "*.${local.domain_name}" + common_name = local.domain_name + } +} + #tfsec:ignore:kubernetes-network-no-public-egress tfsec:ignore:kubernetes-network-no-public-ingress module "certmanager_namespace" { count = local.cert_manager.enabled ? 1 : 0 @@ -148,3 +183,39 @@ resource "helm_release" "cert_manager" { ] } + +resource "helm_release" "cluster_issuer" { + count = local.cert_manager_cluster_issuer.enabled ? 1 : 0 + + name = local.cert_manager_cluster_issuer.name + chart = local.cert_manager_cluster_issuer.chart + repository = local.cert_manager_cluster_issuer.repository + version = local.cert_manager_cluster_issuer.chart_version + namespace = local.cert_manager_cluster_issuer.namespace + max_history = var.helm_release_history_size + + values = [ + data.template_file.cluster_issuer.rendered, + ] + + # This dependency is needed for correct apply ordering + depends_on = [helm_release.cert_manager] +} + +resource "helm_release" "certificate" { + count = local.cert_mananger_certificate.enabled ? 1 : 0 + + name = local.cert_mananger_certificate.name + chart = local.cert_mananger_certificate.chart + repository = local.cert_mananger_certificate.repository + version = local.cert_mananger_certificate.chart_version + namespace = local.cert_mananger_certificate.namespace + max_history = var.helm_release_history_size + + values = [ + data.template_file.certificate.rendered, + ] + + # This dependency is needed for correct apply ordering + depends_on = [helm_release.cert_manager, helm_release.cluster_issuer] +}
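Every release touched by this patch follows the same pattern: it is looked up by `id` in `local.helm_releases`, populated from the renamed helm-releases.yaml. That file is not shown in this patch, but from the fields the lookups read (`id`, `enabled`, `chart`, `repository`, `version`, `namespace`) an entry plausibly looks like the sketch below; the field names match the locals above, while the concrete values are illustrative assumptions:

```yaml
# Hypothetical helm-releases.yaml entries; field names are taken from the
# lookups in the locals above, values are examples only.
- id: cert-manager
  enabled: true
  chart: cert-manager
  repository: https://charts.jetstack.io
  version: v1.5.4
  namespace: certmanager
- id: cert-manager-cluster-issuer
  enabled: true
  chart: cert-manager-cluster-issuer
  repository: https://example.com/charts   # placeholder
  version: 0.1.0
  namespace: certmanager
```

Flipping `enabled` for an entry is then the single switch that gates the namespace module, the IAM resources, and the `helm_release` itself through their `count` expressions.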
diff --git a/terraform/layer2-k8s/eks-cluster-autoscaler.tf b/terraform/layer2-k8s/eks-cluster-autoscaler.tf index 8cc5f6f0..d5ed7717 100644 --- a/terraform/layer2-k8s/eks-cluster-autoscaler.tf +++ b/terraform/layer2-k8s/eks-cluster-autoscaler.tf @@ -145,7 +145,7 @@ resource "helm_release" "cluster_autoscaler" { name = local.cluster_autoscaler.name chart = local.cluster_autoscaler.chart repository = local.cluster_autoscaler.repository - version = local.cluster_autoscaler_version + version = local.cluster_autoscaler.chart_version namespace = module.cluster_autoscaler_namespace[count.index].name max_history = var.helm_release_history_size diff --git a/terraform/layer2-k8s/examples/eks-elk.tf b/terraform/layer2-k8s/eks-elk.tf similarity index 64% rename from terraform/layer2-k8s/examples/eks-elk.tf rename to terraform/layer2-k8s/eks-elk.tf index e58bbf4a..87e3c28e 100644 --- a/terraform/layer2-k8s/examples/eks-elk.tf +++ b/terraform/layer2-k8s/eks-elk.tf @@ -1,8 +1,11 @@ locals { elk = { - chart = local.helm_charts[index(local.helm_charts.*.id, "elk")].chart - repository = lookup(local.helm_charts[index(local.helm_charts.*.id, "elk")], "repository", null) - chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "elk")], "version", null) + name = local.helm_releases[index(local.helm_releases.*.id, "elk")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "elk")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "elk")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "elk")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "elk")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "elk")].namespace } kibana_domain_name = "kibana-${local.domain_suffix}" apm_domain_name = "apm-${local.domain_suffix}" @@ -12,22 +15,22 @@ data "template_file" "elk" { template = file("${path.module}/templates/elk-values.yaml") vars = { - bucket_name = aws_s3_bucket.elastic_stack.id - storage_class_name = kubernetes_storage_class.elk.id + bucket_name = local.elk.enabled ? aws_s3_bucket.elastic_stack[0].id : "bucket_name" snapshot_retention_days = var.elk_snapshot_retention_days index_retention_days = var.elk_index_retention_days apm_domain_name = local.apm_domain_name kibana_domain_name = local.kibana_domain_name kibana_user = "kibana-${local.env}" - kibana_password = random_string.kibana_password.result - kibana_base64_creds = base64encode("kibana-${local.env}:${random_string.kibana_password.result}") + kibana_password = local.elk.enabled ? random_string.kibana_password[0].result : "password" } } #tfsec:ignore:kubernetes-network-no-public-egress tfsec:ignore:kubernetes-network-no-public-ingress module "elk_namespace" { + count = local.elk.enabled ?
1 : 0 + source = "../modules/kubernetes-namespace" - name = "elk" + name = local.elk.namespace network_policies = [ { name = "default-deny" @@ -43,7 +46,7 @@ module "elk_namespace" { { namespace_selector = { match_labels = { - name = "elk" + name = local.elk.namespace } } } @@ -60,7 +63,7 @@ module "elk_namespace" { { namespace_selector = { match_labels = { - name = "ingress-nginx" + name = local.ingress_nginx.namespace } } } @@ -107,19 +110,21 @@ module "elk_namespace" { } module "elastic_tls" { - source = "../modules/self-signed-certificate" + count = local.elk.enabled ? 1 : 0 + source = "../modules/self-signed-certificate" name = local.name common_name = "elasticsearch-master" - dns_names = [local.domain_name, "*.${local.domain_name}", "elasticsearch-master", "elasticsearch-master.${module.elk_namespace.name}", "kibana", "kibana.${module.elk_namespace.name}", "kibana-kibana", "kibana-kibana.${module.elk_namespace.name}", "logstash", "logstash.${module.elk_namespace.name}"] + dns_names = [local.domain_name, "*.${local.domain_name}", "elasticsearch-master", "elasticsearch-master.${module.elk_namespace[count.index].name}", "kibana", "kibana.${module.elk_namespace[count.index].name}", "kibana-kibana", "kibana-kibana.${module.elk_namespace[count.index].name}", "logstash", "logstash.${module.elk_namespace[count.index].name}"] validity_period_hours = 8760 early_renewal_hours = 336 } module "aws_iam_elastic_stack" { - source = "../modules/aws-iam-user-with-policy" + count = local.elk.enabled ? 1 : 0 - name = "${local.name}-elk" + source = "../modules/aws-iam-user-with-policy" + name = "${local.name}-${local.elk.name}" policy = jsonencode({ "Version" : "2012-10-17", "Statement" : [ @@ -132,7 +137,7 @@ module "aws_iam_elastic_stack" { "s3:ListBucketVersions" ], "Resource" : [ - "arn:aws:s3:::${local.elastic_stack_bucket_name}" + "arn:aws:s3:::${aws_s3_bucket.elastic_stack[count.index].id}" ] }, { @@ -145,7 +150,7 @@ module "aws_iam_elastic_stack" { "s3:ListMultipartUploadParts" ], "Resource" : [ - "arn:aws:s3:::${local.elastic_stack_bucket_name}/*" + "arn:aws:s3:::${aws_s3_bucket.elastic_stack[count.index].id}/*" ] } ] @@ -153,82 +158,81 @@ module "aws_iam_elastic_stack" { } ### ADDITIONAL RESOURCES FOR ELK - -resource "kubernetes_storage_class" "elk" { - metadata { - name = "elk" - } - storage_provisioner = "kubernetes.io/aws-ebs" - reclaim_policy = "Retain" - allow_volume_expansion = true - parameters = { - type = "gp2" - encrypted = true - fsType = "ext4" - } -} - resource "kubernetes_secret" "elasticsearch_credentials" { + count = local.elk.enabled ? 1 : 0 + metadata { name = "elastic-credentials" - namespace = module.elk_namespace.name + namespace = module.elk_namespace[count.index].name } data = { "username" = "elastic" - "password" = random_string.elasticsearch_password.result + "password" = random_string.elasticsearch_password[count.index].result } } resource "kubernetes_secret" "elasticsearch_certificates" { + count = local.elk.enabled ? 1 : 0 + metadata { name = "elastic-certificates" - namespace = module.elk_namespace.name + namespace = module.elk_namespace[count.index].name } data = { - "tls.crt" = module.elastic_tls.cert_pem - "tls.key" = module.elastic_tls.private_key_pem - "tls.p8" = module.elastic_tls.p8 + "tls.crt" = module.elastic_tls[count.index].cert_pem + "tls.key" = module.elastic_tls[count.index].private_key_pem + "tls.p8" = module.elastic_tls[count.index].p8 } } resource "kubernetes_secret" "elasticsearch_s3_user_creds" { + count = local.elk.enabled ? 
1 : 0 + metadata { name = "elasticsearch-s3-user-creds" - namespace = module.elk_namespace.name + namespace = module.elk_namespace[count.index].name } data = { - "aws_s3_user_access_key" = module.aws_iam_elastic_stack.access_key_id - "aws_s3_user_secret_key" = module.aws_iam_elastic_stack.access_secret_key + "aws_s3_user_access_key" = module.aws_iam_elastic_stack[count.index].access_key_id + "aws_s3_user_secret_key" = module.aws_iam_elastic_stack[count.index].access_secret_key } } resource "random_string" "elasticsearch_password" { + count = local.elk.enabled ? 1 : 0 + length = 32 special = false upper = true } resource "kubernetes_secret" "kibana_enc_key" { + count = local.elk.enabled ? 1 : 0 + metadata { name = "kibana-encryption-key" - namespace = module.elk_namespace.name + namespace = module.elk_namespace[count.index].name } data = { - "encryptionkey" = random_string.kibana_enc_key.result + "encryptionkey" = random_string.kibana_enc_key[count.index].result } } resource "random_string" "kibana_enc_key" { + count = local.elk.enabled ? 1 : 0 + length = 32 special = false upper = true } resource "random_string" "kibana_password" { + count = local.elk.enabled ? 1 : 0 + length = 32 special = false upper = true @@ -236,9 +240,11 @@ resource "random_string" "kibana_password" { #tfsec:ignore:aws-s3-enable-versioning tfsec:ignore:aws-s3-enable-bucket-logging resource "aws_s3_bucket" "elastic_stack" { - bucket = "${local.name}-elastic-stack" - acl = "private" + count = local.elk.enabled ? 1 : 0 + bucket = "${local.name}-elastic-stack" + acl = "private" + force_destroy = true server_side_encryption_configuration { rule { apply_server_side_encryption_by_default { @@ -254,7 +260,9 @@ } resource "aws_s3_bucket_public_access_block" "elastic_stack_public_access_block" { - bucket = aws_s3_bucket.elastic_stack.id + count = local.elk.enabled ? 1 : 0 + + bucket = aws_s3_bucket.elastic_stack[count.index].id # Block new public ACLs and uploading public objects block_public_acls = true # Retroactively remove public access granted through public ACLs @@ -266,12 +274,14 @@ } resource "helm_release" "elk" { - name = "elk" + count = local.elk.enabled ? 1 : 0 + + name = local.elk.name chart = local.elk.chart repository = local.elk.repository version = local.elk.chart_version - namespace = module.elk_namespace.name - wait = false + namespace = module.elk_namespace[count.index].name + timeout = "900" max_history = var.helm_release_history_size values = [ @@ -291,12 +301,12 @@ output "apm_domain_name" { } output "elasticsearch_elastic_password" { - value = random_string.elasticsearch_password.result + value = local.elk.enabled ? random_string.elasticsearch_password[0].result : null sensitive = true description = "Password of the superuser 'elastic'" } output "elastic_stack_bucket_name" { - value = aws_s3_bucket.elastic_stack.id + value = local.elk.enabled ? aws_s3_bucket.elastic_stack[0].id : null description = "Name of the bucket for ELKS snapshots" }
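A design note on the eks-elk.tf changes above: the `data "template_file" "elk"` block carries no `count` of its own, so it is rendered even when ELK is disabled; the "bucket_name" and "password" placeholder fallbacks keep those expressions from indexing count-gated resources that do not exist in that case.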
diff --git a/terraform/layer2-k8s/examples/eks-gitlab-runner.tf b/terraform/layer2-k8s/eks-gitlab-runner.tf similarity index 63% rename from terraform/layer2-k8s/examples/eks-gitlab-runner.tf rename to terraform/layer2-k8s/eks-gitlab-runner.tf index 43dd2b85..3d62426a 100644 --- a/terraform/layer2-k8s/examples/eks-gitlab-runner.tf +++ b/terraform/layer2-k8s/eks-gitlab-runner.tf @@ -1,21 +1,26 @@ locals { - gitlab-runner = { - chart = local.helm_charts[index(local.helm_charts.*.id, "gitlab-runner")].chart - repository = lookup(local.helm_charts[index(local.helm_charts.*.id, "gitlab-runner")], "repository", null) - chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "gitlab-runner")], "version", null) + gitlab_runner = { + name = local.helm_releases[index(local.helm_releases.*.id, "gitlab-runner")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "gitlab-runner")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "gitlab-runner")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "gitlab-runner")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "gitlab-runner")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "gitlab-runner")].namespace } gitlab_runner_template = templatefile("${path.module}/templates/gitlab-runner-values.yaml", { registration_token = local.gitlab_registration_token - namespace = module.ci_namespace.name - role_arn = module.aws_iam_gitlab_runner.role_arn - bucket_name = aws_s3_bucket.gitlab_runner_cache.id + namespace = local.gitlab_runner.enabled ? module.gitlab_runner_namespace[0].name : "default" + role_arn = local.gitlab_runner.enabled ? module.aws_iam_gitlab_runner[0].role_arn : "" + bucket_name = local.gitlab_runner.enabled ? aws_s3_bucket.gitlab_runner_cache[0].id : "bucket_name" region = local.region }) } #tfsec:ignore:kubernetes-network-no-public-egress tfsec:ignore:kubernetes-network-no-public-ingress module "gitlab_runner_namespace" { + count = local.gitlab_runner.enabled ? 1 : 0 + source = "../modules/kubernetes-namespace" name = "gitlab-runner" network_policies = [ @@ -62,9 +67,11 @@ module "gitlab_runner_namespace" { #tfsec:ignore:aws-s3-enable-versioning tfsec:ignore:aws-s3-enable-bucket-logging resource "aws_s3_bucket" "gitlab_runner_cache" { - bucket = "${local.name}-gitlab-runner-cache" - acl = "private" + count = local.gitlab_runner.enabled ? 1 : 0 + bucket = "${local.name}-gitlab-runner-cache" + acl = "private" + force_destroy = true server_side_encryption_configuration { rule { apply_server_side_encryption_by_default { @@ -73,27 +80,27 @@ } } + tags = { + Name = "${local.name}-gitlab-runner-cache" + Environment = local.env + } + lifecycle_rule { id = "gitlab-runner-cache-lifecycle-rule" enabled = true - tags = { "rule" = "gitlab-runner-cache-lifecycle-rule" } - expiration { days = 120 } } - - tags = { - Name = "${local.name}-gitlab-runner-cache" - Environment = local.env - } } resource "aws_s3_bucket_public_access_block" "gitlab_runner_cache_public_access_block" { - bucket = aws_s3_bucket.gitlab_runner_cache.id + count = local.gitlab_runner.enabled ?
1 : 0 + + bucket = aws_s3_bucket.gitlab_runner_cache[count.index].id # Block new public ACLs and uploading public objects block_public_acls = true # Retroactively remove public access granted through public ACLs @@ -105,9 +112,10 @@ resource "aws_s3_bucket_public_access_block" "gitlab_runner_cache_public_access_ } module "aws_iam_gitlab_runner" { - source = "../modules/aws-iam-eks-trusted" + count = local.gitlab_runner.enabled ? 1 : 0 - name = "${local.name}-ci" + source = "../modules/aws-iam-eks-trusted" + name = "${local.name}-${local.gitlab_runner.name}" region = local.region oidc_provider_arn = local.eks_oidc_provider_arn policy = jsonencode({ @@ -136,8 +144,8 @@ module "aws_iam_gitlab_runner" { "s3:*" ], "Resource" : [ - "arn:aws:s3:::${aws_s3_bucket.gitlab_runner_cache.id}", - "arn:aws:s3:::${aws_s3_bucket.gitlab_runner_cache.id}/*" + "arn:aws:s3:::${aws_s3_bucket.gitlab_runner_cache[count.index].id}", + "arn:aws:s3:::${aws_s3_bucket.gitlab_runner_cache[count.index].id}/*" ] } ] @@ -145,12 +153,13 @@ } resource "helm_release" "gitlab_runner" { - name = "gitlab-runner" - chart = local.gitlab-runner.chart - repository = local.gitlab-runner.repository - version = local.gitlab-runner.chart_version - namespace = module.gitlab_runner_namespace.name - wait = false + count = local.gitlab_runner.enabled ? 1 : 0 + + name = local.gitlab_runner.name + chart = local.gitlab_runner.chart + repository = local.gitlab_runner.repository + version = local.gitlab_runner.chart_version + namespace = module.gitlab_runner_namespace[count.index].name max_history = var.helm_release_history_size values = [ @@ -160,6 +169,6 @@ } output "gitlab_runner_cache_bucket_name" { - value = aws_s3_bucket.gitlab_runner_cache.id + value = local.gitlab_runner.enabled ? aws_s3_bucket.gitlab_runner_cache[0].id : null description = "Name of the s3 bucket for gitlab-runner cache" }
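Guarding the output with the same flag matters: with `count = 0` there is no `aws_s3_bucket.gitlab_runner_cache[0]` instance, so an unconditional reference would fail the plan, whereas the conditional simply yields `null` while the runner is disabled.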
diff --git a/terraform/layer2-k8s/eks-istio.tf b/terraform/layer2-k8s/eks-istio.tf new file mode 100644 index 00000000..a29b593f --- /dev/null +++ b/terraform/layer2-k8s/eks-istio.tf @@ -0,0 +1,122 @@ +locals { + istio_operator = { + name = local.helm_releases[index(local.helm_releases.*.id, "istio-operator")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "istio-operator")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "istio-operator")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "istio-operator")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "istio-operator")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "istio-operator")].namespace + } + istio_operator_resources = { + name = local.helm_releases[index(local.helm_releases.*.id, "istio-operator-resources")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "istio-operator-resources")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "istio-operator-resources")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "istio-operator-resources")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "istio-operator-resources")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "istio-operator-resources")].namespace + } + istio_resources = { + name = local.helm_releases[index(local.helm_releases.*.id, "istio-resources")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "istio-resources")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "istio-resources")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "istio-resources")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "istio-resources")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "istio-resources")].namespace + } + kiali_server = { + name = local.helm_releases[index(local.helm_releases.*.id, "kiali")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "kiali")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "kiali")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "kiali")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "kiali")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "kiali")].namespace + } +} + +module "istio_system_namespace" { + count = local.istio_operator_resources.enabled ? 1 : 0 + + source = "../modules/kubernetes-namespace" + name = local.istio_operator_resources.namespace +} + +module "kiali_namespace" { + count = local.kiali_server.enabled ? 1 : 0 + + source = "../modules/kubernetes-namespace" + name = local.kiali_server.namespace +} + +resource "helm_release" "istio_operator" { + count = local.istio_operator.enabled ? 1 : 0 + + name = local.istio_operator.name + chart = local.istio_operator.chart + repository = local.istio_operator.repository + version = local.istio_operator.chart_version + max_history = var.helm_release_history_size + + values = [ + file("${path.module}/templates/istio/istio-operator-values.yaml") + ] + +} + +resource "helm_release" "istio_operator_resources" { + count = local.istio_operator_resources.enabled ?
1 : 0 + + name = local.istio_operator_resources.name + chart = local.istio_operator_resources.chart + repository = local.istio_operator_resources.repository + version = local.istio_operator_resources.chart_version + namespace = module.istio_system_namespace[count.index].name + max_history = var.helm_release_history_size + + values = [ + file("${path.module}/templates/istio/istio-resources-values.yaml") + ] + + depends_on = [helm_release.istio_operator, helm_release.prometheus_operator] +} + +resource "time_sleep" "wait_10_seconds" { + count = local.istio_resources.enabled ? 1 : 0 + + create_duration = "10s" + + depends_on = [helm_release.istio_operator_resources] +} + +resource "helm_release" "istio_resources" { + count = local.istio_resources.enabled ? 1 : 0 + + name = local.istio_resources.name + chart = local.istio_resources.chart + repository = local.istio_resources.repository + version = local.istio_resources.chart_version + namespace = module.istio_system_namespace[count.index].name + max_history = var.helm_release_history_size + + values = [ + file("${path.module}/templates/istio/istio-resources-values.yaml") + ] + + depends_on = [time_sleep.wait_10_seconds] +} + +resource "helm_release" "kiali" { + count = local.kiali_server.enabled ? 1 : 0 + + name = local.kiali_server.name + chart = local.kiali_server.chart + repository = local.kiali_server.repository + version = local.kiali_server.chart_version + namespace = module.kiali_namespace[count.index].name + max_history = var.helm_release_history_size + + values = [ + file("${path.module}/templates/istio/istio-kiali-values.yaml") + ] + + depends_on = [helm_release.istio_operator, helm_release.prometheus_operator] +} diff --git a/terraform/layer2-k8s/eks-kube-prometheus-stack.tf b/terraform/layer2-k8s/eks-kube-prometheus-stack.tf index 67af6af5..20ca1699 100644 --- a/terraform/layer2-k8s/eks-kube-prometheus-stack.tf +++ b/terraform/layer2-k8s/eks-kube-prometheus-stack.tf @@ -20,7 +20,7 @@ locals { default_region = local.region grafana_domain_name = local.grafana_domain_name grafana_password = local.grafana_password - role_arn = module.aws_iam_grafana.role_arn + role_arn = local.kube_prometheus_stack.enabled ? module.aws_iam_grafana[0].role_arn : "" gitlab_client_id = local.grafana_gitlab_client_id gitlab_client_secret = local.grafana_gitlab_client_secret gitlab_group = local.grafana_gitlab_group @@ -79,7 +79,7 @@ module "monitoring_namespace" { policy_types = ["Ingress"] pod_selector = { match_expressions = { - key = "app.kubernetes.io/name" + key = "app" operator = "In" values = ["${local.kube_prometheus_stack.name}-operator"] } @@ -124,7 +124,7 @@ module "aws_iam_grafana" { count = local.kube_prometheus_stack.enabled ? 
1 : 0 source = "../modules/aws-iam-eks-trusted" - name = "${local.name}-${local.kube_prometheus_stack.name}-grafana" + name = "${local.name}-grafana" region = local.region oidc_provider_arn = local.eks_oidc_provider_arn policy = jsonencode({ @@ -172,7 +172,7 @@ resource "helm_release" "prometheus_operator" { name = local.kube_prometheus_stack.name chart = local.kube_prometheus_stack.chart repository = local.kube_prometheus_stack.repository - version = local.kube_prometheus_stack_version + version = local.kube_prometheus_stack.chart_version namespace = module.monitoring_namespace[count.index].name max_history = var.helm_release_history_size diff --git a/terraform/layer2-k8s/eks-loki-stack.tf b/terraform/layer2-k8s/eks-loki-stack.tf index 9a04ecdb..76d97ff0 100644 --- a/terraform/layer2-k8s/eks-loki-stack.tf +++ b/terraform/layer2-k8s/eks-loki-stack.tf @@ -1,11 +1,11 @@ locals { loki_stack = { - name = local.helm_charts[index(local.helm_charts.*.id, "loki-stack")].id - enabled = local.helm_charts[index(local.helm_charts.*.id, "loki-stack")].enabled - chart = local.helm_charts[index(local.helm_charts.*.id, "loki-stack")].chart - repository = local.helm_charts[index(local.helm_charts.*.id, "loki-stack")].repository - chart_version = local.helm_charts[index(local.helm_charts.*.id, "loki-stack")].version - namespace = local.helm_charts[index(local.helm_charts.*.id, "loki-stack")].namespace + name = local.helm_releases[index(local.helm_releases.*.id, "loki-stack")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "loki-stack")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "loki-stack")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "loki-stack")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "loki-stack")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "loki-stack")].namespace } } @@ -95,7 +95,7 @@ resource "helm_release" "loki_stack" { name = local.loki_stack.name chart = local.loki_stack.chart repository = local.loki_stack.repository - version = local.loki_stack_version + version = local.loki_stack.chart_version namespace = module.loki_namespace[count.index].name max_history = var.helm_release_history_size diff --git a/terraform/layer2-k8s/eks-nginx-ingress-controller.tf b/terraform/layer2-k8s/eks-nginx-ingress-controller.tf index 04222d82..62272273 100644 --- a/terraform/layer2-k8s/eks-nginx-ingress-controller.tf +++ b/terraform/layer2-k8s/eks-nginx-ingress-controller.tf @@ -1,7 +1,7 @@ locals { ingress_nginx = { name = local.helm_releases[index(local.helm_releases.*.id, "ingress-nginx")].id - enabled = local.helm_releases[index(local.helm_releases.*.id, "ingress-nginx")].name + enabled = local.helm_releases[index(local.helm_releases.*.id, "ingress-nginx")].enabled chart = local.helm_releases[index(local.helm_releases.*.id, "ingress-nginx")].chart repository = local.helm_releases[index(local.helm_releases.*.id, "ingress-nginx")].repository chart_version = local.helm_releases[index(local.helm_releases.*.id, "ingress-nginx")].version @@ -15,7 +15,7 @@ locals { ) } -data "template_file" "nginx_ingress" { +data "template_file" "ingress_nginx" { template = file("${path.module}/templates/${local.template_name}") vars = { @@ -169,7 +169,7 @@ resource "helm_release" "ingress_nginx" { max_history = var.helm_release_history_size values = [ - data.template_file.nginx_ingress.rendered, + data.template_file.ingress_nginx.rendered, ] depends_on = 
[helm_release.prometheus_operator] diff --git a/terraform/layer2-k8s/eks-reloader.tf b/terraform/layer2-k8s/eks-reloader.tf new file mode 100644 index 00000000..e5e63d68 --- /dev/null +++ b/terraform/layer2-k8s/eks-reloader.tf @@ -0,0 +1,69 @@ +locals { + reloader = { + name = local.helm_releases[index(local.helm_releases.*.id, "reloader")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "reloader")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "reloader")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "reloader")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "reloader")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "reloader")].namespace + } +} + +#tfsec:ignore:kubernetes-network-no-public-egress tfsec:ignore:kubernetes-network-no-public-ingress +module "reloader_namespace" { + count = local.reloader.enabled ? 1 : 0 + + source = "../modules/kubernetes-namespace" + name = local.reloader.namespace + network_policies = [ + { + name = "default-deny" + policy_types = ["Ingress", "Egress"] + pod_selector = {} + }, + { + name = "allow-this-namespace" + policy_types = ["Ingress"] + pod_selector = {} + ingress = { + from = [ + { + namespace_selector = { + match_labels = { + name = local.reloader.namespace + } + } + } + ] + } + }, + { + name = "allow-egress" + policy_types = ["Egress"] + pod_selector = {} + egress = { + to = [ + { + ip_block = { + cidr = "0.0.0.0/0" + except = [ + "169.254.169.254/32" + ] + } + } + ] + } + } + ] +} + +resource "helm_release" "reloader" { + count = local.reloader.enabled ? 1 : 0 + + name = local.reloader.name + chart = local.reloader.chart + repository = local.reloader.repository + version = local.reloader.chart_version + namespace = module.reloader_namespace[count.index].name + max_history = var.helm_release_history_size +} diff --git a/terraform/layer2-k8s/examples/eks-istio.tf b/terraform/layer2-k8s/examples/eks-istio.tf deleted file mode 100644 index f4e7651b..00000000 --- a/terraform/layer2-k8s/examples/eks-istio.tf +++ /dev/null @@ -1,98 +0,0 @@ -local { - istio-operator = { - chart = local.helm_charts[index(local.helm_charts.*.id, "istio-operator")].chart - repository = lookup(local.helm_charts[index(local.helm_charts.*.id, "istio-operator")], "repository", null) - chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "istio-operator")], "version", null) - } - istio-operator-resources = { - chart = local.helm_charts[index(local.helm_charts.*.id, "istio-operator-resources")].chart - repository = lookup(local.helm_charts[index(local.helm_charts.*.id, "istio-operator-resources")], "repository", null) - chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "istio-operator-resources")], "version", null) - } - istio-resources = { - chart = local.helm_charts[index(local.helm_charts.*.id, "istio-resources")].chart - repository = lookup(local.helm_charts[index(local.helm_charts.*.id, "istio-resources")], "repository", null) - chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "istio-resources")], "version", null) - } - kiali-server = { - chart = local.helm_charts[index(local.helm_charts.*.id, "kiali-server")].chart - repository = lookup(local.helm_charts[index(local.helm_charts.*.id, "kiali-server")], "repository", null) - chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "kiali-server")], "version", null) - } -} - -module "istio_system_namespace" { - source = 
"../modules/kubernetes-namespace" - name = "istio-system" -} - -module "kiali_namespace" { - source = "../modules/kubernetes-namespace" - name = "kiali" -} - -resource "helm_release" "istio_operator" { - name = "istio-operator" - chart = local.istio-operator.chart - repository = local.istio-operator.repository - version = local.istio-operator.chart_version - max_history = var.helm_release_history_size - wait = true - - values = [ - file("${path.module}/templates/istio/istio-operator-values.yaml") - ] -} - -resource "helm_release" "istio_operator_resources" { - name = "istio-operator-resources" - chart = local.istio-operator-resources.chart - repository = local.istio-operator-resources.repository - version = local.istio-operator-resources.chart_version - namespace = module.istio_system_namespace.name - max_history = var.helm_release_history_size - wait = true - - values = [ - file("${path.module}/templates/istio/istio-resources-values.yaml") - ] - - depends_on = [helm_release.istio_operator, helm_release.prometheus_operator] -} - -resource "time_sleep" "wait_10_seconds" { - depends_on = [helm_release.istio_operator_resources] - - create_duration = "10s" -} - -resource "helm_release" "istio_resources" { - name = "istio-resources" - chart = local.istio-resources.chart - repository = local.istio-resources.repository - version = local.istio-resources.chart_version - namespace = module.istio_system_namespace.name - max_history = var.helm_release_history_size - wait = false - - values = [ - file("${path.module}/templates/istio/istio-resources-values.yaml") - ] - - depends_on = [time_sleep.wait_10_seconds] -} - -resource "helm_release" "kiali" { - name = "kiali-server" - chart = local.kiali-server.chart - repository = local.kiali-server.repository - version = local.kiali-server.chart_version - namespace = module.kiali_namespace.name - max_history = var.helm_release_history_size - wait = false - - values = [ - file("${path.module}/templates/istio/istio-kiali-values.yaml") - ] - depends_on = [helm_release.istio_operator, helm_release.prometheus_operator] -} diff --git a/terraform/layer2-k8s/examples/eks-teamcity.tf b/terraform/layer2-k8s/examples/eks-teamcity.tf index 3f8cef05..b0cf717d 100644 --- a/terraform/layer2-k8s/examples/eks-teamcity.tf +++ b/terraform/layer2-k8s/examples/eks-teamcity.tf @@ -1,76 +1,55 @@ +# This release is broken now. It must be fixed. Issues have been created. 
+ locals { teamcity = { - chart = local.helm_charts[index(local.helm_charts.*.id, "teamcity")].chart - repository = lookup(local.helm_charts[index(local.helm_charts.*.id, "teamcity")], "repository", null) - chart_version = lookup(local.helm_charts[index(local.helm_charts.*.id, "teamcity")], "version", null) + name = local.helm_releases[index(local.helm_releases.*.id, "teamcity")].id + enabled = local.helm_releases[index(local.helm_releases.*.id, "teamcity")].enabled + chart = local.helm_releases[index(local.helm_releases.*.id, "teamcity")].chart + repository = local.helm_releases[index(local.helm_releases.*.id, "teamcity")].repository + chart_version = local.helm_releases[index(local.helm_releases.*.id, "teamcity")].version + namespace = local.helm_releases[index(local.helm_releases.*.id, "teamcity")].namespace } teamcity_domain_name = "teamcity-${local.domain_suffix}" } -module "eks_rbac_teamcity" { - source = "../modules/eks-rbac-ci" - - name = "${local.name}-teamcity" - role_arn = module.aws_iam_teamcity.role_arn - namespace = module.ci_namespace.name -} - -data "template_file" "teamcity_agent" { - template = file("${path.module}/templates/teamcity-agent-pod-template.yaml") +data "template_file" "teamcity" { + template = file("${path.module}/templates/teamcity-values.yaml") vars = { + domain_name = local.teamcity_domain_name + storage_class_name = kubernetes_storage_class.teamcity.id service_account_name = module.eks_rbac_teamcity.service_account_name } } -module "teamcity_namespace" { - source = "../modules/kubernetes-namespace" - name = "teamcity" -} - -data "template_file" "teamcity" { - template = file("${path.module}/templates/teamcity-values.yaml") +data "template_file" "teamcity_agent" { + template = file("${path.module}/templates/teamcity-agent-pod-template.yaml") vars = { - domain_name = local.teamcity_domain_name - storage_class_name = kubernetes_storage_class.teamcity.id service_account_name = module.eks_rbac_teamcity.service_account_name } } -resource "helm_release" "teamcity" { - name = "teamcity" - chart = local.teamcity.chart - repository = local.teamcity.repository - version = local.teamcity.chart_version - namespace = module.teamcity_namespace.name - wait = false - cleanup_on_fail = true - max_history = var.helm_release_history_size +module "eks_rbac_teamcity" { + count = local.teamcity.enabled ? 1 : 0 - values = [ - data.template_file.teamcity.rendered - ] + source = "../modules/eks-rbac-ci" + name = "${local.name}-${local.teamcity.name}" + role_arn = module.aws_iam_teamcity[count.index].role_arn + namespace = module.teamcity_namespace[count.index].name } -resource "kubernetes_storage_class" "teamcity" { - metadata { - name = "teamcity" - } - storage_provisioner = "kubernetes.io/aws-ebs" - reclaim_policy = "Retain" - allow_volume_expansion = true - parameters = { - type = "gp2" - encrypted = true - fsType = "ext4" - } +module "teamcity_namespace" { + count = local.teamcity.enabled ? 
1 : 0 + + source = "../modules/kubernetes-namespace" + name = local.teamcity.namespace } module "aws_iam_teamcity" { source = "../modules/aws-iam-eks-trusted" - name = "${local.name}-teamcity" + name = "${local.name}-${local.teamcity.name}" region = local.region oidc_provider_arn = local.eks_oidc_provider_arn policy = jsonencode({ @@ -79,7 +58,17 @@ module "aws_iam_teamcity" { { "Effect" : "Allow", "Action" : [ - "ecr:*", + "ecr:GetAuthorizationToken", + "ecr:GetDownloadUrlForLayer", + "ecr:BatchGetImage", + "ecr:BatchCheckLayerAvailability", + "ecr:PutImage", + "ecr:InitiateLayerUpload", + "ecr:UploadLayerPart", + "ecr:CompleteLayerUpload", + "ecr:ListTagsForResource", + "ecr:DescribeImageScanFindings", + "ecr:DescribeImages" ], "Resource" : "*" } @@ -87,19 +76,30 @@ module "aws_iam_teamcity" { }) } +resource "helm_release" "teamcity" { + count = local.teamcity.enabled ? 1 : 0 + + name = local.teamcity.name + chart = local.teamcity.chart + repository = local.teamcity.repository + version = local.teamcity.chart_version + namespace = module.teamcity_namespace[count.index].name + max_history = var.helm_release_history_size + + values = [ + data.template_file.teamcity.rendered + ] +} + output "teamcity_domain_name" { value = local.teamcity_domain_name description = "Teamcity server" } output "teamcity_service_account_name" { - value = module.eks_rbac_teamcity.service_account_name + value = local.teamcity.enabled ? module.eks_rbac_teamcity[0].service_account_name : null } output "teamcity_agent_pod_template" { - value = data.template_file.teamcity_agent.rendered -} - -output "teamcity_kubernetes_api_url" { - value = data.aws_eks_cluster.main.endpoint + value = local.teamcity.enabled ? data.template_file.teamcity_agent.rendered : null } diff --git a/terraform/layer2-k8s/helm-releases.yaml b/terraform/layer2-k8s/helm-releases.yaml index f200cc6d..7642ef6a 100644 --- a/terraform/layer2-k8s/helm-releases.yaml +++ b/terraform/layer2-k8s/helm-releases.yaml @@ -76,7 +76,7 @@ releases: chart: ../../helm-charts/istio/istio-operator repository: version: - namespace: null + namespace: - id: istio-operator-resources enabled: false chart: ../../helm-charts/istio/istio-operator-resources diff --git a/terraform/layer2-k8s/templates/elk-values.yaml b/terraform/layer2-k8s/templates/elk-values.yaml index c8864209..54c8bb79 100644 --- a/terraform/layer2-k8s/templates/elk-values.yaml +++ b/terraform/layer2-k8s/templates/elk-values.yaml @@ -1,5 +1,5 @@ apm-server: - enabled: true + enabled: false ingress: enabled: true annotations: @@ -67,7 +67,7 @@ elasticsearch: volumeClaimTemplate: accessModes: [ "ReadWriteOnce" ] - storageClassName: "${storage_class_name}" + storageClassName: advanced resources: requests: storage: 100Gi @@ -186,7 +186,7 @@ filebeat: operator: Exists metricbeat: - enabled: true + enabled: false daemonset: extraEnvs: - name: 'ELASTICSEARCH_USERNAME' @@ -324,12 +324,6 @@ kibana: annotations: kubernetes.io/ingress.class: nginx nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" - nginx.ingress.kubernetes.io/auth-url: "https://$host/oauth2/auth" - nginx.ingress.kubernetes.io/auth-signin: "https://$host/oauth2/start?rd=$escaped_request_uri" - nginx.ingress.kubernetes.io/configuration-snippet: | - proxy_set_header Authorization "Basic ${kibana_base64_creds}"; - proxy_set_header es-security-runas-user $http_x_forwarded_user; - proxy_set_header x-proxy-user $http_x_forwarded_user; path: / hosts: - ${kibana_domain_name} @@ -365,8 +359,6 @@ kibana: key: /usr/share/kibana/config/certs/tls.key 
certificate: /usr/share/kibana/config/certs/tls.crt xpack.security.encryptionKey: $${KIBANA_ENCRYPTION_KEY} - elasticsearch.requestHeadersWhitelist: [ es-security-runas-user, authorization ] - xpack.monitoring.elasticsearch.requestHeadersWhitelist: [ es-security-runas-user, authorization ] elasticsearch.ssl: verificationMode: none certificateAuthorities: /usr/share/kibana/config/certs/tls.crt diff --git a/terraform/layer2-k8s/templates/istio/istio-resources-values.yaml b/terraform/layer2-k8s/templates/istio/istio-resources-values.yaml index 51cfd032..5b4f9f5c 100644 --- a/terraform/layer2-k8s/templates/istio/istio-resources-values.yaml +++ b/terraform/layer2-k8s/templates/istio/istio-resources-values.yaml @@ -1,5 +1,14 @@ istioOperator: components: + pilot: + k8s: + resources: + requests: + cpu: "500m" + memory: "2Gi" + limits: + cpu: "500m" + memory: "2Gi" ingressGateways: - name: istio-ingressgateway enabled: true diff --git a/terraform/layer2-k8s/templates/loki-stack-values.yaml b/terraform/layer2-k8s/templates/loki-stack-values.yaml index 6c471608..880d83f8 100644 --- a/terraform/layer2-k8s/templates/loki-stack-values.yaml +++ b/terraform/layer2-k8s/templates/loki-stack-values.yaml @@ -11,6 +11,7 @@ loki: accessModes: - ReadWriteOnce size: 10Gi + storageClassName: advanced serviceMonitor: enabled: true diff --git a/terraform/layer2-k8s/templates/prometheus-values.yaml b/terraform/layer2-k8s/templates/prometheus-values.yaml index afc3cf09..0c941423 100644 --- a/terraform/layer2-k8s/templates/prometheus-values.yaml +++ b/terraform/layer2-k8s/templates/prometheus-values.yaml @@ -17,6 +17,7 @@ prometheus: storageSpec: volumeClaimTemplate: spec: + storageClassName: advanced accessModes: ["ReadWriteOnce"] resources: requests: @@ -87,8 +88,6 @@ grafana: persistence: enabled: false - accessModes: ["ReadWriteOnce"] - size: 5Gi sidecar: datasources: @@ -105,7 +104,7 @@ grafana: defaultRegion: "${default_region}" - name: Loki type: loki - url: http://loki-stack:3100 + url: http://loki-stack.loki:3100 jsonData: maxLines: 1000 @@ -179,6 +178,7 @@ alertmanager: storage: volumeClaimTemplate: spec: + storageClassName: advanced accessModes: ["ReadWriteOnce"] resources: requests: diff --git a/terraform/layer2-k8s/templates/teamcity-values.yaml b/terraform/layer2-k8s/templates/teamcity-values.yaml index 86c64faf..5450cbb1 100644 --- a/terraform/layer2-k8s/templates/teamcity-values.yaml +++ b/terraform/layer2-k8s/templates/teamcity-values.yaml @@ -16,7 +16,7 @@ server: persistence: type: pvc enabled: true - storageClassName: "${storage_class_name}" + storageClassName: advanced accessModes: - ReadWriteOnce size: 50Gi diff --git a/terraform/modules/aws-ec2-pritunl/security_groups.tf b/terraform/modules/aws-ec2-pritunl/security_groups.tf index 0ecaf7c1..a3d1fd74 100644 --- a/terraform/modules/aws-ec2-pritunl/security_groups.tf +++ b/terraform/modules/aws-ec2-pritunl/security_groups.tf @@ -33,7 +33,7 @@ module "efs_sg" { protocol = "6" from_port = 2049 to_port = 2049 - source_security_group_id = module.ec2_sg.this_security_group_id + source_security_group_id = module.ec2_sg.security_group_id } ] } From c354eb630d8c313b95f0fdcf26f3b3efbc1b9e50 Mon Sep 17 00:00:00 2001 From: maxim Date: Fri, 12 Nov 2021 14:57:05 +0600 Subject: [PATCH 5/6] fix grammar mistake in README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f6d1862c..f026fa31 100644 --- a/README.md +++ b/README.md @@ -455,7 +455,7 @@ terragrunt init -upgrade ### Additional components -This 
boiler instalss all basic and necessary components. However, we also provide several additional components. Both layers have such components. To enable them in:
+This boilerplate installs all basic and necessary components. However, we also provide several additional components; both layers have them. To enable them in:
 * layer1-aws: search `***_enable` variables and set them to **true**
 * layer2-k8s: check the `helm-releases.yaml` file and set **enabled: true** or **enabled: false** for components that you want to **deploy** or to **uninstall**

From 8848567625d21aaf7b4ff6d92ec499ed2454af20 Mon Sep 17 00:00:00 2001
From: maxim
Date: Mon, 15 Nov 2021 11:51:22 +0600
Subject: [PATCH 6/6] fix: Use count for data.template_file too. Delete example folders

---
 terraform/layer2-k8s/eks-cert-manager.tf       | 17 ++++++++++-------
 terraform/layer2-k8s/eks-cluster-autoscaler.tf |  7 ++++---
 terraform/layer2-k8s/eks-elk.tf                | 15 ++++++++-------
 terraform/layer2-k8s/eks-external-dns.tf       |  7 ++++---
 terraform/layer2-k8s/eks-external-secrets.tf   |  7 ++++---
 .../layer2-k8s/eks-kube-prometheus-stack.tf    | 10 +++++-----
 .../layer2-k8s/eks-nginx-ingress-controller.tf |  7 ++++---
 .../eks-teamcity.tf => eks-teamcity.tf.broken} | 10 ++++++----
 8 files changed, 45 insertions(+), 35 deletions(-)
 rename terraform/layer2-k8s/{examples/eks-teamcity.tf => eks-teamcity.tf.broken} (93%)

diff --git a/terraform/layer2-k8s/eks-cert-manager.tf b/terraform/layer2-k8s/eks-cert-manager.tf
index 06530b5b..f46fe406 100644
--- a/terraform/layer2-k8s/eks-cert-manager.tf
+++ b/terraform/layer2-k8s/eks-cert-manager.tf
@@ -26,16 +26,18 @@ locals {
 }
 
 data "template_file" "cert_manager" {
-  template = file("${path.module}/templates/cert-manager-values.yaml")
+  count = local.cert_manager.enabled ? 1 : 0
 
+  template = file("${path.module}/templates/cert-manager-values.yaml")
   vars = {
-    role_arn = local.cert_manager.enabled ? module.aws_iam_cert_manager[0].role_arn : ""
+    role_arn = module.aws_iam_cert_manager[count.index].role_arn
   }
 }
 
 data "template_file" "cluster_issuer" {
-  template = file("${path.module}/templates/cluster-issuer-values.yaml")
+  count = local.cert_manager_cluster_issuer.enabled ? 1 : 0
 
+  template = file("${path.module}/templates/cluster-issuer-values.yaml")
   vars = {
     region = local.region
     zone   = local.domain_name
@@ -44,8 +46,9 @@ data "template_file" "cluster_issuer" {
 }
 
 data "template_file" "certificate" {
-  template = file("${path.module}/templates/certificate-values.yaml")
+  count = local.cert_mananger_certificate.enabled ? 
1 : 0 + template = file("${path.module}/templates/certificate-values.yaml") vars = { domain_name = "*.${local.domain_name}" common_name = local.domain_name @@ -179,7 +182,7 @@ resource "helm_release" "cert_manager" { max_history = var.helm_release_history_size values = [ - data.template_file.cert_manager.rendered, + data.template_file.cert_manager[count.index].rendered, ] } @@ -195,7 +198,7 @@ resource "helm_release" "cluster_issuer" { max_history = var.helm_release_history_size values = [ - data.template_file.cluster_issuer.rendered, + data.template_file.cluster_issuer[count.index].rendered, ] # This dep needs for correct apply @@ -213,7 +216,7 @@ resource "helm_release" "certificate" { max_history = var.helm_release_history_size values = [ - data.template_file.certificate.rendered, + data.template_file.certificate[count.index].rendered, ] # This dep needs for correct apply diff --git a/terraform/layer2-k8s/eks-cluster-autoscaler.tf b/terraform/layer2-k8s/eks-cluster-autoscaler.tf index d5ed7717..db2aee3c 100644 --- a/terraform/layer2-k8s/eks-cluster-autoscaler.tf +++ b/terraform/layer2-k8s/eks-cluster-autoscaler.tf @@ -10,10 +10,11 @@ locals { } data "template_file" "cluster_autoscaler" { - template = file("${path.module}/templates/cluster-autoscaler-values.yaml") + count = local.cluster_autoscaler.enabled ? 1 : 0 + template = file("${path.module}/templates/cluster-autoscaler-values.yaml") vars = { - role_arn = local.cluster_autoscaler.enabled ? module.aws_iam_autoscaler[0].role_arn : "" + role_arn = module.aws_iam_autoscaler[count.index].role_arn region = local.region cluster_name = local.eks_cluster_id version = var.cluster_autoscaler_version @@ -150,7 +151,7 @@ resource "helm_release" "cluster_autoscaler" { max_history = var.helm_release_history_size values = [ - data.template_file.cluster_autoscaler.rendered, + data.template_file.cluster_autoscaler[count.index].rendered, ] depends_on = [helm_release.prometheus_operator] diff --git a/terraform/layer2-k8s/eks-elk.tf b/terraform/layer2-k8s/eks-elk.tf index 87e3c28e..9194e122 100644 --- a/terraform/layer2-k8s/eks-elk.tf +++ b/terraform/layer2-k8s/eks-elk.tf @@ -12,16 +12,17 @@ locals { } data "template_file" "elk" { - template = file("${path.module}/templates/elk-values.yaml") + count = local.elk.enabled ? 1 : 0 + template = file("${path.module}/templates/elk-values.yaml") vars = { - bucket_name = local.elk.enabled ? aws_s3_bucket.elastic_stack[0].id : "bucket_name" + bucket_name = aws_s3_bucket.elastic_stack[count.index].id snapshot_retention_days = var.elk_snapshot_retention_days index_retention_days = var.elk_index_retention_days apm_domain_name = local.apm_domain_name kibana_domain_name = local.kibana_domain_name kibana_user = "kibana-${local.env}" - kibana_password = local.elk.enabled ? random_string.kibana_password[0].result : "password" + kibana_password = random_string.kibana_password[count.index].result } } @@ -285,19 +286,19 @@ resource "helm_release" "elk" { max_history = var.helm_release_history_size values = [ - data.template_file.elk.rendered + data.template_file.elk[count.index].rendered ] } output "kibana_domain_name" { - value = local.kibana_domain_name + value = local.elk.enabled ? local.kibana_domain_name : null description = "Kibana dashboards address" } output "apm_domain_name" { - value = local.apm_domain_name - description = "" + value = local.elk.enabled ? 
local.apm_domain_name : null + description = "APM domain name" } output "elasticsearch_elastic_password" { diff --git a/terraform/layer2-k8s/eks-external-dns.tf b/terraform/layer2-k8s/eks-external-dns.tf index 6db73842..01822c6a 100644 --- a/terraform/layer2-k8s/eks-external-dns.tf +++ b/terraform/layer2-k8s/eks-external-dns.tf @@ -10,10 +10,11 @@ locals { } data "template_file" "external_dns" { - template = file("${path.module}/templates/external-dns.yaml") + count = local.external_dns.enabled ? 1 : 0 + template = file("${path.module}/templates/external-dns.yaml") vars = { - role_arn = local.external_dns.enabled ? module.aws_iam_external_dns[0].role_arn : 0 + role_arn = module.aws_iam_external_dns[count.index].role_arn domain_name = local.domain_name zone_type = "public" } @@ -120,7 +121,7 @@ resource "helm_release" "external_dns" { max_history = var.helm_release_history_size values = [ - data.template_file.external_dns.rendered, + data.template_file.external_dns[count.index].rendered, ] } diff --git a/terraform/layer2-k8s/eks-external-secrets.tf b/terraform/layer2-k8s/eks-external-secrets.tf index 10340ad5..16fd1512 100644 --- a/terraform/layer2-k8s/eks-external-secrets.tf +++ b/terraform/layer2-k8s/eks-external-secrets.tf @@ -10,10 +10,11 @@ locals { } data "template_file" "external_secrets" { - template = file("${path.module}/templates/external-secrets-values.yaml") + count = local.external_secrets.enabled ? 1 : 0 + template = file("${path.module}/templates/external-secrets-values.yaml") vars = { - role_arn = local.external_secrets.enabled ? module.aws_iam_external_secrets[0].role_arn : "" + role_arn = module.aws_iam_external_secrets[count.index].role_arn region = local.region } } @@ -98,7 +99,7 @@ resource "helm_release" "external_secrets" { max_history = var.helm_release_history_size values = [ - data.template_file.external_secrets.rendered, + data.template_file.external_secrets[count.index].rendered, ] } diff --git a/terraform/layer2-k8s/eks-kube-prometheus-stack.tf b/terraform/layer2-k8s/eks-kube-prometheus-stack.tf index 20ca1699..ab66bee3 100644 --- a/terraform/layer2-k8s/eks-kube-prometheus-stack.tf +++ b/terraform/layer2-k8s/eks-kube-prometheus-stack.tf @@ -183,27 +183,27 @@ resource "helm_release" "prometheus_operator" { } output "grafana_domain_name" { - value = local.grafana_domain_name + value = local.kube_prometheus_stack.enabled ? local.grafana_domain_name : null description = "Grafana dashboards address" } output "alertmanager_domain_name" { - value = local.alertmanager_domain_name + value = local.kube_prometheus_stack.enabled ? local.alertmanager_domain_name : null description = "Alertmanager ui address" } output "prometheus_domain_name" { - value = local.prometheus_domain_name + value = local.kube_prometheus_stack.enabled ? local.prometheus_domain_name : null description = "Prometheus ui address" } output "grafana_admin_password" { - value = local.grafana_password + value = local.kube_prometheus_stack.enabled ? local.grafana_password : null sensitive = true description = "Grafana admin password" } output "get_grafana_admin_password" { - value = "kubectl get secret --namespace monitoring kube-prometheus-stack-grafana -o jsonpath='{.data.admin-password}' | base64 --decode ; echo" + value = local.kube_prometheus_stack.enabled ? 
"kubectl get secret --namespace monitoring kube-prometheus-stack-grafana -o jsonpath='{.data.admin-password}' | base64 --decode ; echo" : null description = "Command which gets admin password from kubernetes secret" } diff --git a/terraform/layer2-k8s/eks-nginx-ingress-controller.tf b/terraform/layer2-k8s/eks-nginx-ingress-controller.tf index 62272273..dd9024f2 100644 --- a/terraform/layer2-k8s/eks-nginx-ingress-controller.tf +++ b/terraform/layer2-k8s/eks-nginx-ingress-controller.tf @@ -16,13 +16,14 @@ locals { } data "template_file" "ingress_nginx" { - template = file("${path.module}/templates/${local.template_name}") + count = local.ingress_nginx.enabled ? 1 : 0 + template = file("${path.module}/templates/${local.template_name}") vars = { hostname = local.domain_name ssl_cert = local.ssl_certificate_arn proxy_real_ip_cidr = local.vpc_cidr - namespace = local.ingress_nginx.enabled ? module.ingress_nginx_namespace[0].name : "default" + namespace = module.ingress_nginx_namespace[count.index].name } } @@ -169,7 +170,7 @@ resource "helm_release" "ingress_nginx" { max_history = var.helm_release_history_size values = [ - data.template_file.ingress_nginx.rendered, + data.template_file.ingress_nginx[count.index].rendered, ] depends_on = [helm_release.prometheus_operator] diff --git a/terraform/layer2-k8s/examples/eks-teamcity.tf b/terraform/layer2-k8s/eks-teamcity.tf.broken similarity index 93% rename from terraform/layer2-k8s/examples/eks-teamcity.tf rename to terraform/layer2-k8s/eks-teamcity.tf.broken index b0cf717d..ac35c94f 100644 --- a/terraform/layer2-k8s/examples/eks-teamcity.tf +++ b/terraform/layer2-k8s/eks-teamcity.tf.broken @@ -13,8 +13,9 @@ locals { } data "template_file" "teamcity" { - template = file("${path.module}/templates/teamcity-values.yaml") + count = local.teamcity.enabled ? 1 : 0 + template = file("${path.module}/templates/teamcity-values.yaml") vars = { domain_name = local.teamcity_domain_name storage_class_name = kubernetes_storage_class.teamcity.id @@ -23,8 +24,9 @@ data "template_file" "teamcity" { } data "template_file" "teamcity_agent" { - template = file("${path.module}/templates/teamcity-agent-pod-template.yaml") + count = local.teamcity.enabled ? 1 : 0 + template = file("${path.module}/templates/teamcity-agent-pod-template.yaml") vars = { service_account_name = module.eks_rbac_teamcity.service_account_name } @@ -87,12 +89,12 @@ resource "helm_release" "teamcity" { max_history = var.helm_release_history_size values = [ - data.template_file.teamcity.rendered + data.template_file.teamcity[count.index].rendered ] } output "teamcity_domain_name" { - value = local.teamcity_domain_name + value = local.teamcity.enabled ? local.teamcity_domain_name : null description = "Teamcity server" }